/*
 * producer_avformat.c -- avformat producer
 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
 * Author: Charles Yates <charles.yates@pandora.be>
 * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

// MLT Header files
#include <framework/mlt_producer.h>
#include <framework/mlt_frame.h>
#include <framework/mlt_profile.h>
#include <framework/mlt_log.h>

// ffmpeg Header files
#include <avformat.h>
#include <opt.h>
#ifdef SWSCALE
#  include <swscale.h>
#endif
#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
#  include "audioconvert.h"
#endif

// System header files
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#if LIBAVUTIL_VERSION_INT < (50<<16)
#define PIX_FMT_YUYV422 PIX_FMT_YUV422
#endif

void avformat_lock( );
void avformat_unlock( );

// Forward references.
static int producer_open( mlt_producer this, mlt_profile profile, char *file );
static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index );

/** Constructor for libavformat.
*/

mlt_producer producer_avformat_init( mlt_profile profile, char *file )
{
	int error = 0;

	// Report information about available demuxers and codecs as YAML Tiny
	if ( file && strstr( file, "f-list" ) )
	{
		fprintf( stderr, "---\nformats:\n" );
		AVInputFormat *format = NULL;
		while ( ( format = av_iformat_next( format ) ) )
			fprintf( stderr, "  - %s\n", format->name );
		fprintf( stderr, "...\n" );
		error = 1;
	}
	if ( file && strstr( file, "acodec-list" ) )
	{
		fprintf( stderr, "---\naudio_codecs:\n" );
		AVCodec *codec = NULL;
		while ( ( codec = av_codec_next( codec ) ) )
			if ( codec->decode && codec->type == CODEC_TYPE_AUDIO )
				fprintf( stderr, "  - %s\n", codec->name );
		fprintf( stderr, "...\n" );
		error = 1;
	}
	if ( file && strstr( file, "vcodec-list" ) )
	{
		fprintf( stderr, "---\nvideo_codecs:\n" );
		AVCodec *codec = NULL;
		while ( ( codec = av_codec_next( codec ) ) )
			if ( codec->decode && codec->type == CODEC_TYPE_VIDEO )
				fprintf( stderr, "  - %s\n", codec->name );
		fprintf( stderr, "...\n" );
		error = 1;
	}
	if ( error )
		return NULL;

	mlt_producer this = NULL;

	// Check that we have a non-NULL argument
	if ( file != NULL )
	{
		// Construct the producer
		this = calloc( 1, sizeof( struct mlt_producer_s ) );

		// Initialise it
		if ( mlt_producer_init( this, NULL ) == 0 )
		{
			// Get the properties
			mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

			// Set the resource property (required for all producers)
			mlt_properties_set( properties, "resource", file );

			// Register our get_frame implementation
			this->get_frame = producer_get_frame;

			// Open the file
			if ( producer_open( this, profile, file ) != 0 )
			{
				// Clean up
				mlt_producer_close( this );
				this = NULL;
			}
			else
			{
				// Close the file to release resources for large playlists - reopen later as needed
				mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
				mlt_properties_set_data( properties, "audio_context", NULL, 0, NULL, NULL );
				mlt_properties_set_data( properties, "video_context", NULL, 0, NULL, NULL );

				// Default the user-selectable indices from the auto-detected indices
				mlt_properties_set_int( properties, "audio_index",  mlt_properties_get_int( properties, "_audio_index" ) );
				mlt_properties_set_int( properties, "video_index",  mlt_properties_get_int( properties, "_video_index" ) );
			}
		}
	}

	return this;
}

/** Find the default streams.
*/

static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatContext *context, int *audio_index, int *video_index )
{
	int i;
	char key[200];

	mlt_properties_set_int( meta_media, "meta.media.nb_streams", context->nb_streams );

	// Allow for multiple audio and video streams in the file and select first of each (if available)
	for( i = 0; i < context->nb_streams; i++ )
	{
		// Get the codec context
		AVStream *stream = context->streams[ i ];
		if ( ! stream ) continue;
		AVCodecContext *codec_context = stream->codec;
		if ( ! codec_context ) continue;
		AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
		if ( ! codec ) continue;

		snprintf( key, sizeof(key), "meta.media.%d.stream.type", i );

		// Determine the type and obtain the first index of each type
		switch( codec_context->codec_type )
		{
			case CODEC_TYPE_VIDEO:
				if ( *video_index < 0 )
					*video_index = i;
				mlt_properties_set( meta_media, key, "video" );
				snprintf( key, sizeof(key), "meta.media.%d.stream.frame_rate", i );
				mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->r_frame_rate ) );
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
				snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
				mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->sample_aspect_ratio ) );
#endif
				snprintf( key, sizeof(key), "meta.media.%d.codec.pix_fmt", i );
				mlt_properties_set( meta_media, key, avcodec_get_pix_fmt_name( codec_context->pix_fmt ) );
				snprintf( key, sizeof(key), "meta.media.%d.codec.sample_aspect_ratio", i );
				mlt_properties_set_double( meta_media, key, av_q2d( codec_context->sample_aspect_ratio ) );
				break;
			case CODEC_TYPE_AUDIO:
				if ( *audio_index < 0 )
					*audio_index = i;
				mlt_properties_set( meta_media, key, "audio" );
#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
				snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
				mlt_properties_set( meta_media, key, avcodec_get_sample_fmt_name( codec_context->sample_fmt ) );
#endif
				snprintf( key, sizeof(key), "meta.media.%d.codec.sample_rate", i );
				mlt_properties_set_int( meta_media, key, codec_context->sample_rate );
				snprintf( key, sizeof(key), "meta.media.%d.codec.channels", i );
				mlt_properties_set_int( meta_media, key, codec_context->channels );
				break;
			default:
				break;
		}
//		snprintf( key, sizeof(key), "meta.media.%d.stream.time_base", i );
//		mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->time_base ) );
		snprintf( key, sizeof(key), "meta.media.%d.codec.name", i );
		mlt_properties_set( meta_media, key, codec->name );
#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(55<<8)+0))
		snprintf( key, sizeof(key), "meta.media.%d.codec.long_name", i );
		mlt_properties_set( meta_media, key, codec->long_name );
#endif
		snprintf( key, sizeof(key), "meta.media.%d.codec.bit_rate", i );
		mlt_properties_set_int( meta_media, key, codec_context->bit_rate );
//		snprintf( key, sizeof(key), "meta.media.%d.codec.time_base", i );
//		mlt_properties_set_double( meta_media, key, av_q2d( codec_context->time_base ) );
		snprintf( key, sizeof(key), "meta.media.%d.codec.profile", i );
		mlt_properties_set_int( meta_media, key, codec_context->profile );
		snprintf( key, sizeof(key), "meta.media.%d.codec.level", i );
		mlt_properties_set_int( meta_media, key, codec_context->level );
	}

	return meta_media;
}

/** Producer file destructor.
*/

static void producer_file_close( void *context )
{
	if ( context != NULL )
	{
		// Lock the mutex now
		avformat_lock( );

		// Close the file
		av_close_input_file( context );

		// Unlock the mutex now
		avformat_unlock( );
	}
}

/** Producer codec destructor.
*/

static void producer_codec_close( void *codec )
{
	if ( codec != NULL )
	{
		// Lock the mutex now
		avformat_lock( );

		// Close the codec
		avcodec_close( codec );

		// Unlock the mutex now
		avformat_unlock( );
	}
}

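// The two helpers below read raw DV frame bytes directly. In the DV frame
// header, bit 7 of byte 3 (the DSF flag) distinguishes 625/50 (PAL) from
// 525/60 (NTSC) material.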
static inline int dv_is_pal( AVPacket *pkt )
{
	return pkt->data[3] & 0x80;
}

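// Look for the DV video source control pack (id 0x61) in the VAUX area; an
// aspect/display field value of 2 or 7 signals a 16:9 (wide) recording.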
static int dv_is_wide( AVPacket *pkt )
{
	int i = 80 /* block size */ *3 /* VAUX starts at block 3 */ +3 /* skip block header */;

	for ( ; i < pkt->size; i += 5 /* packet size */ )
	{
		if ( pkt->data[ i ] == 0x61 )
		{
			uint8_t x = pkt->data[ i + 2 ] & 0x7;
			return ( x == 2 ) || ( x == 7 );
		}
	}
	return 0;
}

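// Derive the sample aspect ratio for a stream, preferring the flags embedded
// in DV frame data over the values reported by libavformat when a packet is
// available.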
static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
{
	double aspect_ratio = 1.0;

	if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
	{
		if ( pkt )
		{
			if ( dv_is_pal( pkt ) )
			{
				aspect_ratio = dv_is_wide( pkt )
					? 64.0/45.0 // 16:9 PAL
					: 16.0/15.0; // 4:3 PAL
			}
			else
			{
				aspect_ratio = dv_is_wide( pkt )
					? 32.0/27.0 // 16:9 NTSC
					: 8.0/9.0; // 4:3 NTSC
			}
		}
		else
		{
			AVRational ar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
				stream->sample_aspect_ratio;
#else
				codec_context->sample_aspect_ratio;
#endif
			// Override FFmpeg's notion of DV aspect ratios, which are
			// based upon a width of 704. Since we do not have a normaliser
			// that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
			// we just coerce the values to facilitate a passive behaviour through
			// the rescale normaliser when using equivalent producers and consumers.
			// = display_aspect / (width * height)
			if ( ar.num == 10 && ar.den == 11 )
				aspect_ratio = 8.0/9.0; // 4:3 NTSC
			else if ( ar.num == 59 && ar.den == 54 )
				aspect_ratio = 16.0/15.0; // 4:3 PAL
			else if ( ar.num == 40 && ar.den == 33 )
				aspect_ratio = 32.0/27.0; // 16:9 NTSC
			else if ( ar.num == 118 && ar.den == 81 )
				aspect_ratio = 64.0/45.0; // 16:9 PAL
		}
	}
	else
	{
		AVRational codec_sar = codec_context->sample_aspect_ratio;
		AVRational stream_sar =
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
			stream->sample_aspect_ratio;
#else
			{ 0, 1 };
#endif
		if ( codec_sar.num > 0 )
			aspect_ratio = av_q2d( codec_sar );
		else if ( stream_sar.num > 0 )
			aspect_ratio = av_q2d( stream_sar );
	}
	return aspect_ratio;
}

/** Open the file.
*/

static int producer_open( mlt_producer this, mlt_profile profile, char *file )
{
	// Return an error code (0 == no error)
	int error = 0;

	// Context for avformat
	AVFormatContext *context = NULL;

	// Get the properties
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

	// We will treat everything with the producer fps
	double fps = mlt_profile_fps( profile );

	// Lock the mutex now
	avformat_lock( );

	// If "MRL", then create AVInputFormat
	AVInputFormat *format = NULL;
	AVFormatParameters *params = NULL;
	char *standard = NULL;
	char *mrl = strchr( file, ':' );

	// AV option (0 = both, 1 = video, 2 = audio)
	int av = 0;

	// Only if there is not a protocol specification that avformat can handle
	if ( mrl && !url_exist( file ) )
	{
		// 'file' becomes format abbreviation
		mrl[0] = 0;

		// Lookup the format
		format = av_find_input_format( file );

		// Eat the format designator
		file = ++mrl;

		if ( format )
		{
			// Allocate params
			params = calloc( sizeof( AVFormatParameters ), 1 );

			// These are required by video4linux (defaults)
			params->width = 640;
			params->height = 480;
			params->time_base= (AVRational){1,25};
			// params->device = file;
			params->channels = 2;
			params->sample_rate = 48000;
		}

		// XXX: this does not work anymore since avdevice
		// TODO: make producer_avdevice?
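		// Parameters arrive as a query string appended to the MRL: "name:value"
		// pairs separated by '&', using the names handled below (width, height,
		// frame_rate, sample_rate, channels, standard, av).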
		// Parse out params
		mrl = strchr( file, '?' );
		while ( mrl )
		{
			mrl[0] = 0;
			char *name = strdup( ++mrl );
			char *value = strchr( name, ':' );
			if ( value )
			{
				value[0] = 0;
				value++;
				char *t = strchr( value, '&' );
				if ( t )
					t[0] = 0;
				if ( !strcmp( name, "frame_rate" ) )
					params->time_base.den = atoi( value );
				else if ( !strcmp( name, "frame_rate_base" ) )
					params->time_base.num = atoi( value );
				else if ( !strcmp( name, "sample_rate" ) )
					params->sample_rate = atoi( value );
				else if ( !strcmp( name, "channels" ) )
					params->channels = atoi( value );
				else if ( !strcmp( name, "width" ) )
					params->width = atoi( value );
				else if ( !strcmp( name, "height" ) )
					params->height = atoi( value );
				else if ( !strcmp( name, "standard" ) )
				{
					standard = strdup( value );
					params->standard = standard;
				}
				else if ( !strcmp( name, "av" ) )
					av = atoi( value );
			}
			free( name );
			mrl = strchr( mrl, '&' );
		}
	}

	// Now attempt to open the file
	error = av_open_input_file( &context, file, format, 0, params ) < 0;

	// Cleanup AVFormatParameters
	free( standard );
	free( params );

	// If successful, then try to get additional info
	if ( error == 0 )
	{
		// Get the stream info
		error = av_find_stream_info( context ) < 0;

		// Continue if no error
		if ( error == 0 )
		{
			// We will default to the first audio and video streams found
			int audio_index = -1;
			int video_index = -1;
			int av_bypass = 0;
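			// av_bypass is set for pipes and network streams below; the image
			// fetcher uses it to skip the pause/duplicate-last-frame shortcut.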

			// Now set properties where we can (use default unknowns if required)
			if ( context->duration != AV_NOPTS_VALUE )
			{
				// This isn't going to be accurate for all formats
				mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
				mlt_properties_set_position( properties, "out", frames - 1 );
				mlt_properties_set_position( properties, "length", frames );
			}

			// Find default audio and video streams
			find_default_streams( properties, context, &audio_index, &video_index );

			if ( context->start_time != AV_NOPTS_VALUE )
				mlt_properties_set_double( properties, "_start_time", context->start_time );

			// Check if we're seekable (something funny about mpeg here :-/)
			if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 7 ) && strncmp( file, "udp:", 4 ) && strncmp( file, "tcp:", 4 ) && strncmp( file, "rtsp:", 5 ) && strncmp( file, "rtp:", 4 ) )
			{
				mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 );
				mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL );
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );
			}
			else
				av_bypass = 1;

			// Store selected audio and video indexes on properties
			mlt_properties_set_int( properties, "_audio_index", audio_index );
			mlt_properties_set_int( properties, "_video_index", video_index );
			mlt_properties_set_int( properties, "_last_position", -1 );

			// Fetch the width, height and aspect ratio
			if ( video_index != -1 )
			{
				AVCodecContext *codec_context = context->streams[ video_index ]->codec;
				mlt_properties_set_int( properties, "width", codec_context->width );
				mlt_properties_set_int( properties, "height", codec_context->height );

				if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
				{
					// Fetch the first frame of DV so we can read it directly
					AVPacket pkt;
					int ret = 0;
					while ( ret >= 0 )
					{
						ret = av_read_frame( context, &pkt );
						if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
						{
							mlt_properties_set_double( properties, "aspect_ratio",
								get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
							break;
						}
					}
				}
				else
				{
					mlt_properties_set_double( properties, "aspect_ratio",
						get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
				}
			}

			// Read Metadata
			if (context->title != NULL)
				mlt_properties_set(properties, "meta.attr.title.markup", context->title );
			if (context->author != NULL)
				mlt_properties_set(properties, "meta.attr.author.markup", context->author );
			if (context->copyright != NULL)
				mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
			if (context->comment != NULL)
				mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
			if (context->album != NULL)
				mlt_properties_set(properties, "meta.attr.album.markup", context->album );
			if (context->year != 0)
				mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
			if (context->track != 0)
				mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );

			// We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
			if ( av == 0 && audio_index != -1 && video_index != -1 )
			{
				// We'll use the open one as our video_context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );

				// And open again for our audio context
				av_open_input_file( &context, file, NULL, 0, NULL );
				av_find_stream_info( context );

				// Audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else if ( av != 2 && video_index != -1 )
			{
				// We only have a video context
				mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL );
			}
			else if ( audio_index != -1 )
			{
				// We only have an audio context
				mlt_properties_set_data( properties, "audio_context", context, 0, producer_file_close, NULL );
			}
			else
			{
				// Something has gone wrong
				error = -1;
			}

			mlt_properties_set_int( properties, "av_bypass", av_bypass );
		}
	}

	// Unlock the mutex now
	avformat_unlock( );

	return error;
}

/** Convert a frame position to a time code.
*/

static double producer_time_of_frame( mlt_producer this, mlt_position position )
{
	return ( double )position / mlt_producer_get_fps( this );
}

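// Convert the decoded AVFrame into the caller-supplied buffer, picking the
// output pixel format (RGBA, YUV420P, RGB24 or YUYV422) from the requested
// mlt_image_format.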
static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format *format, int width, int height )
{
#ifdef SWSCALE
	if ( pix_fmt == PIX_FMT_RGB32 )
	{
		*format = mlt_image_rgb24a;
		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
			width, height, PIX_FMT_RGBA, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
		sws_scale( context, frame->data, frame->linesize, 0, height,
			output.data, output.linesize);
		sws_freeContext( context );
	}
	else if ( *format == mlt_image_yuv420p )
	{
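		// Lay the planar 4:2:0 output directly in the caller's buffer: a
		// full-size Y plane followed by the quarter-size U and V planes.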
		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
			width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		output.data[0] = buffer;
		output.data[1] = buffer + width * height;
		output.data[2] = buffer + ( 3 * width * height ) / 2;
		output.linesize[0] = width;
		output.linesize[1] = width >> 1;
		output.linesize[2] = width >> 1;
		sws_scale( context, frame->data, frame->linesize, 0, height,
			output.data, output.linesize);
		sws_freeContext( context );
	}
	else if ( *format == mlt_image_rgb24 )
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
			width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
		sws_scale( context, frame->data, frame->linesize, 0, height,
			output.data, output.linesize);
		sws_freeContext( context );
	}
	else
	{
		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
			width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL);
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
		sws_scale( context, frame->data, frame->linesize, 0, height,
			output.data, output.linesize);
		sws_freeContext( context );
	}
#else
	if ( *format == mlt_image_yuv420p )
	{
		AVPicture pict;
		pict.data[0] = buffer;
		pict.data[1] = buffer + width * height;
		pict.data[2] = buffer + ( 3 * width * height ) / 2;
		pict.linesize[0] = width;
		pict.linesize[1] = width >> 1;
		pict.linesize[2] = width >> 1;
		img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
	}
	else if ( *format == mlt_image_rgb24 )
	{
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
		img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
	}
	else
	{
		AVPicture output;
		avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
		img_convert( &output, PIX_FMT_YUYV422, (AVPicture *)frame, pix_fmt, width, height );
	}
#endif
}

/** Allocate the image buffer and set it on the frame.
*/

static int allocate_buffer( mlt_properties frame_properties, AVCodecContext *codec_context, uint8_t **buffer, mlt_image_format *format, int *width, int *height )
{
	int size = 0;

	if ( codec_context->width == 0 || codec_context->height == 0 )
		return size;

	*width = codec_context->width;
	*height = codec_context->height;
	mlt_properties_set_int( frame_properties, "width", *width );
	mlt_properties_set_int( frame_properties, "height", *height );

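	// Size the buffer for the requested image format; an extra line of
	// headroom ( *height + 1 ) is included, presumably as slack for the
	// converters above.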
	if ( codec_context->pix_fmt == PIX_FMT_RGB32 )
		size = *width * ( *height + 1 ) * 4;
	else switch ( *format )
	{
		case mlt_image_yuv420p:
			size = *width * 3 * ( *height + 1 ) / 2;
			break;
		case mlt_image_rgb24:
			size = *width * ( *height + 1 ) * 3;
			break;
		default:
			*format = mlt_image_yuv422;
			size = *width * ( *height + 1 ) * 2;
			break;
	}

	// Construct the output image
	*buffer = mlt_pool_alloc( size );
	if ( *buffer )
		mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL );
	else
		size = 0;

	return size;
}

/** Get an image from a frame.
*/

static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
{
	// Get the properties from the frame
	mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );

	// Obtain the frame number of this frame
	mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );

	// Get the producer
	mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );

	// Get the producer properties
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

	// Fetch the video_context
	AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );

	// Get the video_index
	int index = mlt_properties_get_int( properties, "video_index" );

	// Obtain the expected frame number
	mlt_position expected = mlt_properties_get_position( properties, "_video_expected" );

	// Get the video stream
	AVStream *stream = context->streams[ index ];

	// Get codec context
	AVCodecContext *codec_context = stream->codec;

	// Packet
	AVPacket pkt;

	// Get the conversion frame
	AVFrame *av_frame = mlt_properties_get_data( properties, "av_frame", NULL );

	// Special case pause handling flag
	int paused = 0;

	// Special case ffwd handling
	int ignore = 0;

	// We may want to use the source fps if available
	double source_fps = mlt_properties_get_double( properties, "source_fps" );
	double fps = mlt_producer_get_fps( this );

	// This is the physical frame position in the source
	int req_position = ( int )( position / fps * source_fps + 0.5 );

	// Get the seekable status
	int seekable = mlt_properties_get_int( properties, "seekable" );

	// Hopefully provide better support for streams...
	int av_bypass = mlt_properties_get_int( properties, "av_bypass" );

	// Determines if we have to decode all frames in a sequence
	int must_decode = 1;

	// Temporary hack to improve intra frame only
	must_decode = strcmp( codec_context->codec->name, "dnxhd" ) &&
				  strcmp( codec_context->codec->name, "dvvideo" ) &&
				  strcmp( codec_context->codec->name, "huffyuv" ) &&
				  strcmp( codec_context->codec->name, "mjpeg" ) &&
				  strcmp( codec_context->codec->name, "rawvideo" );

	int last_position = mlt_properties_get_int( properties, "_last_position" );

	// Seek if necessary
	if ( position != expected || last_position == -1 )
	{
		if ( av_frame != NULL && position + 1 == expected )
		{
			// We're paused - use last image
			paused = 1;
		}
		else if ( !seekable && position > expected && ( position - expected ) < 250 )
		{
			// Fast forward - seeking is inefficient for small distances - just ignore following frames
			ignore = ( int )( ( position - expected ) / fps * source_fps );
		}
		else if ( seekable && ( position < expected || position - expected >= 12 || last_position == -1 ) )
		{
			// Calculate the timestamp for the requested frame
			int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
			if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE )
				timestamp += context->start_time;
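			// When every frame must be decoded, aim roughly one second before
			// the target so decoding can resume from an earlier keyframe.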
			if ( must_decode )
				timestamp -= AV_TIME_BASE;
			if ( timestamp < 0 )
				timestamp = 0;

			// Set to the timestamp
			mlt_log_debug( MLT_PRODUCER_SERVICE( this ), "seeking timestamp %lld position %d expected %d last_pos %d\n",
				timestamp, position, expected, last_position );
			av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );

			// Remove the cached info relating to the previous position
			mlt_properties_set_int( properties, "_current_position", -1 );
			mlt_properties_set_int( properties, "_last_position", -1 );
			mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );
			av_frame = NULL;
		}
	}

	// Duplicate the last image if necessary (see comment on rawvideo below)
	int current_position = mlt_properties_get_int( properties, "_current_position" );
	int got_picture = mlt_properties_get_int( properties, "_got_picture" );
	if ( av_frame != NULL && got_picture && ( paused || current_position >= req_position ) && av_bypass == 0 )
	{
		// Duplicate it
		if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
			convert_image( av_frame, *buffer, codec_context->pix_fmt, format, *width, *height );
		else
			mlt_frame_get_image( frame, buffer, format, width, height, writable );
	}
	else
	{
		int ret = 0;
		int int_position = 0;
		got_picture = 0;

		av_init_packet( &pkt );

		// Construct an AVFrame for YUV422 conversion
		if ( av_frame == NULL )
			av_frame = avcodec_alloc_frame( );

		while( ret >= 0 && !got_picture )
		{
			// Read a packet
			ret = av_read_frame( context, &pkt );

			// We only deal with video from the selected video_index
			if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 )
			{
				// Determine time code of the packet
				if (pkt.dts != AV_NOPTS_VALUE)
				{
					int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 );
					if ( context->start_time != AV_NOPTS_VALUE )
						int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
					last_position = mlt_properties_get_int( properties, "_last_position" );
					if ( int_position == last_position )
						int_position = last_position + 1;
				}
				else
				{
					int_position = req_position;
				}
				// Make a dumb assumption on streams that contain wild timestamps
				if ( (unsigned) req_position - (unsigned) int_position > 999 )
					int_position = req_position;
				mlt_properties_set_int( properties, "_last_position", int_position );

				// Decode the image
				if ( must_decode || int_position >= req_position )
					ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size );

				if ( got_picture )
				{
					// Handle ignore
					if ( int_position < req_position )
					{
						ignore = 0;
						got_picture = 0;
					}
					else if ( int_position >= req_position )
					{
						ignore = 0;
					}
					else if ( ignore -- )
					{
						got_picture = 0;
					}
				}
				mlt_log_debug( MLT_PRODUCER_SERVICE(this), "pkt.dts %llu req_pos %d cur_pos %d pkt_pos %d got_pic %d key %d\n",
					pkt.dts, req_position, current_position, int_position, got_picture, pkt.flags & PKT_FLAG_KEY );
				av_free_packet( &pkt );
			}
			else if ( ret >= 0 )
			{
				av_free_packet( &pkt );
			}

			// Now handle the picture if we have one
			if ( got_picture )
			{
				if ( allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) )
				{
					convert_image( av_frame, *buffer, codec_context->pix_fmt, format, *width, *height );
					mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame );
					mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first );
					mlt_properties_set_int( properties, "_current_position", int_position );
					mlt_properties_set_int( properties, "_got_picture", 1 );
					mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL );
				}
				else
				{
					got_picture = 0;
				}
			}
		}
		if ( !got_picture )
			mlt_frame_get_image( frame, buffer, format, width, height, writable );
	}

	// Very untidy - for rawvideo, the packet contains the frame, hence the free packet
	// above will break the pause behaviour - so we wipe the frame now
	if ( !strcmp( codec_context->codec->name, "rawvideo" ) )
		mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL );

	// Set the field order property for this frame
	mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) );

	// Regardless of speed, we expect to get the next frame (cos we ain't too bright)
	mlt_properties_set_position( properties, "_video_expected", position + 1 );

	return 0;
}

/** Process properties as AVOptions and apply to AV context obj
*/

static void apply_properties( void *obj, mlt_properties properties, int flags )
{
	int i;
	int count = mlt_properties_count( properties );
	for ( i = 0; i < count; i++ )
	{
		const char *opt_name = mlt_properties_get_name( properties, i );
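		// av_find_opt is asked to match only options carrying all of the
		// given AV_OPT_FLAG_* bits, so unrelated properties are skipped.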
		const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
		if ( opt != NULL )
#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0)
			av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), 0, NULL );
#elif LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0)
			av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 );
#else
			av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) );
#endif
	}
}

/** Set up video handling.
*/

static void producer_set_up_video( mlt_producer this, mlt_frame frame )
{
	// Get the properties
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

	// Fetch the video_context
	AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL );

	// Get the video_index
	int index = mlt_properties_get_int( properties, "video_index" );

	// Reopen the file if necessary
	if ( !context && index > -1 )
	{
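		// Block property events while reopening so listeners are not fired
		// for the transient state.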
		mlt_events_block( properties, this );
		producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
			mlt_properties_get( properties, "resource" ) );
		context = mlt_properties_get_data( properties, "video_context", NULL );
		mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
		mlt_events_unblock( properties, this );

		// Process properties as AVOptions
		apply_properties( context, properties, AV_OPT_FLAG_DECODING_PARAM );
	}

	// Exception handling for video_index
	if ( context && index >= (int) context->nb_streams )
	{
		// Get the last video stream
		for ( index = context->nb_streams - 1; index >= 0 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO; --index );
		mlt_properties_set_int( properties, "video_index", index );
	}
	if ( context && index > -1 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_VIDEO )
	{
		// Invalidate the video stream
		index = -1;
		mlt_properties_set_int( properties, "video_index", index );
	}

	// Get the frame properties
	mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );

	if ( context && index > -1 )
	{
		// Get the video stream
		AVStream *stream = context->streams[ index ];

		// Get codec context
		AVCodecContext *codec_context = stream->codec;

		// Get the codec
		AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL );

		// Update the video properties if the index changed
		if ( index != mlt_properties_get_int( properties, "_video_index" ) )
		{
			// Reset the video properties if the index changed
			mlt_properties_set_int( properties, "_video_index", index );
			mlt_properties_set_data( properties, "video_codec", NULL, 0, NULL, NULL );
			mlt_properties_set_int( properties, "width", codec_context->width );
			mlt_properties_set_int( properties, "height", codec_context->height );
			// TODO: get the first usable AVPacket and reset the stream position
			mlt_properties_set_double( properties, "aspect_ratio",
				get_aspect_ratio( context->streams[ index ], codec_context, NULL ) );
			codec = NULL;
		}

		// Initialise the codec if necessary
		if ( codec == NULL )
		{
			// Initialise multi-threading
			int thread_count = mlt_properties_get_int( properties, "threads" );
			if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
				thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
			if ( thread_count > 1 )
			{
				avcodec_thread_init( codec_context, thread_count );
				codec_context->thread_count = thread_count;
			}

			// Find the codec
			codec = avcodec_find_decoder( codec_context->codec_id );

			// If we don't have a codec and we can't initialise it, we can't do much more...
			avformat_lock( );
			if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
			{
				// Now store the codec with its destructor
				mlt_properties_set_data( properties, "video_codec", codec_context, 0, producer_codec_close, NULL );
			}
			else
			{
				// Remember that we can't use this later
				mlt_properties_set_int( properties, "video_index", -1 );
				index = -1;
			}
			avformat_unlock( );

			// Process properties as AVOptions
			apply_properties( codec_context, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
		}

		// No codec, no show...
		if ( codec && index > -1 )
		{
			double source_fps = 0;
			double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
			double aspect_ratio = ( force_aspect_ratio > 0.0 ) ?
				force_aspect_ratio : mlt_properties_get_double( properties, "aspect_ratio" );

			// Determine the fps
			source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num );

			// We'll use fps if it's available
			if ( source_fps > 0 )
				mlt_properties_set_double( properties, "source_fps", source_fps );
			else
				mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( this ) );
			mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );

			// Set the width and height
			mlt_properties_set_int( frame_properties, "width", codec_context->width );
			mlt_properties_set_int( frame_properties, "height", codec_context->height );
			mlt_properties_set_int( frame_properties, "real_width", codec_context->width );
			mlt_properties_set_int( frame_properties, "real_height", codec_context->height );
			mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );

			mlt_frame_push_get_image( frame, producer_get_image );
			mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
		}
		else
		{
			mlt_properties_set_int( frame_properties, "test_image", 1 );
		}
	}
	else
	{
		mlt_properties_set_int( frame_properties, "test_image", 1 );
	}
}

/** Get the audio from a frame.
*/

static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
{
	// Get the properties from the frame
	mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );

	// Obtain the frame number of this frame
	mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );

	// Get the producer
	mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL );

	// Get the producer properties
	mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );

	// Fetch the audio_context
	AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );

	// Get the audio_index
	int index = mlt_properties_get_int( properties, "audio_index" );

	// Get the seekable status
	int seekable = mlt_properties_get_int( properties, "seekable" );

	// Obtain the expected frame number
1107         mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" );
1108
1109         // Obtain the resample context if it exists (not always needed)
1110         ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL );
1111
1112         // Obtain the audio buffers
1113         int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL );
1114         int16_t *decode_buffer = mlt_properties_get_data( properties, "decode_buffer", NULL );
1115
1116         // Get amount of audio used
1117         int audio_used =  mlt_properties_get_int( properties, "_audio_used" );
1118
1119         // Calculate the real time code
1120         double real_timecode = producer_time_of_frame( this, position );
1121
1122         // Get the audio stream
1123         AVStream *stream = context->streams[ index ];
1124
1125         // Get codec context
1126         AVCodecContext *codec_context = stream->codec;
1127
1128         // Packet
1129         AVPacket pkt;
1130
1131         // Number of frames to ignore (for fast forward)
1132         int ignore = 0;
1133
1134         // Flag for paused (silence)
1135         int paused = 0;
1136
1137         // Check for resample and create if necessary
1138         if ( resample == NULL && codec_context->channels <= 2 )
1139         {
1140                 // Create the resampler
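		// Newer libavcodec (>= 52.15.0) lets us state the input and output sample formats explicitly; the older audio_resample_init() assumes signed 16-bit throughout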
1141 #if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(15<<8)+0))
1142                 resample = av_audio_resample_init( *channels, codec_context->channels, *frequency, codec_context->sample_rate,
1143                         SAMPLE_FMT_S16, codec_context->sample_fmt, 16, 10, 0, 0.8 );
1144 #else
1145                 resample = audio_resample_init( *channels, codec_context->channels, *frequency, codec_context->sample_rate );
1146 #endif
1147
1148                 // And store it on properties
1149                 mlt_properties_set_data( properties, "audio_resample", resample, 0, ( mlt_destructor )audio_resample_close, NULL );
1150         }
1151         else if ( resample == NULL )
1152         {
1153                 // TODO: uncomment the next line and remove the request_channels assignment when full multi-channel support is ready
1154                 // *channels = codec_context->channels;
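		// request_channels is only a hint - decoders that support it (AC-3, for example) will downmix to the requested channel count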
1155                 codec_context->request_channels = *channels;
1156
1157                 *frequency = codec_context->sample_rate;
1158         }
1159
1160         // Check for audio buffer and create if necessary
1161         if ( audio_buffer == NULL )
1162         {
1163                 // Allocate the audio buffer
1164                 audio_buffer = mlt_pool_alloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1165
1166                 // And store it on properties for reuse
1167                 mlt_properties_set_data( properties, "audio_buffer", audio_buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1168         }
1169
1170         // Check for decoder buffer and create if necessary
1171         if ( decode_buffer == NULL )
1172         {
1173                 // Allocate the decode buffer
1174                 decode_buffer = av_malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
1175
1176                 // And store it on properties for reuse
1177                 mlt_properties_set_data( properties, "decode_buffer", decode_buffer, 0, ( mlt_destructor )av_free, NULL );
1178         }
1179
1180         // Seek if necessary
1181         if ( position != expected )
1182         {
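		// Distinguish a pause (one frame behind), a cheap skip ahead on non-seekable streams, and a real seek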
1183                 if ( position + 1 == expected )
1184                 {
1185                         // We're paused - silence required
1186                         paused = 1;
1187                 }
1188                 else if ( !seekable && position > expected && ( position - expected ) < 250 )
1189                 {
1190                         // Fast forward - seeking is inefficient for small distances - just ignore the intervening frames
1191                         ignore = position - expected;
1192                 }
1193                 else if ( position < expected || position - expected >= 12 )
1194                 {
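			// Convert the target time to AV_TIME_BASE units and let avformat seek to the nearest preceding keyframe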
1195                         int64_t timestamp = ( int64_t )( real_timecode * AV_TIME_BASE + 0.5 );
1196                         if ( context->start_time != AV_NOPTS_VALUE )
1197                                 timestamp += context->start_time;
1198                         if ( timestamp < 0 )
1199                                 timestamp = 0;
1200
1201                         // Seek to the real timecode
1202                         if ( av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD ) != 0 )
1203                                 paused = 1;
1204
1205                         // Clear the usage in the audio buffer
1206                         audio_used = 0;
1207                 }
1208         }
1209
1210         // Get the audio if required
1211         if ( !paused )
1212         {
1213                 int ret = 0;
1214                 int got_audio = 0;
1215
1216                 av_init_packet( &pkt );
1217
1218                 while( ret >= 0 && !got_audio )
1219                 {
1220                         // Check if the buffer already contains the samples required
1221                         if ( audio_used >= *samples && ignore == 0 )
1222                         {
1223                                 got_audio = 1;
1224                                 break;
1225                         }
1226
1227                         // Read a packet
1228                         ret = av_read_frame( context, &pkt );
1229
1230                         int len = pkt.size;
1231                         uint8_t *ptr = pkt.data;
1232
1233                         // We only deal with audio from the selected audio_index
1234                         while ( ptr != NULL && ret >= 0 && pkt.stream_index == index && len > 0 )
1235                         {
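				// A packet can hold more than one audio frame, so decode repeatedly, advancing ptr by the bytes consumed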
1236                                 int data_size = sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE;
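				// On input data_size is the capacity of decode_buffer in bytes; the decoder replaces it with the number of bytes actually produced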
1237
1238                                 // Decode the audio
1239 #if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
1240                                 ret = avcodec_decode_audio2( codec_context, decode_buffer, &data_size, ptr, len );
1241 #else
1242                                 ret = avcodec_decode_audio( codec_context, decode_buffer, &data_size, ptr, len );
1243 #endif
1244                                 if ( ret < 0 )
1245                                 {
1246                                         ret = 0;
1247                                         break;
1248                                 }
1249
1250                                 len -= ret;
1251                                 ptr += ret;
1252
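				// Only take the decoded data if it still fits in the fixed-size audio buffer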
1253                                 if ( data_size > 0 && ( audio_used * *channels + data_size < AVCODEC_MAX_AUDIO_FRAME_SIZE ) )
1254                                 {
1255                                         if ( resample )
1256                                         {
1257                                                 int16_t *source = decode_buffer;
1258                                                 int16_t *dest = &audio_buffer[ audio_used * *channels ];
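						// Derive the per-channel sample count from the decoded byte count and the decoder's sample format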
1259                                                 int convert_samples = data_size / av_get_bits_per_sample_format( codec_context->sample_fmt ) * 8 / codec_context->channels;
1260
1261                                                 audio_used += audio_resample( resample, dest, source, convert_samples );
1262                                         }
1263                                         else
1264                                         {
1265                                                 memcpy( &audio_buffer[ audio_used * *channels ], decode_buffer, data_size );
1266                                                 audio_used += data_size / *channels / av_get_bits_per_sample_format( codec_context->sample_fmt ) * 8;
1267                                         }
1268
1269                                         // Handle ignore
1270                                         while ( ignore && audio_used > *samples )
1271                                         {
1272                                                 ignore --;
1273                                                 audio_used -= *samples;
1274                                                 memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * sizeof( int16_t ) );
1275                                         }
1276                                 }
1277
1278                                 // If we're behind, ignore this packet
1279                                 if ( pkt.pts >= 0 )
1280                                 {
1281                                         double current_pts = av_q2d( stream->time_base ) * pkt.pts;
1282                                         double source_fps = mlt_properties_get_double( properties, "source_fps" );
1283                                         int req_position = ( int )( real_timecode * source_fps + 0.5 );
1284                                         int int_position = ( int )( current_pts * source_fps + 0.5 );
1285
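					// Make the position relative to the start of the stream by discounting the container's start time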
1286                                         if ( context->start_time != AV_NOPTS_VALUE )
1287                                                 int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
1288                                         if ( seekable && !ignore && int_position < req_position )
1289                                                 ignore = 1;
1290                                 }
1291                         }
1292
1293                         // We're finished with this packet regardless
1294                         av_free_packet( &pkt );
1295                 }
1296
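		// Allocate the output buffer on the frame; it is released back to the pool when the frame is closed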
1297                 *buffer = mlt_pool_alloc( *samples * *channels * sizeof( int16_t ) );
1298                 mlt_properties_set_data( frame_properties, "audio", *buffer, 0, ( mlt_destructor )mlt_pool_release, NULL );
1299
1300                 // Now handle the audio if we have enough
1301                 if ( audio_used >= *samples )
1302                 {
1303                         memcpy( *buffer, audio_buffer, *samples * *channels * sizeof( int16_t ) );
1304                         audio_used -= *samples;
1305                         memmove( audio_buffer, &audio_buffer[ *samples * *channels ], audio_used * *channels * sizeof( int16_t ) );
1306                 }
1307                 else
1308                 {
1309                         memset( *buffer, 0, *samples * *channels * sizeof( int16_t ) );
1310                 }
1311
1312                 // Store the number of audio samples still available
1313                 mlt_properties_set_int( properties, "_audio_used", audio_used );
1314         }
1315         else
1316         {
1317                 // Get silence and don't touch the context
1318                 mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
1319         }
1320
1321         // Unless we were paused, we expect the next frame to follow this one
1322         if ( !paused )
1323                 mlt_properties_set_position( properties, "_audio_expected", position + 1 );
1324
1325         return 0;
1326 }
1327
1328 /** Set up audio handling.
1329 */
1330
1331 static void producer_set_up_audio( mlt_producer this, mlt_frame frame )
1332 {
1333         // Get the properties
1334         mlt_properties properties = MLT_PRODUCER_PROPERTIES( this );
1335
1336         // Fetch the audio_context
1337         AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL );
1338
1339         // Get the audio_index
1340         int index = mlt_properties_get_int( properties, "audio_index" );
1341
1342         // Reopen the file if necessary
1343         if ( !context && index > -1 )
1344         {
1345                 mlt_events_block( properties, this );
1346                 producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(this) ),
1347                         mlt_properties_get( properties, "resource" ) );
1348                 context = mlt_properties_get_data( properties, "audio_context", NULL );
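		// Clearing dummy_context releases any placeholder context stored by producer_open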
1349                 mlt_properties_set_data( properties, "dummy_context", NULL, 0, NULL, NULL );
1350                 mlt_events_unblock( properties, this );
1351         }
1352
1353         // Exception handling for audio_index
1354         if ( context && index >= (int) context->nb_streams )
1355         {
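		// Fall back to the last audio stream in the file, or -1 if there is none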
1356                 for ( index = context->nb_streams - 1; index >= 0 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_AUDIO; --index );
1357                 mlt_properties_set_int( properties, "audio_index", index );
1358         }
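	// If the selected stream is not actually audio, disable audio for this producer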
1359         if ( context && index > -1 && context->streams[ index ]->codec->codec_type != CODEC_TYPE_AUDIO )
1360         {
1361                 index = -1;
1362                 mlt_properties_set_int( properties, "audio_index", index );
1363         }
1364
1365         // Update the audio properties if the index changed
1366         if ( index > -1 && index != mlt_properties_get_int( properties, "_audio_index" ) )
1367         {
1368                 mlt_properties_set_int( properties, "_audio_index", index );
1369                 mlt_properties_set_data( properties, "audio_codec", NULL, 0, NULL, NULL );
1370         }
1371
1372         // Deal with audio context
1373         if ( context != NULL && index > -1 )
1374         {
1375                 // Get the frame properties
1376                 mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
1377
1378                 // Get the audio stream
1379                 AVStream *stream = context->streams[ index ];
1380
1381                 // Get codec context
1382                 AVCodecContext *codec_context = stream->codec;
1383
1384                 // Get the codec
1385                 AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL );
1386
1387                 // Initialise the codec if necessary
1388                 if ( codec == NULL )
1389                 {
1390                         // Find the codec
1391                         codec = avcodec_find_decoder( codec_context->codec_id );
1392
1393                         // If we don't have a codec and we can't initialise it, we can't do much more...
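			// avcodec_open() is not thread safe, so serialise it behind the global avformat lock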
1394                         avformat_lock( );
1395                         if ( codec != NULL && avcodec_open( codec_context, codec ) >= 0 )
1396                         {
1397                                 // Now store the codec context with its destructor so it is closed with the producer
1398                                 mlt_properties_set_data( properties, "audio_codec", codec_context, 0, producer_codec_close, NULL );
1399
1400                         }
1401                         else
1402                         {
1403                                 // Remember that we can't use this later
1404                                 mlt_properties_set_int( properties, "audio_index", -1 );
1405                                 index = -1;
1406                         }
1407                         avformat_unlock( );
1408
1409                         // Process properties as AVOptions
1410                         apply_properties( codec_context, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
1411                 }
1412
1413                 // No codec, no show...
1414                 if ( codec && index > -1 )
1415                 {
1416                         mlt_frame_push_audio( frame, producer_get_audio );
1417                         mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL );
1418                         mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate );
1419                         mlt_properties_set_int( frame_properties, "channels", codec_context->channels );
1420                 }
1421         }
1422 }
1423
1424 /** Our get frame implementation.
1425 */
1426
1427 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index )
1428 {
1429         // Create an empty frame
1430         *frame = mlt_frame_init( MLT_PRODUCER_SERVICE( this ) );
1431
1432         // Update timecode on the frame we're creating
1433         mlt_frame_set_position( *frame, mlt_producer_position( this ) );
1434
1435         // Set the position of this producer
1436         mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) );
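	// avformat_position is the frame number that producer_get_image and producer_get_audio will decode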
1437
1438         // Set up the video
1439         producer_set_up_video( this, *frame );
1440
1441         // Set up the audio
1442         producer_set_up_audio( this, *frame );
1443
1444         // Set the aspect_ratio
1445         mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) );
1446
1447         // Calculate the next timecode
1448         mlt_producer_prepare_next( this );
1449
1450         return 0;
1451 }