]> git.sesse.net Git - mlt/blobdiff - src/modules/avformat/producer_avformat.c
Fix possible divide by zero exceptions.
[mlt] / src / modules / avformat / producer_avformat.c
index 2254ad1d06bd805ea43d910a058c7f4d426b64da..aafed345c6dddaa35cacbd3a89b34a471c6c1aac 100644 (file)
@@ -79,7 +79,7 @@ const char *avcodec_get_sample_fmt_name(int sample_fmt);
 #define POSITION_INITIAL (-2)
 #define POSITION_INVALID (-1)
 
-#define MAX_AUDIO_STREAMS (10)
+#define MAX_AUDIO_STREAMS (32)
 #define MAX_VDPAU_SURFACES (10)
 
 struct producer_avformat_s
@@ -91,6 +91,7 @@ struct producer_avformat_s
        AVCodecContext *audio_codec[ MAX_AUDIO_STREAMS ];
        AVCodecContext *video_codec;
        AVFrame *av_frame;
+       AVPacket pkt;
        ReSampleContext *audio_resample[ MAX_AUDIO_STREAMS ];
        mlt_position audio_expected;
        mlt_position video_expected;
@@ -112,9 +113,11 @@ struct producer_avformat_s
        int max_channel;
        int max_frequency;
        unsigned int invalid_pts_counter;
+       unsigned int invalid_dts_counter;
        double resample_factor;
        mlt_cache image_cache;
        int colorspace;
+       int full_luma;
        pthread_mutex_t video_mutex;
        pthread_mutex_t audio_mutex;
        mlt_deque apackets;
@@ -1020,8 +1023,19 @@ static void reopen_video( producer_avformat self, mlt_producer producer )
        mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
 }
 
+static int64_t best_pts( producer_avformat self, int64_t pts, int64_t dts )
+{
+       self->invalid_pts_counter += pts == AV_NOPTS_VALUE;
+       self->invalid_dts_counter += dts == AV_NOPTS_VALUE;
+       if ( ( self->invalid_pts_counter <= self->invalid_dts_counter
+                  || dts == AV_NOPTS_VALUE ) && pts != AV_NOPTS_VALUE )
+               return pts;
+       else
+               return dts;
+}
+
 static int seek_video( producer_avformat self, mlt_position position,
-       int64_t req_position, int preseek, int use_pts )
+       int64_t req_position, int preseek )
 {
        mlt_producer producer = self->parent;
        int paused = 0;
@@ -1047,18 +1061,18 @@ static int seek_video( producer_avformat self, mlt_position position,
                if ( self->last_position == POSITION_INITIAL )
                {
                        int ret = 0;
-                       int toscan = 100;
+                       int toscan = 500;
                        AVPacket pkt;
 
                        while ( ret >= 0 && toscan-- > 0 )
                        {
                                ret = av_read_frame( context, &pkt );
-                               if ( ret >= 0 && pkt.stream_index == self->video_index )
+                               if ( ret >= 0 && pkt.stream_index == self->video_index && ( pkt.flags & PKT_FLAG_KEY ) )
                                {
                                        mlt_log_debug( MLT_PRODUCER_SERVICE(producer),
                                                "first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n",
                                                pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
-                                       self->first_pts = use_pts? pkt.pts : pkt.dts;
+                                       self->first_pts = best_pts( self, pkt.pts, pkt.dts );
                                        if ( self->first_pts != AV_NOPTS_VALUE )
                                                toscan = 0;
                                }
@@ -1091,7 +1105,7 @@ static int seek_video( producer_avformat self, mlt_position position,
 
                        // Seek to the timestamp
                        // NOTE: reopen_video is disabled at this time because it is causing trouble with A/V sync.
-                       if ( 1 || use_pts || req_position > 0 || self->last_position <= 0 )
+                       if ( 1 || req_position > 0 || self->last_position <= 0 )
                        {
                                codec_context->skip_loop_filter = AVDISCARD_NONREF;
                                av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
@@ -1206,7 +1220,7 @@ static void set_luma_transfer( struct SwsContext *context, int colorspace, int u
 #endif
 }
 
-static mlt_image_format pick_format( enum PixelFormat pix_fmt )
+static mlt_image_format pick_pix_format( enum PixelFormat pix_fmt )
 {
        switch ( pix_fmt )
        {
@@ -1232,11 +1246,35 @@ static mlt_image_format pick_format( enum PixelFormat pix_fmt )
        }
 }
 
-static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
-       mlt_image_format *format, int width, int height, int colorspace, uint8_t **alpha )
+static mlt_audio_format pick_audio_format( int sample_fmt )
+{
+       switch ( sample_fmt )
+       {
+       // interleaved
+       case AV_SAMPLE_FMT_S16:
+               return mlt_audio_s16;
+       case AV_SAMPLE_FMT_S32:
+               return mlt_audio_s32le;
+       case AV_SAMPLE_FMT_FLT:
+               return mlt_audio_f32le;
+       // planar - this producer converts planar to interleaved
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
+       case AV_SAMPLE_FMT_S16P:
+               return mlt_audio_s16;
+       case AV_SAMPLE_FMT_S32P:
+               return mlt_audio_s32le;
+       case AV_SAMPLE_FMT_FLTP:
+               return mlt_audio_f32le;
+#endif
+       default:
+               return mlt_audio_none;
+       }
+}
+
+static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buffer, int pix_fmt,
+       mlt_image_format *format, int width, int height, uint8_t **alpha )
 {
 #ifdef SWSCALE
-       int full_range = -1;
        int flags = SWS_BICUBIC | SWS_ACCURATE_RND;
 
 #ifdef USE_MMX
@@ -1248,7 +1286,7 @@ static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
 
        // extract alpha from planar formats
        if ( ( pix_fmt == PIX_FMT_YUVA420P
-#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
+#if defined(FFUDIV) && LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
                        || pix_fmt == PIX_FMT_YUVA444P
 #endif
                        ) &&
@@ -1276,7 +1314,7 @@ static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                output.linesize[0] = width;
                output.linesize[1] = width >> 1;
                output.linesize[2] = width >> 1;
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->colorspace, -1 );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -1287,7 +1325,7 @@ static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                        width, height, PIX_FMT_RGB24, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->colorspace, self->full_luma );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -1298,7 +1336,7 @@ static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                        width, height, PIX_FMT_RGBA, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->colorspace, self->full_luma );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -1309,7 +1347,7 @@ static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                        width, height, PIX_FMT_YUYV422, flags | SWS_FULL_CHR_H_INP, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->colorspace, -1 );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -1332,7 +1370,7 @@ static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
                img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
        }
-       else if ( format == mlt_image_rgb24a || format == mlt_image_opengl )
+       else if ( *format == mlt_image_rgb24a || *format == mlt_image_opengl )
        {
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGB32, width, height );
@@ -1381,7 +1419,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
        // Obtain the frame number of this frame
-       mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
+       mlt_position position = mlt_frame_original_position( frame );
 
        // Get the producer properties
        mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
@@ -1402,13 +1440,30 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        int image_size = 0;
 
        // Get the image cache
-       if ( ! self->image_cache && ! mlt_properties_get_int( properties, "noimagecache" ) )
+       if ( ! self->image_cache )
        {
-               self->image_cache = mlt_cache_init();
+               // if cache size supplied by environment variable
+               int cache_supplied = getenv( "MLT_AVFORMAT_CACHE" ) != NULL;
+               int cache_size = cache_supplied? atoi( getenv( "MLT_AVFORMAT_CACHE" ) ) : 0;
+
+               // cache size supplied via property
+               if ( mlt_properties_get( properties, "cache" ) )
+               {
+                       cache_supplied = 1;
+                       cache_size = mlt_properties_get_int( properties, "cache" );
+               }
+               if ( mlt_properties_get_int( properties, "noimagecache" ) )
+                       cache_size = 0;
+               // create cache if not disabled
+               if ( !cache_supplied || cache_size > 0 )
+                       self->image_cache = mlt_cache_init();
+               // set cache size if supplied
+               if ( self->image_cache && cache_supplied )
+                       mlt_cache_set_size( self->image_cache, cache_size );
        }
        if ( self->image_cache )
        {
-               mlt_frame original = mlt_cache_get_frame( self->image_cache, mlt_frame_get_position( frame ) );
+               mlt_frame original = mlt_cache_get_frame( self->image_cache, position );
                if ( original )
                {
                        mlt_properties orig_props = MLT_FRAME_PROPERTIES( original );
@@ -1436,9 +1491,6 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        }
        // Cache miss
 
-       // Packet
-       AVPacket pkt;
-
        // We may want to use the source fps if available
        double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
                mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
@@ -1455,21 +1507,16 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                  strcmp( codec_context->codec->name, "mjpeg" ) &&
                                  strcmp( codec_context->codec->name, "rawvideo" ) );
 
-       // Turn on usage of new seek API and PTS for seeking
-       int use_pts = self->seekable &&
-               codec_context->codec_id == CODEC_ID_H264 && codec_context->has_b_frames;
-       if ( mlt_properties_get( properties, "use_pts" ) )
-               use_pts = mlt_properties_get_int( properties, "use_pts" );
        double delay = mlt_properties_get_double( properties, "video_delay" );
 
        // Seek if necessary
        const char *interp = mlt_properties_get( frame_properties, "rescale.interp" );
        int preseek = must_decode
 #if defined(FFUDIV) && LIBAVFORMAT_VERSION_INT >= ((53<<16)+(24<<8)+2)
-               && ( !use_pts || ( interp && strcmp( interp, "nearest" ) ) )
+               && ( interp && strcmp( interp, "nearest" ) )
 #endif
                && codec_context->has_b_frames;
-       int paused = seek_video( self, position, req_position, preseek, use_pts );
+       int paused = seek_video( self, position, req_position, preseek );
 
        // Seek might have reopened the file
        context = self->video_format;
@@ -1480,7 +1527,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                        codec_context->pix_fmt == PIX_FMT_RGBA ||
                        codec_context->pix_fmt == PIX_FMT_ABGR ||
                        codec_context->pix_fmt == PIX_FMT_BGRA )
-               *format = pick_format( codec_context->pix_fmt );
+               *format = pick_pix_format( codec_context->pix_fmt );
 
        // Duplicate the last image if necessary
        if ( self->av_frame && self->av_frame->linesize[0]
@@ -1502,13 +1549,13 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[0] = codec_context->width;
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
-                               convert_image( (AVFrame*) &picture, *buffer,
-                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace, &alpha );
+                               convert_image( self, (AVFrame*) &picture, *buffer,
+                                       PIX_FMT_YUV420P, format, *width, *height, &alpha );
                        }
                        else
 #endif
-                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                               format, *width, *height, self->colorspace, &alpha );
+                       convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+                               format, *width, *height, &alpha );
                        got_picture = 1;
                }
        }
@@ -1518,8 +1565,6 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                int64_t int_position = 0;
                int decode_errors = 0;
 
-               av_init_packet( &pkt );
-
                // Construct an AVFrame for YUV422 conversion
                if ( !self->av_frame )
                        self->av_frame = avcodec_alloc_frame( );
@@ -1527,22 +1572,25 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                while( ret >= 0 && !got_picture )
                {
                        // Read a packet
+                       if ( self->pkt.stream_index == self->video_index )
+                               av_free_packet( &self->pkt );
+                       av_init_packet( &self->pkt );
                        pthread_mutex_lock( &self->packets_mutex );
                        if ( mlt_deque_count( self->vpackets ) )
                        {
                                AVPacket *tmp = (AVPacket*) mlt_deque_pop_front( self->vpackets );
-                               pkt = *tmp;
+                               self->pkt = *tmp;
                                free( tmp );
                        }
                        else
                        {
-                               ret = av_read_frame( context, &pkt );
-                               if ( ret >= 0 && !self->seekable && pkt.stream_index == self->audio_index )
+                               ret = av_read_frame( context, &self->pkt );
+                               if ( ret >= 0 && !self->seekable && self->pkt.stream_index == self->audio_index )
                                {
-                                       if ( !av_dup_packet( &pkt ) )
+                                       if ( !av_dup_packet( &self->pkt ) )
                                        {
                                                AVPacket *tmp = malloc( sizeof(AVPacket) );
-                                               *tmp = pkt;
+                                               *tmp = self->pkt;
                                                mlt_deque_push_back( self->apackets, tmp );
                                        }
                                }
@@ -1550,24 +1598,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                        pthread_mutex_unlock( &self->packets_mutex );
 
                        // We only deal with video from the selected video_index
-                       if ( ret >= 0 && pkt.stream_index == self->video_index && pkt.size > 0 )
+                       if ( ret >= 0 && self->pkt.stream_index == self->video_index && self->pkt.size > 0 )
                        {
-                               // Determine time code of the packet
-                               if ( use_pts && pkt.pts == AV_NOPTS_VALUE )
-                               {
-                                       self->invalid_pts_counter++;
-                                       if ( self->invalid_pts_counter > 20 )
-                                       {
-                                               mlt_log_warning( MLT_PRODUCER_SERVICE(producer), "PTS invalid; using DTS instead\n" );
-                                               mlt_properties_set_int( properties, "use_pts", 0 );
-                                               use_pts = 0;
-                                       }
-                               }
-                               else
-                               {
-                                       self->invalid_pts_counter = 0;
-                               }
-                               int64_t pts = ( use_pts && pkt.pts != AV_NOPTS_VALUE )? pkt.pts : pkt.dts;
+                               int64_t pts = best_pts( self, self->pkt.pts, self->pkt.dts );
                                if ( pts != AV_NOPTS_VALUE )
                                {
                                        if ( !self->seekable && self->first_pts == AV_NOPTS_VALUE )
@@ -1582,7 +1615,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                }
                                mlt_log_debug( MLT_PRODUCER_SERVICE(producer),
                                        "V pkt.pts %"PRId64" pkt.dts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
-                                       pkt.pts, pkt.dts, req_position, self->current_position, int_position );
+                                       self->pkt.pts, self->pkt.dts, req_position, self->current_position, int_position );
 
                                // Make a dumb assumption on streams that contain wild timestamps
                                if ( abs( req_position - int_position ) > 999 )
@@ -1609,9 +1642,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        if ( int_position >= req_position )
                                                codec_context->skip_loop_filter = AVDISCARD_NONE;
 #if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
-                                       ret = avcodec_decode_video2( codec_context, self->av_frame, &got_picture, &pkt );
+                                       ret = avcodec_decode_video2( codec_context, self->av_frame, &got_picture, &self->pkt );
 #else
-                                       ret = avcodec_decode_video( codec_context, self->av_frame, &got_picture, pkt.data, pkt.size );
+                                       ret = avcodec_decode_video( codec_context, self->av_frame, &got_picture, self->pkt.data, self->pkt.size );
 #endif
                                        // Note: decode may fail at the beginning of MPEGfile (B-frames referencing before first I-frame), so allow a few errors.
                                        if ( ret < 0 )
@@ -1627,16 +1660,26 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 
                                if ( got_picture )
                                {
-                                       if ( use_pts )
-                                               // Get position of reordered frame
-                                               int_position = self->av_frame->reordered_opaque;
+                                       // Get position of reordered frame
+                                       int_position = self->av_frame->reordered_opaque;
+#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(106<<8)+0))
+                                       pts = best_pts( self, self->av_frame->pkt_pts, self->av_frame->pkt_dts );
+                                       if ( pts != AV_NOPTS_VALUE )
+                                       {
+                                               if ( self->first_pts != AV_NOPTS_VALUE )
+                                                       pts -= self->first_pts;
+                                               else if ( context->start_time != AV_NOPTS_VALUE )
+                                                       pts -= context->start_time;
+                                               int_position = ( int64_t )( ( av_q2d( stream->time_base ) * pts + delay ) * source_fps + 0.5 );
+                                       }
+#endif
 
                                        if ( int_position < req_position )
                                                got_picture = 0;
                                        else if ( int_position >= req_position )
                                                codec_context->skip_loop_filter = AVDISCARD_NONE;
                                }
-                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, pkt.flags & PKT_FLAG_KEY );
+                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, self->pkt.flags & PKT_FLAG_KEY );
                        }
 
                        // Now handle the picture if we have one
@@ -1669,8 +1712,8 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                                        VdpStatus status = vdp_surface_get_bits( render->surface, dest_format, planes, pitches );
                                                        if ( status == VDP_STATUS_OK )
                                                        {
-                                                               convert_image( self->av_frame, *buffer, PIX_FMT_YUV420P,
-                                                                       format, *width, *height, self->colorspace, &alpha );
+                                                               convert_image( self, self->av_frame, *buffer, PIX_FMT_YUV420P,
+                                                                       format, *width, *height, &alpha );
                                                        }
                                                        else
                                                        {
@@ -1686,8 +1729,8 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        }
                                        else
 #endif
-                                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                                               format, *width, *height, self->colorspace, &alpha );
+                                       convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+                                               format, *width, *height, &alpha );
                                        self->top_field_first |= self->av_frame->top_field_first;
                                        self->current_position = int_position;
                                }
@@ -1696,8 +1739,11 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        got_picture = 0;
                                }
                        }
-                       if ( self->seekable || pkt.stream_index != self->audio_index )
-                               av_free_packet( &pkt );
+
+                       // Free packet data if not video and not live audio packet
+                       if ( self->pkt.stream_index != self->video_index &&
+                                !( !self->seekable && self->pkt.stream_index == self->audio_index ) )
+                               av_free_packet( &self->pkt );
                }
        }
 
@@ -1732,13 +1778,13 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[0] = codec_context->width;
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
-                               convert_image( (AVFrame*) &picture, *buffer,
-                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace, &alpha );
+                               convert_image( self, (AVFrame*) &picture, *buffer,
+                                       PIX_FMT_YUV420P, format, *width, *height, &alpha );
                        }
                        else
 #endif
-                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                               format, *width, *height, self->colorspace, &alpha );
+                       convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+                               format, *width, *height, &alpha );
                        got_picture = 1;
                }
        }
@@ -1768,6 +1814,11 @@ exit_get_image:
        mlt_properties_set_int( properties, "meta.media.progressive", mlt_properties_get_int( frame_properties, "progressive" ) );
        mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
 
+       // If we already have RGB, then the full range processing either happened already
+       // or does not apply (RGB source).
+       if ( *format == mlt_image_rgb24 || *format == mlt_image_rgb24a || *format == mlt_image_opengl )
+               mlt_properties_set( frame_properties, "force_full_luma", NULL );
+
        return !got_picture;
 }
 
@@ -1870,8 +1921,11 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
 #endif
 
                // Reset some image properties
-               mlt_properties_set_int( properties, "width", self->video_codec->width );
-               mlt_properties_set_int( properties, "height", self->video_codec->height );
+               if ( self->video_codec )
+               {
+                       mlt_properties_set_int( properties, "width", self->video_codec->width );
+                       mlt_properties_set_int( properties, "height", self->video_codec->height );
+               }
                // For DV, we'll just use the saved aspect ratio
                if ( codec_context->codec_id != CODEC_ID_DVVIDEO )
                        get_aspect_ratio( properties, stream, self->video_codec, NULL );
@@ -1880,14 +1934,6 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                double source_fps = (double) self->video_codec->time_base.den /
                                                                   ( self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
                
-               if ( mlt_properties_get( properties, "force_fps" ) )
-               {
-                       source_fps = mlt_properties_get_double( properties, "force_fps" );
-                       stream->time_base = av_d2q( source_fps, 1024 );
-                       mlt_properties_set_int( properties, "meta.media.frame_rate_num", stream->time_base.num );
-                       mlt_properties_set_int( properties, "meta.media.frame_rate_den", stream->time_base.den );
-               }
-               else
                {
                        // If the muxer reports a frame rate different than the codec
 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
@@ -1927,6 +1973,15 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                                mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
                        }
                }
+               if ( mlt_properties_get( properties, "force_fps" ) )
+               {
+                       double source_fps = mlt_properties_get_double( properties, "force_fps" );
+                       AVRational fps = av_d2q( source_fps, 1024 );
+                       stream->time_base.num *= mlt_properties_get_int( properties, "meta.media.frame_rate_num" ) * fps.den;
+                       stream->time_base.den *= mlt_properties_get_int( properties, "meta.media.frame_rate_den" ) * fps.num;
+                       mlt_properties_set_int( properties, "meta.media.frame_rate_num", fps.num );
+                       mlt_properties_set_int( properties, "meta.media.frame_rate_den", fps.den );
+               }
 
                // Set the YUV colorspace from override or detect
                self->colorspace = mlt_properties_get_int( properties, "force_colorspace" );
@@ -1954,6 +2009,15 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
 #endif
                // Let apps get chosen colorspace
                mlt_properties_set_int( properties, "meta.media.colorspace", self->colorspace );
+
+               self->full_luma = -1;
+#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(72<<8)+2)
+               mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "color_range %d\n", codec_context->color_range );
+               if ( codec_context->color_range == AVCOL_RANGE_JPEG )
+                       self->full_luma = 1;
+#endif
+               if ( mlt_properties_get( properties, "set.force_full_luma" ) )
+                       self->full_luma = mlt_properties_get_int( properties, "set.force_full_luma" );
        }
        return self->video_codec && self->video_index > -1;
 }
@@ -2092,6 +2156,19 @@ static int sample_bytes( AVCodecContext *context )
 #endif
 }
 
// Convert planar audio (each channel's samples contiguous in src) into
// interleaved layout in dest (sample 0 of every channel, then sample 1, ...).
// src holds channels * samples * bytes_per_sample bytes; dest must be at
// least as large. src and dest must not overlap.
static void planar_to_interleaved( uint8_t *dest, uint8_t *src, int samples, int channels, int bytes_per_sample )
{
	int c, s;
	int frame_bytes = channels * bytes_per_sample;

	for ( c = 0; c < channels; c++ )
	{
		// Walk this channel's contiguous plane, scattering each sample
		// to its interleaved slot.
		uint8_t *in = src + c * samples * bytes_per_sample;
		uint8_t *out = dest + c * bytes_per_sample;
		for ( s = 0; s < samples; s++ )
		{
			memcpy( out, in, bytes_per_sample );
			in += bytes_per_sample;
			out += frame_bytes;
		}
	}
}
+
 static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int channels, int samples, double timecode, double fps )
 {
        // Fetch the audio_format
@@ -2159,8 +2236,21 @@ static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int
                        }
                        else
                        {
-                               // Straight copy to audio buffer
-                               memcpy( &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ], decode_buffer, data_size );
+                               uint8_t *source = decode_buffer;
+                               uint8_t *dest = &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ];
+                               switch ( codec_context->sample_fmt )
+                               {
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
+                               case AV_SAMPLE_FMT_S16P:
+                               case AV_SAMPLE_FMT_S32P:
+                               case AV_SAMPLE_FMT_FLTP:
+                                       planar_to_interleaved( dest, source, convert_samples, codec_context->channels, sizeof_sample );
+                                       break;
+#endif
+                               default:
+                                       // Straight copy to audio buffer
+                                       memcpy( dest, decode_buffer, data_size );
+                               }
                                audio_used += convert_samples;
                        }
 
@@ -2218,7 +2308,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        pthread_mutex_lock( &self->audio_mutex );
        
        // Obtain the frame number of this frame
-       mlt_position position = mlt_properties_get_position( MLT_FRAME_PROPERTIES( frame ), "avformat_position" );
+       mlt_position position = mlt_frame_original_position( frame );
 
        // Calculate the real time code
        double real_timecode = producer_time_of_frame( self->parent, position );
@@ -2248,7 +2338,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        if ( self->audio_index == INT_MAX )
        {
                index = 0;
-               index_max = context->nb_streams;
+               index_max = FFMIN( MAX_AUDIO_STREAMS, context->nb_streams );
                *channels = self->total_channels;
                *samples = *samples * FFMAX( self->max_frequency, *frequency ) / *frequency;
                *frequency = FFMAX( self->max_frequency, *frequency );
@@ -2262,6 +2352,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
 
                if ( codec_context && !self->audio_buffer[ index ] )
                {
+#if LIBAVCODEC_VERSION_INT < ((54<<16)+(26<<8)+0)
                        // Check for resample and create if necessary
                        if ( codec_context->channels <= 2 )
                        {
@@ -2284,6 +2375,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
 #endif
                        }
                        else
+#endif
                        {
                                codec_context->request_channels = self->audio_index == INT_MAX ? codec_context->channels : *channels;
                                sizeof_sample = sample_bytes( codec_context );
@@ -2299,7 +2391,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        }
 
        // Get the audio if required
-       if ( !paused )
+       if ( !paused && *frequency > 0 )
        {
                int ret = 0;
                int got_audio = 0;
@@ -2327,7 +2419,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
                        {
                                // Check if there is enough audio for all streams
                                got_audio = 1;
-                               for ( index = 0; got_audio && index < context->nb_streams; index++ )
+                               for ( index = 0; got_audio && index < index_max; index++ )
                                        if ( ( self->audio_codec[ index ] && self->audio_used[ index ] < *samples ) || ignore[ index ] )
                                                got_audio = 0;
                                if ( got_audio )
@@ -2379,20 +2471,16 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
                        index = self->audio_index;
                        *channels = self->audio_codec[ index ]->channels;
                        *frequency = self->audio_codec[ index ]->sample_rate;
-                       *format = self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_S32 ? mlt_audio_s32le
-                               : self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_FLT ? mlt_audio_f32le
-                               : mlt_audio_s16;
+                       *format = pick_audio_format( self->audio_codec[ index ]->sample_fmt );
                        sizeof_sample = sample_bytes( self->audio_codec[ index ] );
                }
                else if ( self->audio_index == INT_MAX )
                {
-                       // This only works if all audio tracks have the same sample format.
                        for ( index = 0; index < index_max; index++ )
                                if ( self->audio_codec[ index ] && !self->audio_resample[ index ] )
                                {
-                                       *format = self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_S32 ? mlt_audio_s32le
-                                               : self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_FLT ? mlt_audio_f32le
-                                               : mlt_audio_s16;
+                                       // XXX: This only works if all audio tracks have the same sample format.
+                                       *format = pick_audio_format( self->audio_codec[ index ]->sample_fmt );
                                        sizeof_sample = sample_bytes( self->audio_codec[ index ] );
                                        break;
                                }
@@ -2647,7 +2735,7 @@ static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int i
 
        // Set the position of this producer
        mlt_position position = self->seekable ? mlt_producer_frame( producer ) : self->nonseek_position++;
-       mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", position );
+       mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "original_position", position );
 
        // Calculate the next timecode
        mlt_producer_prepare_next( producer );
@@ -2660,6 +2748,7 @@ static void producer_avformat_close( producer_avformat self )
        mlt_log_debug( NULL, "producer_avformat_close\n" );
 
        // Cleanup av contexts
+       av_free_packet( &self->pkt );
        av_free( self->av_frame );
        pthread_mutex_lock( &self->open_mutex );
        int i;