fix memleak if file fails to open
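
The leak fix itself is the single added call to producer_avformat_close() in producer_avformat_init(): when producer_open() failed, only the mlt_producer was closed, so the private producer_avformat struct (audio buffers, image cache, AV contexts) was never released. A minimal sketch of the corrected error path follows; it assumes the module's internal helpers and the surrounding init code from the first hunk below, and the variable names are illustrative, not the exact code.

        /* Sketch of the error path in producer_avformat_init(); "self" is the
         * freshly allocated producer_avformat struct, "producer" the mlt_producer
         * built on top of it, and "filename" the file argument (names assumed). */
        if ( producer_open( self, profile, filename ) != 0 )
        {
                // Clean up the producer as before
                mlt_producer_close( producer );
                producer = NULL;
                // The fix: also release the private struct, which otherwise
                // leaked whenever the file failed to open.
                producer_avformat_close( self );
        }
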
diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c
index 103fcae5992271398de222b851d4ed7ad9456c26..263972b8744895c02b58baaaf4d32429c5914a1e 100644
--- a/src/modules/avformat/producer_avformat.c
+++ b/src/modules/avformat/producer_avformat.c
@@ -96,12 +96,11 @@ struct producer_avformat_s
        mlt_position video_expected;
        int audio_index;
        int video_index;
-       int first_pts;
+       int64_t first_pts;
        int64_t last_position;
        int seekable;
        int64_t current_position;
        mlt_position nonseek_position;
-       int got_picture;
        int top_field_first;
        uint8_t *audio_buffer[ MAX_AUDIO_STREAMS ];
        size_t audio_buffer_size[ MAX_AUDIO_STREAMS ];
@@ -200,6 +199,7 @@ mlt_producer producer_avformat_init( mlt_profile profile, const char *service, c
                                        // Clean up
                                        mlt_producer_close( producer );
                                        producer = NULL;
+                                       producer_avformat_close( self );
                                }
                                else if ( self->seekable )
                                {
@@ -328,6 +328,10 @@ static mlt_properties find_default_streams( producer_avformat self )
                                snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
                                mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->sample_aspect_ratio ) );
 #endif
+                               snprintf( key, sizeof(key), "meta.media.%d.codec.width", i );
+                               mlt_properties_set_int( meta_media, key, codec_context->width );
+                               snprintf( key, sizeof(key), "meta.media.%d.codec.height", i );
+                               mlt_properties_set_int( meta_media, key, codec_context->height );
                                snprintf( key, sizeof(key), "meta.media.%d.codec.frame_rate", i );
                                mlt_properties_set_double( meta_media, key, (double) codec_context->time_base.den /
                                                                                   ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num ) );
@@ -361,6 +365,8 @@ static mlt_properties find_default_streams( producer_avformat self )
 #endif
                                break;
                        case CODEC_TYPE_AUDIO:
+                               if ( !codec_context->channels )
+                                       break;
                                // Use first audio stream
                                if ( self->audio_index < 0 )
                                        self->audio_index = i;
@@ -887,7 +893,7 @@ static int producer_open( producer_avformat self, mlt_profile profile, const cha
                        error = get_basic_info( self, profile, filename );
 
                        // Initialize position info
-                       self->first_pts = -1;
+                       self->first_pts = AV_NOPTS_VALUE;
                        self->last_position = POSITION_INITIAL;
 
                        if ( !self->audio_format )
@@ -1011,7 +1017,7 @@ static void reopen_video( producer_avformat self, mlt_producer producer )
 }
 
 static int seek_video( producer_avformat self, mlt_position position,
-       int64_t req_position, int must_decode, int use_new_seek, int *ignore )
+       int64_t req_position, int preseek, int use_pts )
 {
        mlt_producer producer = self->parent;
        int paused = 0;
@@ -1033,75 +1039,61 @@ static int seek_video( producer_avformat self, mlt_position position,
                double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
                        mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
 
-               if ( self->av_frame && position + 1 == self->video_expected )
+               // find initial PTS
+               if ( self->last_position == POSITION_INITIAL )
                {
-                       // We're paused - use last image
-                       paused = 1;
-               }
-               else if ( !self->seekable && position > self->video_expected && ( position - self->video_expected ) < 250 )
-               {
-                       // Fast forward - seeking is inefficient for small distances - just ignore following frames
-                       *ignore = ( int )( ( position - self->video_expected ) / mlt_producer_get_fps( producer ) * source_fps );
-                       codec_context->skip_loop_filter = AVDISCARD_NONREF;
-               }
-               else if ( self->seekable && ( position < self->video_expected || position - self->video_expected >= 12 || self->last_position < 0 ) )
-               {
-                       if ( use_new_seek && self->last_position == POSITION_INITIAL )
-                       {
-                               // find first key frame
-                               int ret = 0;
-                               int toscan = 100;
-                               AVPacket pkt;
+                       int ret = 0;
+                       int toscan = 100;
+                       AVPacket pkt;
 
-                               while ( ret >= 0 && toscan-- > 0 )
+                       while ( ret >= 0 && toscan-- > 0 )
+                       {
+                               ret = av_read_frame( context, &pkt );
+                               if ( ret >= 0 && pkt.stream_index == self->video_index )
                                {
-                                       ret = av_read_frame( context, &pkt );
-                                       if ( ret >= 0 && ( pkt.flags & PKT_FLAG_KEY ) && pkt.stream_index == self->video_index )
-                                       {
-                                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n", pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
-                                               self->first_pts = pkt.pts;
+                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer),
+                                               "first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n",
+                                               pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
+                                       self->first_pts = use_pts? pkt.pts : pkt.dts;
+                                       if ( self->first_pts != AV_NOPTS_VALUE )
                                                toscan = 0;
-                                       }
-                                       av_free_packet( &pkt );
                                }
-                               // Rewind
-                               av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
+                               av_free_packet( &pkt );
                        }
+                       av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
+               }
 
+               if ( self->av_frame && position + 1 == self->video_expected )
+               {
+                       // We're paused - use last image
+                       paused = 1;
+               }
+               else if ( self->seekable && ( position < self->video_expected || position - self->video_expected >= 12 || self->last_position < 0 ) )
+               {
                        // Calculate the timestamp for the requested frame
-                       int64_t timestamp;
-                       if ( use_new_seek )
-                       {
-                               timestamp = ( req_position - 0.1 / source_fps ) /
-                                       ( av_q2d( stream->time_base ) * source_fps );
-                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pos %"PRId64" pts %"PRId64"\n", req_position, timestamp );
-                               if ( self->first_pts > 0 )
-                                       timestamp += self->first_pts;
-                               else if ( context->start_time != AV_NOPTS_VALUE )
-                                       timestamp += context->start_time;
-                       }
-                       else
-                       {
-                               timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
-                               if ( context->start_time != AV_NOPTS_VALUE )
-                                       timestamp += context->start_time;
-                       }
-                       if ( must_decode )
-                               timestamp -= AV_TIME_BASE;
+                       int64_t timestamp = req_position / ( av_q2d( stream->time_base ) * source_fps );
+                       if ( req_position <= 0 )
+                               timestamp = 0;
+                       else if ( self->first_pts != AV_NOPTS_VALUE )
+                               timestamp += self->first_pts;
+                       else if ( context->start_time != AV_NOPTS_VALUE )
+                               timestamp += context->start_time;
+                       if ( preseek && av_q2d( stream->time_base ) != 0 )
+                               timestamp -= 2 / av_q2d( stream->time_base );
                        if ( timestamp < 0 )
                                timestamp = 0;
                        mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %"PRId64" position %d expected %d last_pos %"PRId64"\n",
                                timestamp, position, self->video_expected, self->last_position );
 
                        // Seek to the timestamp
-                       if ( use_new_seek )
+                       // NOTE: reopen_video is disabled at this time because it is causing trouble with A/V sync.
+                       if ( 1 || use_pts || req_position > 0 || self->last_position <= 0 )
                        {
                                codec_context->skip_loop_filter = AVDISCARD_NONREF;
                                av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
-                       }
-                       else if ( req_position > 0 || self->last_position <= 0 )
-                       {
-                               av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
+
+                               // flush any pictures still in decode buffer
+                               avcodec_flush_buffers( codec_context );
                        }
                        else
                        {
@@ -1115,12 +1107,6 @@ static int seek_video( producer_avformat self, mlt_position position,
                        self->current_position = POSITION_INVALID;
                        self->last_position = POSITION_INVALID;
                        av_freep( &self->av_frame );
-
-                       if ( use_new_seek )
-                       {
-                               // flush any pictures still in decode buffer
-                               avcodec_flush_buffers( codec_context );
-                       }
                }
        }
        return paused;
@@ -1243,11 +1229,11 @@ static mlt_image_format pick_format( enum PixelFormat pix_fmt )
 }
 
 static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
-       mlt_image_format *format, int width, int height, int colorspace )
+       mlt_image_format *format, int width, int height, int colorspace, uint8_t **alpha )
 {
 #ifdef SWSCALE
        int full_range = -1;
-       int flags = SWS_BILINEAR | SWS_ACCURATE_RND;
+       int flags = SWS_BICUBIC | SWS_ACCURATE_RND;
 
 #ifdef USE_MMX
        flags |= SWS_CPU_CAPS_MMX;
@@ -1256,6 +1242,25 @@ static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
        flags |= SWS_CPU_CAPS_MMX2;
 #endif
 
+       // extract alpha from planar formats
+       if ( ( pix_fmt == PIX_FMT_YUVA420P
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
+                       || pix_fmt == PIX_FMT_YUVA444P
+#endif
+                       ) &&
+               *format != mlt_image_rgb24a && *format != mlt_image_opengl &&
+               frame->data[3] && frame->linesize[3] )
+       {
+               int i;
+               uint8_t *src, *dst;
+
+               dst = *alpha = mlt_pool_alloc( width * height );
+               src = frame->data[3];
+
+               for ( i = 0; i < height; dst += width, src += frame->linesize[3], i++ )
+                       memcpy( dst, src, FFMIN( width, frame->linesize[3] ) );
+       }
+
        if ( *format == mlt_image_yuv420p )
        {
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
@@ -1388,15 +1393,31 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        // Get codec context
        AVCodecContext *codec_context = stream->codec;
 
+       uint8_t *alpha = NULL;
+       int got_picture = 0;
+       int image_size = 0;
+
        // Get the image cache
        if ( ! self->image_cache && ! mlt_properties_get_int( properties, "noimagecache" ) )
+       {
                self->image_cache = mlt_cache_init();
+       }
        if ( self->image_cache )
        {
-               mlt_cache_item item = mlt_cache_get( self->image_cache, (void*) position );
-               uint8_t *original = mlt_cache_item_data( item, (int*) format );
+               mlt_frame original = mlt_cache_get_frame( self->image_cache, mlt_frame_get_position( frame ) );
                if ( original )
                {
+                       mlt_properties orig_props = MLT_FRAME_PROPERTIES( original );
+                       int size = 0;
+
+                       *buffer = mlt_properties_get_data( orig_props, "alpha", &size );
+                       if (*buffer)
+                               mlt_frame_set_alpha( frame, *buffer, size, NULL );
+                       *buffer = mlt_properties_get_data( orig_props, "image", &size );
+                       mlt_frame_set_image( frame, *buffer, size, NULL );
+                       mlt_properties_set_data( frame_properties, "avformat.image_cache", original, 0, (mlt_destructor) mlt_frame_close, NULL );
+                       *format = mlt_properties_get_int( orig_props, "format" );
+
                        // Set the resolution
                        *width = codec_context->width;
                        *height = codec_context->height;
@@ -1405,35 +1426,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                        if ( *height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
                                *height = 1080;
 
-                       // Cache hit
-                       int size = mlt_image_format_size( *format, *width, *height, NULL );
-                       if ( writable )
-                       {
-                               *buffer = mlt_pool_alloc( size );
-                               mlt_frame_set_image( frame, *buffer, size, mlt_pool_release );
-                               memcpy( *buffer, original, size );
-                               mlt_cache_item_close( item );
-                       }
-                       else
-                       {
-                               *buffer = original;
-                               mlt_properties_set_data( frame_properties, "avformat.image_cache", item, 0, ( mlt_destructor )mlt_cache_item_close, NULL );
-                               mlt_frame_set_image( frame, *buffer, size, NULL );
-                       }
-                       self->got_picture = 1;
-
+                       got_picture = 1;
                        goto exit_get_image;
                }
        }
        // Cache miss
-       int image_size = 0;
 
        // Packet
        AVPacket pkt;
 
-       // Special case ffwd handling
-       int ignore = 0;
-
        // We may want to use the source fps if available
        double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
                mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
@@ -1451,20 +1452,25 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                  strcmp( codec_context->codec->name, "rawvideo" ) );
 
        // Turn on usage of new seek API and PTS for seeking
-       int use_new_seek = self->seekable &&
-               codec_context->codec_id == CODEC_ID_H264 && !strcmp( context->iformat->name, "mpegts" );
-       if ( mlt_properties_get( properties, "new_seek" ) )
-               use_new_seek = mlt_properties_get_int( properties, "new_seek" );
+       int use_pts = self->seekable &&
+               codec_context->codec_id == CODEC_ID_H264 && codec_context->has_b_frames;
+       if ( mlt_properties_get( properties, "use_pts" ) )
+               use_pts = mlt_properties_get_int( properties, "use_pts" );
        double delay = mlt_properties_get_double( properties, "video_delay" );
 
        // Seek if necessary
-       int paused = seek_video( self, position, req_position, must_decode, use_new_seek, &ignore );
+       const char *interp = mlt_properties_get( frame_properties, "rescale.interp" );
+       int preseek = must_decode
+#if defined(FFUDIV) && LIBAVFORMAT_VERSION_INT >= ((53<<16)+(24<<8)+2)
+               && ( !use_pts || ( interp && strcmp( interp, "nearest" ) ) )
+#endif
+               && codec_context->has_b_frames;
+       int paused = seek_video( self, position, req_position, preseek, use_pts );
 
        // Seek might have reopened the file
        context = self->video_format;
        stream = context->streams[ self->video_index ];
        codec_context = stream->codec;
-
        if ( *format == mlt_image_none ||
                        codec_context->pix_fmt == PIX_FMT_ARGB ||
                        codec_context->pix_fmt == PIX_FMT_RGBA ||
@@ -1473,10 +1479,8 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                *format = pick_format( codec_context->pix_fmt );
 
        // Duplicate the last image if necessary
-       if ( self->av_frame && self->av_frame->linesize[0] && self->got_picture
-                && ( paused
-                         || self->current_position == req_position
-                         || ( !use_new_seek && self->current_position > req_position ) ) )
+       if ( self->av_frame && self->av_frame->linesize[0]
+                && ( paused || self->current_position >= req_position ) )
        {
                // Duplicate it
                if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
@@ -1495,22 +1499,20 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
                                convert_image( (AVFrame*) &picture, *buffer,
-                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace );
+                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace, &alpha );
                        }
                        else
 #endif
                        convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                               format, *width, *height, self->colorspace );
+                               format, *width, *height, self->colorspace, &alpha );
+                       got_picture = 1;
                }
-               else
-                       mlt_frame_get_image( frame, buffer, format, width, height, writable );
        }
        else
        {
                int ret = 0;
                int64_t int_position = 0;
                int decode_errors = 0;
-               int got_picture = 0;
 
                av_init_packet( &pkt );
 
@@ -1547,50 +1549,42 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                        if ( ret >= 0 && pkt.stream_index == self->video_index && pkt.size > 0 )
                        {
                                // Determine time code of the packet
-                               if ( use_new_seek )
+                               if ( use_pts && pkt.pts == AV_NOPTS_VALUE )
                                {
-                                       int64_t pts = pkt.pts;
-                                       if ( self->first_pts > 0 )
-                                               pts -= self->first_pts;
-                                       else if ( context->start_time != AV_NOPTS_VALUE )
-                                               pts -= context->start_time;
-                                       int_position = ( int64_t )( ( av_q2d( stream->time_base ) * pts + delay ) * source_fps + 0.1 );
-                                       if ( pkt.pts == AV_NOPTS_VALUE )
+                                       self->invalid_pts_counter++;
+                                       if ( self->invalid_pts_counter > 20 )
                                        {
-                                               self->invalid_pts_counter++;
-                                               if ( self->invalid_pts_counter > 20 )
-                                               {
-                                                       mlt_log_panic( MLT_PRODUCER_SERVICE(producer), "\ainvalid PTS; DISABLING NEW_SEEK!\n" );
-                                                       mlt_properties_set_int( properties, "new_seek", 0 );
-                                                       int_position = req_position;
-                                                       use_new_seek = 0;
-                                               }
+                                               mlt_log_warning( MLT_PRODUCER_SERVICE(producer), "PTS invalid; using DTS instead\n" );
+                                               mlt_properties_set_int( properties, "use_pts", 0 );
+                                               use_pts = 0;
                                        }
-                                       else
-                                       {
-                                               self->invalid_pts_counter = 0;
-                                       }
-                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.pts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
-                                               pkt.pts, req_position, self->current_position, int_position );
                                }
                                else
                                {
-                                       if ( pkt.dts != AV_NOPTS_VALUE )
-                                       {
-                                               int_position = ( int64_t )( ( av_q2d( stream->time_base ) * pkt.dts + delay ) * source_fps + 0.5 );
-                                               if ( context->start_time != AV_NOPTS_VALUE )
-                                                       int_position -= ( int64_t )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
-                                               if ( int_position == self->last_position )
-                                                       int_position = self->last_position + 1;
-                                       }
-                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.dts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
-                                               pkt.dts, req_position, self->current_position, int_position );
-                                       // Make a dumb assumption on streams that contain wild timestamps
-                                       if ( abs( req_position - int_position ) > 999 )
-                                       {
-                                               int_position = req_position;
-                                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " WILD TIMESTAMP!" );
-                                       }
+                                       self->invalid_pts_counter = 0;
+                               }
+                               int64_t pts = ( use_pts && pkt.pts != AV_NOPTS_VALUE )? pkt.pts : pkt.dts;
+                               if ( pts != AV_NOPTS_VALUE )
+                               {
+                                       if ( !self->seekable && self->first_pts == AV_NOPTS_VALUE )
+                                               self->first_pts = pts;
+                                       if ( self->first_pts != AV_NOPTS_VALUE )
+                                               pts -= self->first_pts;
+                                       else if ( context->start_time != AV_NOPTS_VALUE )
+                                               pts -= context->start_time;
+                                       int_position = ( int64_t )( ( av_q2d( stream->time_base ) * pts + delay ) * source_fps + 0.5 );
+                                       if ( int_position == self->last_position )
+                                               int_position = self->last_position + 1;
+                               }
+                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer),
+                                       "V pkt.pts %"PRId64" pkt.dts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
+                                       pkt.pts, pkt.dts, req_position, self->current_position, int_position );
+
+                               // Make a dumb assumption on streams that contain wild timestamps
+                               if ( abs( req_position - int_position ) > 999 )
+                               {
+                                       int_position = req_position;
+                                       mlt_log_warning( MLT_PRODUCER_SERVICE(producer), " WILD TIMESTAMP!\n" );
                                }
                                self->last_position = int_position;
 
@@ -1607,7 +1601,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                                self->vdpau->is_decoded = 0;
                                        }
 #endif
-                                       codec_context->reordered_opaque = pkt.pts;
+                                       codec_context->reordered_opaque = int_position;
                                        if ( int_position >= req_position )
                                                codec_context->skip_loop_filter = AVDISCARD_NONE;
 #if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
@@ -1629,32 +1623,14 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 
                                if ( got_picture )
                                {
-                                       if ( use_new_seek )
-                                       {
-                                               // Determine time code of the packet
-                                               int64_t pts = self->av_frame->reordered_opaque;
-                                               if ( self->first_pts > 0 )
-                                                       pts -= self->first_pts;
-                                               else if ( context->start_time != AV_NOPTS_VALUE )
-                                                       pts -= context->start_time;
-                                               int_position = ( int64_t )( av_q2d( stream->time_base) * pts * source_fps + 0.1 );
-                                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "got frame %"PRId64", key %d\n", int_position, self->av_frame->key_frame );
-                                       }
-                                       // Handle ignore
+                                       if ( use_pts )
+                                               // Get position of reordered frame
+                                               int_position = self->av_frame->reordered_opaque;
+
                                        if ( int_position < req_position )
-                                       {
-                                               ignore = 0;
                                                got_picture = 0;
-                                       }
                                        else if ( int_position >= req_position )
-                                       {
-                                               ignore = 0;
                                                codec_context->skip_loop_filter = AVDISCARD_NONE;
-                                       }
-                                       else if ( ignore -- )
-                                       {
-                                               got_picture = 0;
-                                       }
                                }
                                mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, pkt.flags & PKT_FLAG_KEY );
                        }
@@ -1690,7 +1666,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                                        if ( status == VDP_STATUS_OK )
                                                        {
                                                                convert_image( self->av_frame, *buffer, PIX_FMT_YUV420P,
-                                                                       format, *width, *height, self->colorspace );
+                                                                       format, *width, *height, self->colorspace, &alpha );
                                                        }
                                                        else
                                                        {
@@ -1707,10 +1683,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        else
 #endif
                                        convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                                               format, *width, *height, self->colorspace );
+                                               format, *width, *height, self->colorspace, &alpha );
                                        self->top_field_first |= self->av_frame->top_field_first;
                                        self->current_position = int_position;
-                                       self->got_picture = 1;
                                }
                                else
                                {
@@ -1722,15 +1697,20 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                }
        }
 
-       if ( self->got_picture && image_size > 0 && self->image_cache )
+       // set alpha
+       if ( alpha )
+               mlt_frame_set_alpha( frame, alpha, (*width) * (*height), mlt_pool_release );
+
+       if ( image_size > 0 && self->image_cache )
        {
-               // Copy buffer to image cache   
-               uint8_t *image = mlt_pool_alloc( image_size );
-               memcpy( image, *buffer, image_size );
-               mlt_cache_put( self->image_cache, (void*) position, image, *format, mlt_pool_release );
+               mlt_properties_set_int( frame_properties, "format", *format );
+               mlt_cache_put_frame( self->image_cache, frame );
        }
+
        // Try to duplicate last image if there was a decoding failure
-       else if ( !image_size && self->av_frame && self->av_frame->linesize[0] )
+       // TODO: with multithread decoding a partial frame decoding resulting
+       // in failure also resets av_frame making test below fail.
+       if ( !image_size && self->av_frame && self->av_frame->linesize[0] )
        {
                // Duplicate it
                if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
@@ -1749,16 +1729,14 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
                                convert_image( (AVFrame*) &picture, *buffer,
-                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace );
+                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace, &alpha );
                        }
                        else
 #endif
                        convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                               format, *width, *height, self->colorspace );
-                       self->got_picture = 1;
+                               format, *width, *height, self->colorspace, &alpha );
+                       got_picture = 1;
                }
-               else
-                       mlt_frame_get_image( frame, buffer, format, width, height, writable );
        }
 
        // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
@@ -1786,7 +1764,7 @@ exit_get_image:
        mlt_properties_set_int( properties, "meta.media.progressive", mlt_properties_get_int( frame_properties, "progressive" ) );
        mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
 
-       return !self->got_picture;
+       return !got_picture;
 }
 
 /** Process properties as AVOptions and apply to AV context obj
@@ -1936,7 +1914,6 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
 #endif
                                mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
                                mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
-                               source_fps = av_q2d( frame_rate );
                        }
                        else
                        {
@@ -1947,12 +1924,6 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                        }
                }
 
-               // source_fps is deprecated in favor of meta.media.frame_rate_num and .frame_rate_den
-               if ( source_fps > 0 )
-                       mlt_properties_set_double( properties, "source_fps", source_fps );
-               else
-                       mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( self->parent ) );
-
                // Set the YUV colorspace from override or detect
                self->colorspace = mlt_properties_get_int( properties, "force_colorspace" );
 #if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)              
@@ -2050,11 +2021,8 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
                // Set the width and height
                mlt_properties_set_int( frame_properties, "width", self->video_codec->width );
                mlt_properties_set_int( frame_properties, "height", self->video_codec->height );
-               // real_width and real_height are deprecated in favor of meta.media.width and .height
                mlt_properties_set_int( properties, "meta.media.width", self->video_codec->width );
                mlt_properties_set_int( properties, "meta.media.height", self->video_codec->height );
-               mlt_properties_set_int( frame_properties, "real_width", self->video_codec->width );
-               mlt_properties_set_int( frame_properties, "real_height", self->video_codec->height );
                mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
                mlt_properties_set_int( frame_properties, "colorspace", self->colorspace );
 
@@ -2062,7 +2030,6 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
                if ( self->video_codec->height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
                {
                        mlt_properties_set_int( properties, "meta.media.height", 1080 );
-                       mlt_properties_set_int( frame_properties, "real_height", 1080 );
                }
 
                // Add our image operation
@@ -2076,7 +2043,7 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
        }
 }
 
-static int seek_audio( producer_avformat self, mlt_position position, double timecode, int *ignore )
+static int seek_audio( producer_avformat self, mlt_position position, double timecode )
 {
        int paused = 0;
 
@@ -2088,11 +2055,6 @@ static int seek_audio( producer_avformat self, mlt_position position, double tim
                        // We're paused - silence required
                        paused = 1;
                }
-               else if ( !self->seekable && position > self->audio_expected && ( position - self->audio_expected ) < 250 )
-               {
-                       // Fast forward - seeking is inefficient for small distances - just ignore following frames
-                       *ignore = position - self->audio_expected;
-               }
                else if ( position < self->audio_expected || position - self->audio_expected >= 12 )
                {
                        AVFormatContext *context = self->audio_format;
@@ -2213,11 +2175,18 @@ static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int
        // Skip this on non-seekable, audio-only inputs.
        if ( pkt.pts >= 0 && ( self->seekable || self->video_format ) && *ignore == 0 && audio_used > samples / 2 )
        {
-               double current_pts = av_q2d( context->streams[ index ]->time_base ) * pkt.pts;
+               int64_t pts = pkt.pts;
+               if ( self->first_pts != 0 )
+                       pts -= self->first_pts;
+               else if ( context->start_time != AV_NOPTS_VALUE )
+                       pts -= context->start_time;
+               double timebase = av_q2d( context->streams[ index ]->time_base );
+               int64_t int_position = ( int64_t )( timebase * pts * fps + 0.5 );
                int64_t req_position = ( int64_t )( timecode * fps + 0.5 );
-               int64_t int_position = ( int64_t )( current_pts * fps + 0.5 );
-               if ( context->start_time != AV_NOPTS_VALUE )
-                       int_position -= ( int64_t )( fps * context->start_time / AV_TIME_BASE + 0.5 );
+
+               mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent),
+                       "A pkt.pts %"PRId64" pkt.dts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
+                       pkt.pts, pkt.dts, req_position, self->current_position, int_position );
 
                if ( int_position > 0 )
                {
@@ -2226,7 +2195,7 @@ static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int
                                *ignore = req_position - int_position;
                        else if ( self->audio_index != INT_MAX && int_position > req_position + 2 )
                                // We are ahead, so seek backwards some more
-                               seek_audio( self, req_position, timecode - 1.0, ignore );
+                               seek_audio( self, req_position, timecode - 1.0 );
                }
        }
 
@@ -2257,7 +2226,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        int ignore[ MAX_AUDIO_STREAMS ] = { 0 };
 
        // Flag for paused (silence)
-       int paused = seek_audio( self, position, real_timecode, &ignore[0] );
+       int paused = seek_audio( self, position, real_timecode );
 
        // Initialize ignore for all streams from the seek return value
        int i = MAX_AUDIO_STREAMS;