Fix sync of multiple audio streams with audio_index=all.
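For context, this is roughly how a caller would ask this producer to decode every audio stream at once. It is a minimal sketch, not part of the patch: the profile name and clip filename are placeholders and error handling is omitted.

    #include <framework/mlt.h>

    int main( void )
    {
        // Bring up the factory and a profile; "dv_pal" is just an example profile.
        mlt_factory_init( NULL );
        mlt_profile profile = mlt_profile_init( "dv_pal" );

        // Open a clip with the avformat producer ("clip.mov" is hypothetical).
        mlt_producer producer = mlt_factory_producer( profile, "avformat", "clip.mov" );

        // Request all audio streams, as referenced by the commit message;
        // a single stream can still be selected by passing its number instead.
        mlt_properties_set( MLT_PRODUCER_PROPERTIES( producer ), "audio_index", "all" );

        // Pull one frame; its audio should now include channels from every stream.
        mlt_frame frame = NULL;
        mlt_service_get_frame( MLT_PRODUCER_SERVICE( producer ), &frame, 0 );

        mlt_frame_close( frame );
        mlt_producer_close( producer );
        mlt_profile_close( profile );
        mlt_factory_close();
        return 0;
    }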
diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c
index 5206f097af8efa444629c77e4b1b0444dca8e8cb..536c64ba5b3cda60b3077ec350d072828c52e55b 100644
--- a/src/modules/avformat/producer_avformat.c
+++ b/src/modules/avformat/producer_avformat.c
 #include <framework/mlt_cache.h>
 
 // ffmpeg Header files
-#include <avformat.h>
-#include <opt.h>
+#include <libavformat/avformat.h>
 #ifdef SWSCALE
-#  include <swscale.h>
+#  include <libswscale/swscale.h>
 #endif
-#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
-#  include "audioconvert.h"
+#if LIBAVCODEC_VERSION_MAJOR > 52
+#include <libavutil/samplefmt.h>
+#elif (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+const char *avcodec_get_sample_fmt_name(int sample_fmt);
 #endif
 #ifdef VDPAU
-#include <vdpau.h>
+#  include <libavcodec/vdpau.h>
+#endif
+#if (LIBAVUTIL_VERSION_INT > ((50<<16)+(7<<8)+0))
+#  include <libavutil/pixdesc.h>
 #endif
 
 // System header files
 #define PIX_FMT_YUYV422 PIX_FMT_YUV422
 #endif
 
+#if LIBAVCODEC_VERSION_MAJOR > 52
+#include <libavutil/opt.h>
+#define CODEC_TYPE_VIDEO      AVMEDIA_TYPE_VIDEO
+#define CODEC_TYPE_AUDIO      AVMEDIA_TYPE_AUDIO
+#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
+#else
+#include <libavcodec/opt.h>
+#endif
+
 #define POSITION_INITIAL (-2)
 #define POSITION_INVALID (-1)
 
@@ -83,9 +96,9 @@ struct producer_avformat_s
        int current_position;
        int got_picture;
        int top_field_first;
-       int16_t *audio_buffer[ MAX_AUDIO_STREAMS ];
+       uint8_t *audio_buffer[ MAX_AUDIO_STREAMS ];
        size_t audio_buffer_size[ MAX_AUDIO_STREAMS ];
-       int16_t *decode_buffer[ MAX_AUDIO_STREAMS ];
+       uint8_t *decode_buffer[ MAX_AUDIO_STREAMS ];
        int audio_used[ MAX_AUDIO_STREAMS ];
        int audio_streams;
        int audio_max_stream;
@@ -95,6 +108,12 @@ struct producer_avformat_s
        unsigned int invalid_pts_counter;
        double resample_factor;
        mlt_cache image_cache;
+       int colorspace;
+       pthread_mutex_t video_mutex;
+       pthread_mutex_t audio_mutex;
+       mlt_deque apackets;
+       mlt_deque vpackets;
+       pthread_mutex_t packets_mutex;
 #ifdef VDPAU
        struct
        {
@@ -113,10 +132,16 @@ struct producer_avformat_s
 typedef struct producer_avformat_s *producer_avformat;
 
 // Forward references.
-static int producer_open( producer_avformat this, mlt_profile profile, char *file );
-static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index );
+static int list_components( char* file );
+static int producer_open( producer_avformat self, mlt_profile profile, const char *URL );
+static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int index );
 static void producer_avformat_close( producer_avformat );
 static void producer_close( mlt_producer parent );
+static void producer_set_up_video( producer_avformat self, mlt_frame frame );
+static void producer_set_up_audio( producer_avformat self, mlt_frame frame );
+static void apply_properties( void *obj, mlt_properties properties, int flags );
+static int video_codec_init( producer_avformat self, int index, mlt_properties properties );
+static void get_audio_streams_info( producer_avformat self );
 
 #ifdef VDPAU
 #include "vdpau.c"
@@ -127,50 +152,22 @@ static void producer_close( mlt_producer parent );
 
 mlt_producer producer_avformat_init( mlt_profile profile, const char *service, char *file )
 {
-       int skip = 0;
+       if ( list_components( file ) )
+               return NULL;
 
-       // Report information about available demuxers and codecs as YAML Tiny
-       if ( file && strstr( file, "f-list" ) )
-       {
-               fprintf( stderr, "---\nformats:\n" );
-               AVInputFormat *format = NULL;
-               while ( ( format = av_iformat_next( format ) ) )
-                       fprintf( stderr, "  - %s\n", format->name );
-               fprintf( stderr, "...\n" );
-               skip = 1;
-       }
-       if ( file && strstr( file, "acodec-list" ) )
-       {
-               fprintf( stderr, "---\naudio_codecs:\n" );
-               AVCodec *codec = NULL;
-               while ( ( codec = av_codec_next( codec ) ) )
-                       if ( codec->decode && codec->type == CODEC_TYPE_AUDIO )
-                               fprintf( stderr, "  - %s\n", codec->name );
-               fprintf( stderr, "...\n" );
-               skip = 1;
-       }
-       if ( file && strstr( file, "vcodec-list" ) )
-       {
-               fprintf( stderr, "---\nvideo_codecs:\n" );
-               AVCodec *codec = NULL;
-               while ( ( codec = av_codec_next( codec ) ) )
-                       if ( codec->decode && codec->type == CODEC_TYPE_VIDEO )
-                               fprintf( stderr, "  - %s\n", codec->name );
-               fprintf( stderr, "...\n" );
-               skip = 1;
-       }
+       mlt_producer producer = NULL;
 
        // Check that we have a non-NULL argument
-       if ( !skip && file )
+       if ( file )
        {
                // Construct the producer
-               mlt_producer producer = calloc( 1, sizeof( struct mlt_producer_s ) );
-               producer_avformat this = calloc( 1, sizeof( struct producer_avformat_s ) );
+               producer_avformat self = calloc( 1, sizeof( struct producer_avformat_s ) );
+               producer = calloc( 1, sizeof( struct mlt_producer_s ) );
 
                // Initialise it
-               if ( mlt_producer_init( producer, this ) == 0 )
+               if ( mlt_producer_init( producer, self ) == 0 )
                {
-                       this->parent = producer;
+                       self->parent = producer;
 
                        // Get the properties
                        mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
@@ -183,61 +180,96 @@ mlt_producer producer_avformat_init( mlt_profile profile, const char *service, c
 
                        // Register our get_frame implementation
                        producer->get_frame = producer_get_frame;
-                       
+
                        if ( strcmp( service, "avformat-novalidate" ) )
                        {
                                // Open the file
-                               if ( producer_open( this, profile, file ) != 0 )
+                               if ( producer_open( self, profile, file ) != 0 )
                                {
                                        // Clean up
                                        mlt_producer_close( producer );
                                        producer = NULL;
                                }
-                               else
+                               else if ( self->seekable )
                                {
                                        // Close the file to release resources for large playlists - reopen later as needed
                                        avformat_lock();
-                                       if ( this->dummy_context )
-                                               av_close_input_file( this->dummy_context );
-                                       this->dummy_context = NULL;
-                                       if ( this->audio_format )
-                                               av_close_input_file( this->audio_format );
-                                       this->audio_format = NULL;
-                                       if ( this->video_format )
-                                               av_close_input_file( this->video_format );
-                                       this->video_format = NULL;
+                                       if ( self->audio_format )
+                                               av_close_input_file( self->audio_format );
+                                       self->audio_format = NULL;
+                                       if ( self->video_format )
+                                               av_close_input_file( self->video_format );
+                                       self->video_format = NULL;
                                        avformat_unlock();
-       
-                                       // Default the user-selectable indices from the auto-detected indices
-                                       mlt_properties_set_int( properties, "audio_index",  this->audio_index );
-                                       mlt_properties_set_int( properties, "video_index",  this->video_index );
-                                       
-#ifdef VDPAU
-                                       mlt_service_cache_set_size( MLT_PRODUCER_SERVICE(producer), "producer_avformat", 5 );
-#endif
-                                       mlt_service_cache_put( MLT_PRODUCER_SERVICE(producer), "producer_avformat", this, 0, (mlt_destructor) producer_avformat_close );
                                }
                        }
-                       else
+                       if ( producer )
                        {
+                               // Default the user-selectable indices from the auto-detected indices
+                               mlt_properties_set_int( properties, "audio_index",  self->audio_index );
+                               mlt_properties_set_int( properties, "video_index",  self->video_index );
 #ifdef VDPAU
                                mlt_service_cache_set_size( MLT_PRODUCER_SERVICE(producer), "producer_avformat", 5 );
 #endif
-                               mlt_service_cache_put( MLT_PRODUCER_SERVICE(producer), "producer_avformat", this, 0, (mlt_destructor) producer_avformat_close );
+                               mlt_service_cache_put( MLT_PRODUCER_SERVICE(producer), "producer_avformat", self, 0, (mlt_destructor) producer_avformat_close );
                        }
-                       return producer;
                }
        }
-       return NULL;
+       return producer;
+}
+
+int list_components( char* file )
+{
+       int skip = 0;
+
+       // Report information about available demuxers and codecs as YAML Tiny
+       if ( file && strstr( file, "f-list" ) )
+       {
+               fprintf( stderr, "---\nformats:\n" );
+               AVInputFormat *format = NULL;
+               while ( ( format = av_iformat_next( format ) ) )
+                       fprintf( stderr, "  - %s\n", format->name );
+               fprintf( stderr, "...\n" );
+               skip = 1;
+       }
+       if ( file && strstr( file, "acodec-list" ) )
+       {
+               fprintf( stderr, "---\naudio_codecs:\n" );
+               AVCodec *codec = NULL;
+               while ( ( codec = av_codec_next( codec ) ) )
+                       if ( codec->decode && codec->type == CODEC_TYPE_AUDIO )
+                               fprintf( stderr, "  - %s\n", codec->name );
+               fprintf( stderr, "...\n" );
+               skip = 1;
+       }
+       if ( file && strstr( file, "vcodec-list" ) )
+       {
+               fprintf( stderr, "---\nvideo_codecs:\n" );
+               AVCodec *codec = NULL;
+               while ( ( codec = av_codec_next( codec ) ) )
+                       if ( codec->decode && codec->type == CODEC_TYPE_VIDEO )
+                               fprintf( stderr, "  - %s\n", codec->name );
+               fprintf( stderr, "...\n" );
+               skip = 1;
+       }
+
+       return skip;
 }
 
 /** Find the default streams.
 */
 
-static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatContext *context, int *audio_index, int *video_index )
+static mlt_properties find_default_streams( producer_avformat self )
 {
        int i;
        char key[200];
+       AVMetadataTag *tag = NULL;
+       AVFormatContext *context = self->video_format;
+       mlt_properties meta_media = MLT_PRODUCER_PROPERTIES( self->parent );
+
+       // Default to the first audio and video streams found
+       self->audio_index = -1;
+       self->video_index = -1;
 
        mlt_properties_set_int( meta_media, "meta.media.nb_streams", context->nb_streams );
 
@@ -258,11 +290,20 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
                switch( codec_context->codec_type )
                {
                        case CODEC_TYPE_VIDEO:
-                               if ( *video_index < 0 )
-                                       *video_index = i;
+                               // Use first video stream
+                               if ( self->video_index < 0 )
+                                       self->video_index = i;
                                mlt_properties_set( meta_media, key, "video" );
                                snprintf( key, sizeof(key), "meta.media.%d.stream.frame_rate", i );
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
+                               double ffmpeg_fps = av_q2d( context->streams[ i ]->avg_frame_rate );
+                               if ( isnan( ffmpeg_fps ) || ffmpeg_fps == 0 )
+                                       ffmpeg_fps = av_q2d( context->streams[ i ]->r_frame_rate );
+                               mlt_properties_set_double( meta_media, key, ffmpeg_fps );
+#else
                                mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->r_frame_rate ) );
+#endif
+
 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
                                snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
                                mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->sample_aspect_ratio ) );
@@ -274,12 +315,36 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
                                mlt_properties_set( meta_media, key, avcodec_get_pix_fmt_name( codec_context->pix_fmt ) );
                                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_aspect_ratio", i );
                                mlt_properties_set_double( meta_media, key, av_q2d( codec_context->sample_aspect_ratio ) );
+#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)
+                               snprintf( key, sizeof(key), "meta.media.%d.codec.colorspace", i );
+                               switch ( codec_context->colorspace )
+                               {
+                               case AVCOL_SPC_SMPTE240M:
+                                       mlt_properties_set_int( meta_media, key, 240 );
+                                       break;
+                               case AVCOL_SPC_BT470BG:
+                               case AVCOL_SPC_SMPTE170M:
+                                       mlt_properties_set_int( meta_media, key, 601 );
+                                       break;
+                               case AVCOL_SPC_BT709:
+                                       mlt_properties_set_int( meta_media, key, 709 );
+                                       break;
+                               default:
+                                       // This is a heuristic Charles Poynton suggests in "Digital Video and HDTV"
+                                       mlt_properties_set_int( meta_media, key, codec_context->width * codec_context->height > 750000 ? 709 : 601 );
+                                       break;
+                               }
+#endif
                                break;
                        case CODEC_TYPE_AUDIO:
-                               if ( *audio_index < 0 )
-                                       *audio_index = i;
+                               // Use first audio stream
+                               if ( self->audio_index < 0 )
+                                       self->audio_index = i;
                                mlt_properties_set( meta_media, key, "audio" );
-#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
+#if LIBAVCODEC_VERSION_MAJOR > 52
+                               snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
+                               mlt_properties_set( meta_media, key, av_get_sample_fmt_name( codec_context->sample_fmt ) );
+#elif (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
                                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
                                mlt_properties_set( meta_media, key, avcodec_get_sample_fmt_name( codec_context->sample_fmt ) );
 #endif
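The default case of the colorspace switch added above encodes the resolution heuristic Poynton describes: anything larger than roughly 750000 pixels is assumed to be Rec. 709, anything smaller Rec. 601. Restated as a standalone helper purely for illustration (the function name is invented and is not part of the patch):

    /* Illustration only - restates the default branch of the switch above. */
    static int guess_colorspace( int width, int height )
    {
        /* 720x576 (PAL) = 414720 pixels -> 601
         * 1280x720 (HD) = 921600 pixels -> 709 */
        return width * height > 750000 ? 709 : 601;
    }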
@@ -307,7 +372,44 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
 //             mlt_properties_set_int( meta_media, key, codec_context->profile );
 //             snprintf( key, sizeof(key), "meta.media.%d.codec.level", i );
 //             mlt_properties_set_int( meta_media, key, codec_context->level );
+
+               // Read Metadata
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(31<<8)+0)
+               while ( ( tag = av_metadata_get( stream->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX ) ) )
+               {
+                       if ( tag->value && strcmp( tag->value, "" ) && strcmp( tag->value, "und" ) )
+                       {
+                               snprintf( key, sizeof(key), "meta.attr.%d.stream.%s.markup", i, tag->key );
+                               mlt_properties_set( meta_media, key, tag->value );
+                       }
+               }
+#endif
+       }
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(31<<8)+0)
+       while ( ( tag = av_metadata_get( context->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX ) ) )
+       {
+               if ( tag->value && strcmp( tag->value, "" ) && strcmp( tag->value, "und" ) )
+               {
+                       snprintf( key, sizeof(key), "meta.attr.%s.markup", tag->key );
+                       mlt_properties_set( meta_media, key, tag->value );
+               }
        }
+#else
+       if ( context->title && strcmp( context->title, "" ) )
+               mlt_properties_set( meta_media, "meta.attr.title.markup", context->title );
+       if ( context->author && strcmp( context->author, "" ) )
+               mlt_properties_set( meta_media, "meta.attr.author.markup", context->author );
+       if ( context->copyright && strcmp( context->copyright, "" ) )
+               mlt_properties_set( meta_media, "meta.attr.copyright.markup", context->copyright );
+       if ( context->comment )
+               mlt_properties_set( meta_media, "meta.attr.comment.markup", context->comment );
+       if ( context->album )
+               mlt_properties_set( meta_media, "meta.attr.album.markup", context->album );
+       if ( context->year )
+               mlt_properties_set_int( meta_media, "meta.attr.year.markup", context->year );
+       if ( context->track )
+               mlt_properties_set_int( meta_media, "meta.attr.track.markup", context->track );
+#endif
 
        return meta_media;
 }
@@ -332,7 +434,7 @@ static int dv_is_wide( AVPacket *pkt )
        return 0;
 }
 
-static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
+static double get_aspect_ratio( mlt_properties properties, AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
 {
        double aspect_ratio = 1.0;
 
@@ -342,15 +444,29 @@ static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context,
                {
                        if ( dv_is_pal( pkt ) )
                        {
-                               aspect_ratio = dv_is_wide( pkt )
-                                       ? 64.0/45.0 // 16:9 PAL
-                                       : 16.0/15.0; // 4:3 PAL
+                               if ( dv_is_wide( pkt ) )
+                               {
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 64 );
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 45 );
+                               }
+                               else
+                               {
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 16 );
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 15 );
+                               }
                        }
                        else
                        {
-                               aspect_ratio = dv_is_wide( pkt )
-                                       ? 32.0/27.0 // 16:9 NTSC
-                                       : 8.0/9.0; // 4:3 NTSC
+                               if ( dv_is_wide( pkt ) )
+                               {
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 32 );
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 27 );
+                               }
+                               else
+                               {
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 8 );
+                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 9 );
+                               }
                        }
                }
                else
@@ -368,13 +484,29 @@ static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context,
                        // the rescale normaliser when using equivalent producers and consumers.
                        // = display_aspect / (width * height)
                        if ( ar.num == 10 && ar.den == 11 )
-                               aspect_ratio = 8.0/9.0; // 4:3 NTSC
+                       {
+                               // 4:3 NTSC
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 8 );
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 9 );
+                       }
                        else if ( ar.num == 59 && ar.den == 54 )
-                               aspect_ratio = 16.0/15.0; // 4:3 PAL
+                       {
+                               // 4:3 PAL
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 16 );
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 15 );
+                       }
                        else if ( ar.num == 40 && ar.den == 33 )
-                               aspect_ratio = 32.0/27.0; // 16:9 NTSC
+                       {
+                               // 16:9 NTSC
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 32 );
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 27 );
+                       }
                        else if ( ar.num == 118 && ar.den == 81 )
-                               aspect_ratio = 64.0/45.0; // 16:9 PAL
+                       {
+                               // 16:9 PAL
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 64 );
+                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 45 );
+                       }
                }
        }
        else
@@ -387,261 +519,500 @@ static double get_aspect_ratio( AVStream *stream, AVCodecContext *codec_context,
                        { 0, 1 };
 #endif
                if ( codec_sar.num > 0 )
-                       aspect_ratio = av_q2d( codec_sar );
+               {
+                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", codec_sar.num );
+                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", codec_sar.den );
+               }
                else if ( stream_sar.num > 0 )
-                       aspect_ratio = av_q2d( stream_sar );
+               {
+                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", stream_sar.num );
+                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", stream_sar.den );
+               }
+               else
+               {
+                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 1 );
+                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 1 );
+               }
        }
+       AVRational ar = { mlt_properties_get_double( properties, "meta.media.sample_aspect_num" ), mlt_properties_get_double( properties, "meta.media.sample_aspect_den" ) };
+       aspect_ratio = av_q2d( ar );
+       mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
+
        return aspect_ratio;
 }
 
-/** Open the file.
-*/
-
-static int producer_open( producer_avformat this, mlt_profile profile, char *file )
+static char* parse_url( mlt_profile profile, const char* URL, AVInputFormat **format, AVFormatParameters *params )
 {
-       // Return an error code (0 == no error)
-       int error = 0;
-
-       // Context for avformat
-       AVFormatContext *context = NULL;
-
-       // Get the properties
-       mlt_properties properties = MLT_PRODUCER_PROPERTIES( this->parent );
-
-       // We will treat everything with the producer fps
-       double fps = mlt_profile_fps( profile );
-
-       // Lock the mutex now
-       avformat_lock( );
-
-       // If "MRL", then create AVInputFormat
-       AVInputFormat *format = NULL;
-       AVFormatParameters *params = NULL;
-       char *standard = NULL;
-       char *mrl = strchr( file, ':' );
+       if ( !URL ) return NULL;
 
-       // AV option (0 = both, 1 = video, 2 = audio)
-       int av = 0;
+       const char *result = URL;
+       char *protocol = strdup( URL );
+       char *url = strchr( protocol, ':' );
 
        // Only if there is not a protocol specification that avformat can handle
-       if ( mrl && !url_exist( file ) )
+#if LIBAVFORMAT_VERSION_MAJOR > 52
+       if ( url && avio_check( URL, 0 ) < 0 )
+#else
+       if ( url && !url_exist( URL ) )
+#endif
        {
-               // 'file' becomes format abbreviation
-               mrl[0] = 0;
+               // Truncate protocol string
+               url[0] = 0;
+               mlt_log_debug( NULL, "%s: protocol=%s resource=%s\n", __FUNCTION__, protocol, url + 1 );
 
                // Lookup the format
-               format = av_find_input_format( file );
+               *format = av_find_input_format( protocol );
 
                // Eat the format designator
-               file = ++mrl;
+               result = ++url;
 
-               if ( format )
+               if ( *format )
                {
-                       // Allocate params
-                       params = calloc( sizeof( AVFormatParameters ), 1 );
-
-                       // These are required by video4linux (defaults)
-                       params->width = 640;
-                       params->height = 480;
-                       params->time_base= (AVRational){1,25};
-                       // params->device = file;
+                       // These are required by video4linux2 (defaults)
+                       params->width = profile->width;
+                       params->height = profile->height;
+                       params->time_base= (AVRational){ profile->frame_rate_den, profile->frame_rate_num };
                        params->channels = 2;
                        params->sample_rate = 48000;
+
+                       // Parse out params
+                       url = strchr( url, '?' );
+                       while ( url )
+                       {
+                               url[0] = 0;
+                               char *name = strdup( ++url );
+                               char *value = strchr( name, ':' );
+                               if ( value )
+                               {
+                                       value[0] = 0;
+                                       value++;
+                                       char *t = strchr( value, '&' );
+                                       if ( t )
+                                               t[0] = 0;
+                                       if ( !strcmp( name, "frame_rate" ) )
+                                               params->time_base.den = atoi( value );
+                                       else if ( !strcmp( name, "frame_rate_base" ) )
+                                               params->time_base.num = atoi( value );
+                                       else if ( !strcmp( name, "sample_rate" ) )
+                                               params->sample_rate = atoi( value );
+                                       else if ( !strcmp( name, "channel" ) )
+                                               params->channel = atoi( value );
+                                       else if ( !strcmp( name, "channels" ) )
+                                               params->channels = atoi( value );
+#if (LIBAVUTIL_VERSION_INT > ((50<<16)+(7<<8)+0))
+                                       else if ( !strcmp( name, "pix_fmt" ) )
+                                               params->pix_fmt = av_get_pix_fmt( value );
+#endif
+                                       else if ( !strcmp( name, "width" ) )
+                                               params->width = atoi( value );
+                                       else if ( !strcmp( name, "height" ) )
+                                               params->height = atoi( value );
+                                       else if ( !strcmp( name, "standard" ) )
+                                               params->standard = strdup( value );
+                               }
+                               free( name );
+                               url = strchr( url, '&' );
+                       }
                }
+       }
+       free( protocol );
+       return strdup( result );
+}
+
+static int get_basic_info( producer_avformat self, mlt_profile profile, const char *filename )
+{
+       int error = 0;
+
+       // Get the properties
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( self->parent );
+
+       AVFormatContext *format = self->video_format;
+
+       // We will treat everything with the producer fps.
+       // TODO: make this more flexible.
+       double fps = mlt_profile_fps( profile );
+
+       // Get the duration
+       if ( !mlt_properties_get_int( properties, "_length_computed" ) )
+       {
+               // The _length_computed flag prevents overwriting explicitly set length/out/eof properties
+               // when producer_open is called after the initial call, when restoring or resetting the producer.
+               if ( format->duration != AV_NOPTS_VALUE )
+               {
+                       // This isn't going to be accurate for all formats
+                       mlt_position frames = ( mlt_position )( ( ( double )format->duration / ( double )AV_TIME_BASE ) * fps );
+                       mlt_properties_set_position( properties, "out", frames - 1 );
+                       mlt_properties_set_position( properties, "length", frames );
+                       mlt_properties_set_int( properties, "_length_computed", 1 );
+               }
+               else
+               {
+                       // Set live sources to run forever
+                       mlt_properties_set_position( properties, "length", INT_MAX );
+                       mlt_properties_set_position( properties, "out", INT_MAX - 1 );
+                       mlt_properties_set( properties, "eof", "loop" );
+                       mlt_properties_set_int( properties, "_length_computed", 1 );
+               }
+       }
+
+       if ( format->start_time != AV_NOPTS_VALUE )
+               self->start_time = format->start_time;
+
+       // Check if we're seekable
+       // avdevices are typically AVFMT_NOFILE and not seekable
+       self->seekable = !format->iformat || !( format->iformat->flags & AVFMT_NOFILE );
+       if ( format->pb )
+       {
+               // protocols can indicate if they support seeking
+#if LIBAVFORMAT_VERSION_MAJOR > 52
+               self->seekable = format->pb->seekable;
+#else
+               URLContext *uc = url_fileno( format->pb );
+               if ( uc )
+                       self->seekable = !uc->is_streamed;
+#endif
+       }
+       if ( self->seekable )
+       {
+               // Do a more rigorous test of seekability on a disposable context
+               self->seekable = av_seek_frame( format, -1, self->start_time, AVSEEK_FLAG_BACKWARD ) >= 0;
+               mlt_properties_set_int( properties, "seekable", self->seekable );
+               self->dummy_context = format;
+               av_open_input_file( &self->video_format, filename, NULL, 0, NULL );
+               format = self->video_format;
+               av_find_stream_info( format );
+       }
+
+       // Fetch the width, height and aspect ratio
+       if ( self->video_index != -1 )
+       {
+               AVCodecContext *codec_context = format->streams[ self->video_index ]->codec;
+               mlt_properties_set_int( properties, "width", codec_context->width );
+               mlt_properties_set_int( properties, "height", codec_context->height );
 
-               // Parse out params
-               mrl = strchr( file, '?' );
-               while ( mrl )
+               if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
                {
-                       mrl[0] = 0;
-                       char *name = strdup( ++mrl );
-                       char *value = strchr( name, ':' );
-                       if ( value )
+                       // Fetch the first frame of DV so we can read it directly
+                       AVPacket pkt;
+                       int ret = 0;
+                       while ( ret >= 0 )
                        {
-                               value[0] = 0;
-                               value++;
-                               char *t = strchr( value, '&' );
-                               if ( t )
-                                       t[0] = 0;
-                               if ( !strcmp( name, "frame_rate" ) )
-                                       params->time_base.den = atoi( value );
-                               else if ( !strcmp( name, "frame_rate_base" ) )
-                                       params->time_base.num = atoi( value );
-                               else if ( !strcmp( name, "sample_rate" ) )
-                                       params->sample_rate = atoi( value );
-                               else if ( !strcmp( name, "channels" ) )
-                                       params->channels = atoi( value );
-                               else if ( !strcmp( name, "width" ) )
-                                       params->width = atoi( value );
-                               else if ( !strcmp( name, "height" ) )
-                                       params->height = atoi( value );
-                               else if ( !strcmp( name, "standard" ) )
+                               ret = av_read_frame( format, &pkt );
+                               if ( ret >= 0 && pkt.stream_index == self->video_index && pkt.size > 0 )
                                {
-                                       standard = strdup( value );
-                                       params->standard = standard;
+                                       get_aspect_ratio( properties, format->streams[ self->video_index ], codec_context, &pkt );
+                                       break;
                                }
-                               else if ( !strcmp( name, "av" ) )
-                                       av = atoi( value );
                        }
-                       free( name );
-                       mrl = strchr( mrl, '&' );
                }
+               else
+               {
+                       get_aspect_ratio( properties, format->streams[ self->video_index ], codec_context, NULL );
+               }
+
+#ifdef SWSCALE
+               // Verify that we can convert this to YUV 4:2:2
+               // TODO: we can now also return RGB and RGBA and quite possibly more in the future.
+               struct SwsContext *context = sws_getContext( codec_context->width, codec_context->height, codec_context->pix_fmt,
+                       codec_context->width, codec_context->height, PIX_FMT_YUYV422, SWS_BILINEAR, NULL, NULL, NULL);
+               if ( context )
+                       sws_freeContext( context );
+               else
+                       error = 1;
+#endif
        }
+       return error;
+}
 
-       // Now attempt to open the file
-       error = av_open_input_file( &context, file, format, 0, params ) < 0;
+/** Open the file.
+*/
+
+static int producer_open( producer_avformat self, mlt_profile profile, const char *URL )
+{
+       // Return an error code (0 == no error)
+       int error = 0;
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( self->parent );
+
+       // Lock the service
+       pthread_mutex_init( &self->audio_mutex, NULL );
+       pthread_mutex_init( &self->video_mutex, NULL );
+       pthread_mutex_init( &self->packets_mutex, NULL );
+       mlt_events_block( properties, self->parent );
+       pthread_mutex_lock( &self->audio_mutex );
+       pthread_mutex_lock( &self->video_mutex );
+
+       // Parse URL
+       AVInputFormat *format = NULL;
+       AVFormatParameters params;
+       memset( &params, 0, sizeof(params) );
+       char *filename = parse_url( profile, URL, &format, &params );
+
+       // Now attempt to open the file or device with filename
+       error = av_open_input_file( &self->video_format, filename, format, 0, &params ) < 0;
+       if ( error )
+               // If the URL is a network stream URL, then we probably need to open with the full URL
+               error = av_open_input_file( &self->video_format, URL, format, 0, &params ) < 0;
+
+       // Set MLT properties onto video AVFormatContext
+       if ( !error && self->video_format )
+       {
+               apply_properties( self->video_format, properties, AV_OPT_FLAG_DECODING_PARAM );
+#if LIBAVFORMAT_VERSION_MAJOR > 52
+               if ( self->video_format->iformat && self->video_format->iformat->priv_class && self->video_format->priv_data )
+                       apply_properties( self->video_format->priv_data, properties, AV_OPT_FLAG_DECODING_PARAM );
+#endif
+       }
 
        // Cleanup AVFormatParameters
-       free( standard );
-       free( params );
+       if ( params.standard )
+               free( (void*) params.standard );
 
        // If successful, then try to get additional info
        if ( !error )
        {
                // Get the stream info
-               error = av_find_stream_info( context ) < 0;
+               error = av_find_stream_info( self->video_format ) < 0;
 
                // Continue if no error
                if ( !error )
                {
-                       // We will default to the first audio and video streams found
-                       int audio_index = -1;
-                       int video_index = -1;
+                       // Find default audio and video streams
+                       find_default_streams( self );
+                       error = get_basic_info( self, profile, filename );
+
+                       // Initialize position info
+                       self->first_pts = -1;
+                       self->last_position = POSITION_INITIAL;
 
-                       // Now set properties where we can (use default unknowns if required)
-                       if ( context->duration != AV_NOPTS_VALUE )
+                       // We're going to cheat here - for seekable A/V files, we will have separate contexts
+                       // to support independent seeking of audio from video.
+                       // TODO: Is this really necessary?
+                       if ( self->audio_index != -1 && self->video_index != -1 )
                        {
-                               // This isn't going to be accurate for all formats
-                               mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 );
-                               mlt_properties_set_position( properties, "out", frames - 1 );
-                               mlt_properties_set_position( properties, "length", frames );
+                               if ( self->seekable )
+                               {
+                                       // And open again for our audio context
+                                       av_open_input_file( &self->audio_format, filename, NULL, 0, NULL );
+                                       apply_properties( self->audio_format, properties, AV_OPT_FLAG_DECODING_PARAM );
+#if LIBAVFORMAT_VERSION_MAJOR > 52
+                                       if ( self->audio_format->iformat && self->audio_format->iformat->priv_class && self->audio_format->priv_data )
+                                               apply_properties( self->audio_format->priv_data, properties, AV_OPT_FLAG_DECODING_PARAM );
+#endif
+                                       av_find_stream_info( self->audio_format );
+                               }
+                               else
+                               {
+                                       self->audio_format = self->video_format;
+                               }
                        }
-
-                       // Find default audio and video streams
-                       find_default_streams( properties, context, &audio_index, &video_index );
-
-                       if ( context->start_time != AV_NOPTS_VALUE )
-                               this->start_time = context->start_time;
-
-                       // Check if we're seekable (something funny about mpeg here :-/)
-                       if ( strncmp( file, "pipe:", 5 ) &&
-                                strncmp( file, "/dev/", 5 ) &&
-                                strncmp( file, "http:", 5 ) &&
-                                strncmp( file, "udp:", 4 )  &&
-                                strncmp( file, "tcp:", 4 )  &&
-                                strncmp( file, "rtsp:", 5 ) &&
-                                strncmp( file, "rtp:", 4 ) )
+                       else if ( self->audio_index != -1 )
                        {
-                               this->seekable = av_seek_frame( context, -1, this->start_time, AVSEEK_FLAG_BACKWARD ) >= 0;
-                               mlt_properties_set_int( properties, "seekable", this->seekable );
-                               this->dummy_context = context;
-                               av_open_input_file( &context, file, NULL, 0, NULL );
-                               av_find_stream_info( context );
+                               // We only have an audio context
+                               self->audio_format = self->video_format;
+                               self->video_format = NULL;
+                       }
+                       else if ( self->video_index == -1 )
+                       {
+                               // Something has gone wrong
+                               error = -1;
                        }
+                       if ( self->audio_format && !self->audio_streams )
+                               get_audio_streams_info( self );
+               }
+       }
+       if ( filename )
+               free( filename );
+       if ( !error )
+       {
+               self->apackets = mlt_deque_init();
+               self->vpackets = mlt_deque_init();
+       }
 
-                       // Store selected audio and video indexes on properties
-                       this->audio_index = audio_index;
-                       this->video_index = video_index;
-                       this->first_pts = -1;
-                       this->last_position = POSITION_INITIAL;
+       if ( self->dummy_context )
+       {
+               av_close_input_file( self->dummy_context );
+               self->dummy_context = NULL;
+       }
 
-                       // Fetch the width, height and aspect ratio
-                       if ( video_index != -1 )
+       // Unlock the service
+       pthread_mutex_unlock( &self->audio_mutex );
+       pthread_mutex_unlock( &self->video_mutex );
+       mlt_events_unblock( properties, self->parent );
+
+       return error;
+}
+
+static void reopen_video( producer_avformat self, mlt_producer producer )
+{
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
+       mlt_service_lock( MLT_PRODUCER_SERVICE( producer ) );
+       pthread_mutex_lock( &self->audio_mutex );
+
+       if ( self->video_codec )
+       {
+               avformat_lock();
+               avcodec_close( self->video_codec );
+               avformat_unlock();
+       }
+       self->video_codec = NULL;
+       if ( self->dummy_context )
+               av_close_input_file( self->dummy_context );
+       self->dummy_context = NULL;
+       if ( self->video_format )
+               av_close_input_file( self->video_format );
+       self->video_format = NULL;
+
+       int audio_index = self->audio_index;
+       int video_index = self->video_index;
+
+       pthread_mutex_unlock( &self->audio_mutex );
+       pthread_mutex_unlock( &self->video_mutex );
+       producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
+               mlt_properties_get( properties, "resource" ) );
+       pthread_mutex_lock( &self->video_mutex );
+       pthread_mutex_lock( &self->audio_mutex );
+
+       self->audio_index = audio_index;
+       if ( self->video_format && video_index > -1 )
+       {
+               self->video_index = video_index;
+               video_codec_init( self, video_index, properties );
+       }
+
+       pthread_mutex_unlock( &self->audio_mutex );
+       mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
+}
+
+static int seek_video( producer_avformat self, mlt_position position,
+       int req_position, int must_decode, int use_new_seek, int *ignore )
+{
+       mlt_producer producer = self->parent;
+       int paused = 0;
+
+       if ( self->seekable && ( position != self->video_expected || self->last_position < 0 ) )
+       {
+               mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
+
+               // Fetch the video format context
+               AVFormatContext *context = self->video_format;
+
+               // Get the video stream
+               AVStream *stream = context->streams[ self->video_index ];
+
+               // Get codec context
+               AVCodecContext *codec_context = stream->codec;
+
+               // We may want to use the source fps if available
+               double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
+                       mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
+
+               if ( self->av_frame && position + 1 == self->video_expected )
+               {
+                       // We're paused - use last image
+                       paused = 1;
+               }
+               else if ( !self->seekable && position > self->video_expected && ( position - self->video_expected ) < 250 )
+               {
+                       // Fast forward - seeking is inefficient for small distances - just ignore following frames
+                       *ignore = ( int )( ( position - self->video_expected ) / mlt_producer_get_fps( producer ) * source_fps );
+                       codec_context->skip_loop_filter = AVDISCARD_NONREF;
+               }
+               else if ( self->seekable && ( position < self->video_expected || position - self->video_expected >= 12 || self->last_position < 0 ) )
+               {
+                       if ( use_new_seek && self->last_position == POSITION_INITIAL )
                        {
-                               AVCodecContext *codec_context = context->streams[ video_index ]->codec;
-                               mlt_properties_set_int( properties, "width", codec_context->width );
-                               mlt_properties_set_int( properties, "height", codec_context->height );
+                               // find first key frame
+                               int ret = 0;
+                               int toscan = 100;
+                               AVPacket pkt;
 
-                               if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
+                               while ( ret >= 0 && toscan-- > 0 )
                                {
-                                       // Fetch the first frame of DV so we can read it directly
-                                       AVPacket pkt;
-                                       int ret = 0;
-                                       while ( ret >= 0 )
+                                       ret = av_read_frame( context, &pkt );
+                                       if ( ret >= 0 && ( pkt.flags & PKT_FLAG_KEY ) && pkt.stream_index == self->video_index )
                                        {
-                                               ret = av_read_frame( context, &pkt );
-                                               if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
-                                               {
-                                                       mlt_properties_set_double( properties, "aspect_ratio",
-                                                               get_aspect_ratio( context->streams[ video_index ], codec_context, &pkt ) );
-                                                       break;
-                                               }
+                                               mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n", pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
+                                               self->first_pts = pkt.pts;
+                                               toscan = 0;
                                        }
+                                       av_free_packet( &pkt );
                                }
-                               else
-                               {
-                                       mlt_properties_set_double( properties, "aspect_ratio",
-                                               get_aspect_ratio( context->streams[ video_index ], codec_context, NULL ) );
-                               }
+                               // Rewind
+                               av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
                        }
 
-                       // Read Metadata
-                       if ( context->title )
-                               mlt_properties_set(properties, "meta.attr.title.markup", context->title );
-                       if ( context->author )
-                               mlt_properties_set(properties, "meta.attr.author.markup", context->author );
-                       if ( context->copyright )
-                               mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
-                       if ( context->comment )
-                               mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
-                       if ( context->album )
-                               mlt_properties_set(properties, "meta.attr.album.markup", context->album );
-                       if ( context->year )
-                               mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
-                       if ( context->track )
-                               mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );
-
-                       // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
-                       if ( av == 0 && audio_index != -1 && video_index != -1 )
+                       // Calculate the timestamp for the requested frame
+                       int64_t timestamp;
+                       if ( use_new_seek )
                        {
-                               // We'll use the open one as our video_format
-                               this->video_format = context;
-
-                               // And open again for our audio context
-                               av_open_input_file( &context, file, NULL, 0, NULL );
-                               av_find_stream_info( context );
-
-                               // Audio context
-                               this->audio_format = context;
+                               timestamp = ( req_position - 0.1 / source_fps ) /
+                                       ( av_q2d( stream->time_base ) * source_fps );
+                               mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "pos %d pts %"PRId64" ", req_position, timestamp );
+                               if ( self->first_pts > 0 )
+                                       timestamp += self->first_pts;
+                               else if ( context->start_time != AV_NOPTS_VALUE )
+                                       timestamp += context->start_time;
                        }
-                       else if ( av != 2 && video_index != -1 )
+                       else
+                       {
+                               timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
+                               if ( context->start_time != AV_NOPTS_VALUE )
+                                       timestamp += context->start_time;
+                       }
+                       if ( must_decode )
+                               timestamp -= AV_TIME_BASE;
+                       if ( timestamp < 0 )
+                               timestamp = 0;
+                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %"PRId64" position %d expected %d last_pos %d\n",
+                               timestamp, position, self->video_expected, self->last_position );
+
+                       // Seek to the timestamp
+                       if ( use_new_seek )
                        {
-                               // We only have a video context
-                               this->video_format = context;
+                               codec_context->skip_loop_filter = AVDISCARD_NONREF;
+                               av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
                        }
-                       else if ( audio_index != -1 )
+                       else if ( req_position > 0 || self->last_position <= 0 )
                        {
-                               // We only have an audio context
-                               this->audio_format = context;
+                               av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
                        }
                        else
                        {
-                               // Something has gone wrong
-                               error = -1;
+                               // Re-open the video stream when rewinding to the beginning from somewhere else.
+                               // This is rather ugly, and I would prefer not to do it this way, but ffmpeg does
+                               // not reliably seek to the first frame across formats.
+                               reopen_video( self, producer );
+                       }
+
+                       // Remove the cached info relating to the previous position
+                       self->current_position = POSITION_INVALID;
+                       self->last_position = POSITION_INVALID;
+                       av_freep( &self->av_frame );
+
+                       if ( use_new_seek )
+                       {
+                               // flush any pictures still in decode buffer
+                               avcodec_flush_buffers( codec_context );
                        }
                }
        }
-
-       // Unlock the mutex now
-       avformat_unlock( );
-
-       return error;
+       return paused;
 }
 
 /** Convert a frame position to a time code.
 */
 
-static double producer_time_of_frame( mlt_producer this, mlt_position position )
+static double producer_time_of_frame( mlt_producer producer, mlt_position position )
 {
-       return ( double )position / mlt_producer_get_fps( this );
+       return ( double )position / mlt_producer_get_fps( producer );
 }
 
-               // Collect information about all audio streams
+// Collect information about all audio streams
 
-static void get_audio_streams_info( producer_avformat this )
+static void get_audio_streams_info( producer_avformat self )
 {
        // Fetch the audio format context
-       AVFormatContext *context = this->audio_format;
+       AVFormatContext *context = self->audio_format;
        int i;
 
        for ( i = 0;
@@ -657,43 +1028,93 @@ static void get_audio_streams_info( producer_avformat this )
                        avformat_lock( );
                        if ( codec && avcodec_open( codec_context, codec ) >= 0 )
                        {
-                               this->audio_streams++;
-                               this->audio_max_stream = i;
-                               this->total_channels += codec_context->channels;
-                               if ( codec_context->channels > this->max_channel )
-                                       this->max_channel = codec_context->channels;
-                               if ( codec_context->sample_rate > this->max_frequency )
-                                       this->max_frequency = codec_context->sample_rate;
+                               self->audio_streams++;
+                               self->audio_max_stream = i;
+                               self->total_channels += codec_context->channels;
+                               if ( codec_context->channels > self->max_channel )
+                                       self->max_channel = codec_context->channels;
+                               if ( codec_context->sample_rate > self->max_frequency )
+                                       self->max_frequency = codec_context->sample_rate;
                                avcodec_close( codec_context );
                        }
                        avformat_unlock( );
                }
        }
        mlt_log_verbose( NULL, "[producer avformat] audio: total_streams %d max_stream %d total_channels %d max_channels %d\n",
-               this->audio_streams, this->audio_max_stream, this->total_channels, this->max_channel );
+               self->audio_streams, self->audio_max_stream, self->total_channels, self->max_channel );
        
        // Other audio-specific initializations
-       this->resample_factor = 1.0;
+       self->resample_factor = 1.0;
+}
+
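get_audio_streams_info() opens every audio codec just long enough to learn its channel count and sample rate. Since the loop condition sits on the far side of the hunk boundary above, here is a much-reduced sketch of the same probing idea, with an invented name, no codec open, and the compatibility defines this file already provides:

    static void count_audio_streams( AVFormatContext *context, int *streams, int *max_channels )
    {
        unsigned int i;
        *streams = 0;
        *max_channels = 0;
        for ( i = 0; i < context->nb_streams; i++ )
        {
            AVCodecContext *codec_context = context->streams[ i ]->codec;
            if ( codec_context->codec_type != CODEC_TYPE_AUDIO )
                continue;
            (*streams)++;
            if ( codec_context->channels > *max_channels )
                *max_channels = codec_context->channels;
        }
    }

The real function also tracks the highest audio stream index, the summed channel count, and the maximum sample rate, which the audio path relies on when audio_index=all.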
+static void set_luma_transfer( struct SwsContext *context, int colorspace, int use_full_range )
+{
+#if defined(SWSCALE) && (LIBSWSCALE_VERSION_INT >= ((0<<16)+(7<<8)+2))
+       int *coefficients;
+       const int *new_coefficients;
+       int full_range;
+       int brightness, contrast, saturation;
+
+       if ( sws_getColorspaceDetails( context, &coefficients, &full_range, &coefficients, &full_range,
+                       &brightness, &contrast, &saturation ) != -1 )
+       {
+               // Don't change these from defaults unless explicitly told to.
+               if ( use_full_range >= 0 )
+                       full_range = use_full_range;
+               switch ( colorspace )
+               {
+               case 170:
+               case 470:
+               case 601:
+               case 624:
+                       new_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
+                       break;
+               case 240:
+                       new_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
+                       break;
+               case 709:
+                       new_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
+                       break;
+               default:
+                       new_coefficients = coefficients;
+                       break;
+               }
+               sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range,
+                       brightness, contrast, saturation );
+       }
+#endif
 }
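
The colorspace argument to the new set_luma_transfer() helper takes the same numeric codes used throughout this file (601, 709, 240, plus the 170/470/624 aliases of 601), and use_full_range only overrides swscale's current setting when it is non-negative. A minimal, hypothetical caller, just to show how the pieces fit together:

    struct SwsContext *scaler = sws_getContext( 1920, 1080, PIX_FMT_YUV420P,
            1920, 1080, PIX_FMT_YUYV422, SWS_BILINEAR, NULL, NULL, NULL );
    if ( scaler )
    {
        set_luma_transfer( scaler, 709, -1 ); // Rec.709 matrix, keep the current range
        set_luma_transfer( scaler, 601, 1 );  // Rec.601 matrix, force full (JPEG) range
        sws_freeContext( scaler );
    }

convert_image() below always passes -1 for the range, so only the matrix is ever changed by this patch.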
 
-static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format *format, int width, int height )
+static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
+       mlt_image_format *format, int width, int height, int colorspace )
 {
 #ifdef SWSCALE
+       int full_range = -1;
+       int flags = SWS_BILINEAR | SWS_ACCURATE_RND;
+
+#ifdef USE_MMX
+       flags |= SWS_CPU_CAPS_MMX;
+#endif
+#ifdef USE_SSE
+       flags |= SWS_CPU_CAPS_MMX2;
+#endif
+
        if ( pix_fmt == PIX_FMT_RGB32 )
        {
                *format = mlt_image_rgb24a;
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-                       width, height, PIX_FMT_RGBA, SWS_BILINEAR, NULL, NULL, NULL);
+                       width, height, PIX_FMT_RGBA, flags, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
-               sws_scale( context, frame->data, frame->linesize, 0, height,
+               set_luma_transfer( context, colorspace, full_range );
+               sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
        }
        else if ( *format == mlt_image_yuv420p )
        {
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-                       width, height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
+                       width, height, PIX_FMT_YUV420P, flags, NULL, NULL, NULL);
                AVPicture output;
                output.data[0] = buffer;
                output.data[1] = buffer + width * height;
@@ -701,37 +1122,41 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                output.linesize[0] = width;
                output.linesize[1] = width >> 1;
                output.linesize[2] = width >> 1;
-               sws_scale( context, frame->data, frame->linesize, 0, height,
+               set_luma_transfer( context, colorspace, full_range );
+               sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
        }
        else if ( *format == mlt_image_rgb24 )
        {
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-                       width, height, PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
+                       width, height, PIX_FMT_RGB24, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
-               sws_scale( context, frame->data, frame->linesize, 0, height,
+               set_luma_transfer( context, colorspace, full_range );
+               sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
        }
        else if ( *format == mlt_image_rgb24a || *format == mlt_image_opengl )
        {
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-                       width, height, PIX_FMT_RGBA, SWS_BILINEAR, NULL, NULL, NULL);
+                       width, height, PIX_FMT_RGBA, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
-               sws_scale( context, frame->data, frame->linesize, 0, height,
+               set_luma_transfer( context, colorspace, full_range );
+               sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
        }
        else
        {
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-                       width, height, PIX_FMT_YUYV422, SWS_BILINEAR, NULL, NULL, NULL);
+                       width, height, PIX_FMT_YUYV422, flags | SWS_FULL_CHR_H_INP, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
-               sws_scale( context, frame->data, frame->linesize, 0, height,
+               set_luma_transfer( context, colorspace, full_range );
+               sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
        }
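Each branch of convert_image() lays a packed or planar picture directly over the caller's buffer before scaling into it. The planar YUV 4:2:0 case is the least obvious, so here is that layout in isolation, as an illustrative helper rather than part of the patch:

    static void fill_yuv420p_picture( AVPicture *pic, uint8_t *buffer, int width, int height )
    {
        pic->data[0] = buffer;                              // Y plane, width x height
        pic->data[1] = buffer + width * height;             // U plane, (width/2) x (height/2)
        pic->data[2] = buffer + width * height * 5 / 4;     // V plane, (width/2) x (height/2)
        pic->linesize[0] = width;
        pic->linesize[1] = width >> 1;
        pic->linesize[2] = width >> 1;
    }

Note that the VDPAU paths elsewhere in this file swap the [1] and [2] assignments: VDP_YCBCR_FORMAT_YV12 delivers the V plane before the U plane, so the pointers must be crossed to present the data as PIX_FMT_YUV420P.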
@@ -771,7 +1196,7 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
 /** Allocate the image buffer and set it on the frame.
 */
 
-static int allocate_buffer( mlt_properties frame_properties, AVCodecContext *codec_context, uint8_t **buffer, mlt_image_format *format, int *width, int *height )
+static int allocate_buffer( mlt_frame frame, AVCodecContext *codec_context, uint8_t **buffer, mlt_image_format *format, int *width, int *height )
 {
        int size = 0;
 
@@ -780,33 +1205,16 @@ static int allocate_buffer( mlt_properties frame_properties, AVCodecContext *cod
 
        *width = codec_context->width;
        *height = codec_context->height;
-       mlt_properties_set_int( frame_properties, "width", *width );
-       mlt_properties_set_int( frame_properties, "height", *height );
 
        if ( codec_context->pix_fmt == PIX_FMT_RGB32 )
                size = *width * ( *height + 1 ) * 4;
-       else switch ( *format )
-       {
-               case mlt_image_yuv420p:
-                       size = *width * 3 * ( *height + 1 ) / 2;
-                       break;
-               case mlt_image_rgb24:
-                       size = *width * ( *height + 1 ) * 3;
-                       break;
-               case mlt_image_rgb24a:
-               case mlt_image_opengl:
-                       size = *width * ( *height + 1 ) * 4;
-                       break;
-               default:
-                       *format = mlt_image_yuv422;
-                       size = *width * ( *height + 1 ) * 2;
-                       break;
-       }
+       else
+               size = mlt_image_format_size( *format, *width, *height, NULL );
 
        // Construct the output image
        *buffer = mlt_pool_alloc( size );
        if ( *buffer )
-               mlt_properties_set_data( frame_properties, "image", *buffer, size, mlt_pool_release, NULL );
+               mlt_frame_set_image( frame, *buffer, size, mlt_pool_release );
        else
                size = 0;
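mlt_image_format_size() now replaces the open-coded switch. For reference, with a 720x576 frame the removed formulas evaluate to:

    yuv420p : 720 * 3 * 577 / 2 =   623,160 bytes
    rgb24   : 720 * 577 * 3     = 1,246,320 bytes
    rgb24a  : 720 * 577 * 4     = 1,661,760 bytes
    yuv422  : 720 * 577 * 2     =   830,880 bytes

The height + 1 in these formulas leaves one spare row of padding after the image.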
 
@@ -819,8 +1227,8 @@ static int allocate_buffer( mlt_properties frame_properties, AVCodecContext *cod
 static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable )
 {
        // Get the producer
-       producer_avformat this = mlt_frame_pop_service( frame );
-       mlt_producer producer = this->parent;
+       producer_avformat self = mlt_frame_pop_service( frame );
+       mlt_producer producer = self->parent;
 
        // Get the properties from the frame
        mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
@@ -831,53 +1239,50 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        // Get the producer properties
        mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
 
+       pthread_mutex_lock( &self->video_mutex );
+
        // Fetch the video format context
-       AVFormatContext *context = this->video_format;
+       AVFormatContext *context = self->video_format;
 
        // Get the video stream
-       AVStream *stream = context->streams[ this->video_index ];
+       AVStream *stream = context->streams[ self->video_index ];
 
        // Get codec context
        AVCodecContext *codec_context = stream->codec;
 
        // Get the image cache
-       if ( ! this->image_cache && ! mlt_properties_get_int( properties, "noimagecache" ) )
-               this->image_cache = mlt_cache_init();
-       if ( this->image_cache )
+       if ( ! self->image_cache && ! mlt_properties_get_int( properties, "noimagecache" ) )
+               self->image_cache = mlt_cache_init();
+       if ( self->image_cache )
        {
-               mlt_cache_item item = mlt_cache_get( this->image_cache, (void*) position );
-               *buffer = mlt_cache_item_data( item, format );
-               if ( *buffer )
+               mlt_cache_item item = mlt_cache_get( self->image_cache, (void*) position );
+               uint8_t *original = mlt_cache_item_data( item, (int*) format );
+               if ( original )
                {
                        // Set the resolution
                        *width = codec_context->width;
                        *height = codec_context->height;
-                       mlt_properties_set_int( frame_properties, "width", *width );
-                       mlt_properties_set_int( frame_properties, "height", *height );
+
+                       // Workaround 1088 encodings missing cropping info.
+                       if ( *height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
+                               *height = 1080;
 
                        // Cache hit
-                       int size;
-                       switch ( *format )
+                       int size = mlt_image_format_size( *format, *width, *height, NULL );
+                       if ( writable )
                        {
-                               case mlt_image_yuv420p:
-                                       size = *width * 3 * ( *height + 1 ) / 2;
-                                       break;
-                               case mlt_image_rgb24:
-                                       size = *width * ( *height + 1 ) * 3;
-                                       break;
-                               case mlt_image_rgb24a:
-                               case mlt_image_opengl:
-                                       size = *width * ( *height + 1 ) * 4;
-                                       break;
-                               default:
-                                       *format = mlt_image_yuv422;
-                                       size = *width * ( *height + 1 ) * 2;
-                                       break;
+                               *buffer = mlt_pool_alloc( size );
+                               mlt_frame_set_image( frame, *buffer, size, mlt_pool_release );
+                               memcpy( *buffer, original, size );
+                               mlt_cache_item_close( item );
                        }
-                       mlt_properties_set_data( frame_properties, "avformat.image_cache", item, 0, ( mlt_destructor )mlt_cache_item_close, NULL );
-                       mlt_properties_set_data( frame_properties, "image", *buffer, size, NULL, NULL );
-                       this->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
-                       this->got_picture = 1;
+                       else
+                       {
+                               *buffer = original;
+                               mlt_properties_set_data( frame_properties, "avformat.image_cache", item, 0, ( mlt_destructor )mlt_cache_item_close, NULL );
+                               mlt_frame_set_image( frame, *buffer, size, NULL );
+                       }
+                       self->got_picture = 1;
 
                        goto exit_get_image;
                }
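On a cache hit the behaviour now depends on the writable flag: a writable request gets a private copy and releases the cache item immediately, while a read-only request borrows the cached buffer and parks the item on the frame so it stays pinned until the frame is destroyed. The same idea as a compact sketch; the name is illustrative and it assumes the headers this file already includes:

    static uint8_t *use_cached_image( mlt_cache_item item, uint8_t *original, int size,
            int writable, mlt_frame frame )
    {
        if ( writable )
        {
            // Private copy for the caller; the cache item can be released now.
            uint8_t *copy = mlt_pool_alloc( size );
            memcpy( copy, original, size );
            mlt_cache_item_close( item );
            return copy;
        }
        // Read-only: hand out the cached buffer and keep the item alive for as
        // long as the frame references it.
        mlt_properties_set_data( MLT_FRAME_PROPERTIES( frame ), "avformat.image_cache",
            item, 0, ( mlt_destructor )mlt_cache_item_close, NULL );
        return original;
    }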
@@ -885,23 +1290,18 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        // Cache miss
        int image_size = 0;
 
-       avformat_lock();
-
        // Packet
        AVPacket pkt;
 
-       // Special case pause handling flag
-       int paused = 0;
-
        // Special case ffwd handling
        int ignore = 0;
 
        // We may want to use the source fps if available
-       double source_fps = mlt_properties_get_double( properties, "source_fps" );
-       double fps = mlt_producer_get_fps( producer );
+       double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
+               mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
 
        // This is the physical frame position in the source
-       int req_position = ( int )( position / fps * source_fps + 0.5 );
+       int req_position = ( int )( position / mlt_producer_get_fps( producer ) * source_fps + 0.5 );
 
        // Determines if we have to decode all frames in a sequence
        // Temporary hack to improve intra frame only
@@ -911,122 +1311,50 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                  strcmp( codec_context->codec->name, "mjpeg" ) &&
                                  strcmp( codec_context->codec->name, "rawvideo" );
 
-       int last_position = this->last_position;
-
        // Turn on usage of new seek API and PTS for seeking
-       int use_new_seek = codec_context->codec_id == CODEC_ID_H264 && !strcmp( context->iformat->name, "mpegts" );
+       int use_new_seek = self->seekable &&
+               codec_context->codec_id == CODEC_ID_H264 && !strcmp( context->iformat->name, "mpegts" );
        if ( mlt_properties_get( properties, "new_seek" ) )
                use_new_seek = mlt_properties_get_int( properties, "new_seek" );
 
        // Seek if necessary
-       if ( position != this->video_expected || last_position < 0 )
-       {
-               if ( this->av_frame && position + 1 == this->video_expected )
-               {
-                       // We're paused - use last image
-                       paused = 1;
-               }
-               else if ( !this->seekable && position > this->video_expected && ( position - this->video_expected ) < 250 )
-               {
-                       // Fast forward - seeking is inefficient for small distances - just ignore following frames
-                       ignore = ( int )( ( position - this->video_expected ) / fps * source_fps );
-                       codec_context->skip_loop_filter = AVDISCARD_NONREF;
-               }
-               else if ( this->seekable && ( position < this->video_expected || position - this->video_expected >= 12 || last_position < 0 ) )
-               {
-                       if ( use_new_seek && last_position == POSITION_INITIAL )
-                       {
-                               // find first key frame
-                               int ret = 0;
-                               int toscan = 100;
-
-                               while ( ret >= 0 && toscan-- > 0 )
-                               {
-                                       ret = av_read_frame( context, &pkt );
-                                       if ( ret >= 0 && ( pkt.flags & PKT_FLAG_KEY ) && pkt.stream_index == this->video_index )
-                                       {
-                                               mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "first_pts %lld dts %lld pts_dts_delta %d\n", pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
-                                               this->first_pts = pkt.pts;
-                                               toscan = 0;
-                                       }
-                                       av_free_packet( &pkt );
-                               }
-                               // Rewind
-                               av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
-                       }
-
-                       // Calculate the timestamp for the requested frame
-                       int64_t timestamp;
-                       if ( use_new_seek )
-                       {
-                               timestamp = ( req_position - 0.1 / source_fps ) /
-                                       ( av_q2d( stream->time_base ) * source_fps );
-                               mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "pos %d pts %lld ", req_position, timestamp );
-                               if ( this->first_pts > 0 )
-                                       timestamp += this->first_pts;
-                               else if ( context->start_time != AV_NOPTS_VALUE )
-                                       timestamp += context->start_time;
-                       }
-                       else
-                       {
-                               timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
-                               if ( context->start_time != AV_NOPTS_VALUE )
-                                       timestamp += context->start_time;
-                       }
-                       if ( must_decode )
-                               timestamp -= AV_TIME_BASE;
-                       if ( timestamp < 0 )
-                               timestamp = 0;
-                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %lld position %d expected %d last_pos %d\n",
-                               timestamp, position, this->video_expected, last_position );
-
-                       // Seek to the timestamp
-                       if ( use_new_seek )
-                       {
-                               codec_context->skip_loop_filter = AVDISCARD_NONREF;
-                               av_seek_frame( context, this->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
-                       }
-                       else
-                       {
-                               av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
-                       }
+       int paused = seek_video( self, position, req_position, must_decode, use_new_seek, &ignore );
 
-                       // Remove the cached info relating to the previous position
-                       this->current_position = POSITION_INVALID;
-                       this->last_position = POSITION_INVALID;
-                       av_freep( &this->av_frame );
-
-                       if ( use_new_seek )
-                       {
-                               // flush any pictures still in decode buffer
-                               avcodec_flush_buffers( codec_context );
-                       }
-               }
-       }
+       // Seek might have reopened the file
+       context = self->video_format;
+       stream = context->streams[ self->video_index ];
+       codec_context = stream->codec;
 
-       // Duplicate the last image if necessary (see comment on rawvideo below)
-       if ( this->av_frame && this->av_frame->linesize[0] && this->got_picture && this->seekable
+       // Duplicate the last image if necessary
+       if ( self->av_frame && self->av_frame->linesize[0] && self->got_picture && self->seekable
                 && ( paused
-                         || this->current_position == req_position
-                         || ( !use_new_seek && this->current_position > req_position ) ) )
+                         || self->current_position == req_position
+                         || ( !use_new_seek && self->current_position > req_position ) ) )
        {
                // Duplicate it
-               if ( ( image_size = allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) ) )
+               if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
+               {
+                       // Workaround 1088 encodings missing cropping info.
+                       if ( *height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
+                               *height = 1080;
 #ifdef VDPAU
-                       if ( this->vdpau && this->vdpau->buffer )
+                       if ( self->vdpau && self->vdpau->buffer )
                        {
                                AVPicture picture;
-                               picture.data[0] = this->vdpau->buffer;
-                               picture.data[2] = this->vdpau->buffer + codec_context->width * codec_context->height;
-                               picture.data[1] = this->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
+                               picture.data[0] = self->vdpau->buffer;
+                               picture.data[2] = self->vdpau->buffer + codec_context->width * codec_context->height;
+                               picture.data[1] = self->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
                                picture.linesize[0] = codec_context->width;
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
-                               convert_image( (AVFrame*) &picture, *buffer, PIX_FMT_YUV420P, format, *width, *height );
+                               convert_image( (AVFrame*) &picture, *buffer,
+                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace );
                        }
                        else
 #endif
-                       convert_image( this->av_frame, *buffer, codec_context->pix_fmt, format, *width, *height );
+                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
+                               format, *width, *height, self->colorspace );
+               }
                else
                        mlt_frame_get_image( frame, buffer, format, width, height, writable );
        }
@@ -1040,30 +1368,50 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                av_init_packet( &pkt );
 
                // Construct an AVFrame for YUV422 conversion
-               if ( !this->av_frame )
-                       this->av_frame = avcodec_alloc_frame( );
+               if ( !self->av_frame )
+                       self->av_frame = avcodec_alloc_frame( );
 
                while( ret >= 0 && !got_picture )
                {
                        // Read a packet
-                       ret = av_read_frame( context, &pkt );
+                       pthread_mutex_lock( &self->packets_mutex );
+                       if ( mlt_deque_count( self->vpackets ) )
+                       {
+                               AVPacket *tmp = (AVPacket*) mlt_deque_pop_front( self->vpackets );
+                               pkt = *tmp;
+                               free( tmp );
+                       }
+                       else
+                       {
+                               ret = av_read_frame( context, &pkt );
+                               if ( ret >= 0 && !self->seekable && pkt.stream_index == self->audio_index )
+                               {
+                                       if ( !av_dup_packet( &pkt ) )
+                                       {
+                                               AVPacket *tmp = malloc( sizeof(AVPacket) );
+                                               *tmp = pkt;
+                                               mlt_deque_push_back( self->apackets, tmp );
+                                       }
+                               }
+                       }
+                       pthread_mutex_unlock( &self->packets_mutex );
 
                        // We only deal with video from the selected video_index
-                       if ( ret >= 0 && pkt.stream_index == this->video_index && pkt.size > 0 )
+                       if ( ret >= 0 && pkt.stream_index == self->video_index && pkt.size > 0 )
                        {
                                // Determine time code of the packet
                                if ( use_new_seek )
                                {
                                        int64_t pts = pkt.pts;
-                                       if ( this->first_pts > 0 )
-                                               pts -= this->first_pts;
+                                       if ( self->first_pts > 0 )
+                                               pts -= self->first_pts;
                                        else if ( context->start_time != AV_NOPTS_VALUE )
                                                pts -= context->start_time;
                                        int_position = ( int )( av_q2d( stream->time_base ) * pts * source_fps + 0.1 );
                                        if ( pkt.pts == AV_NOPTS_VALUE )
                                        {
-                                               this->invalid_pts_counter++;
-                                               if ( this->invalid_pts_counter > 20 )
+                                               self->invalid_pts_counter++;
+                                               if ( self->invalid_pts_counter > 20 )
                                                {
                                                        mlt_log_panic( MLT_PRODUCER_SERVICE(producer), "\ainvalid PTS; DISABLING NEW_SEEK!\n" );
                                                        mlt_properties_set_int( properties, "new_seek", 0 );
@@ -1073,28 +1421,27 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        }
                                        else
                                        {
-                                               this->invalid_pts_counter = 0;
+                                               self->invalid_pts_counter = 0;
                                        }
-                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.pts %llu req_pos %d cur_pos %d pkt_pos %d\n",
-                                               pkt.pts, req_position, this->current_position, int_position );
+                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.pts %"PRId64" req_pos %d cur_pos %d pkt_pos %d\n",
+                                               pkt.pts, req_position, self->current_position, int_position );
                                }
                                else
                                {
-                                       if ( pkt.dts != AV_NOPTS_VALUE )
+                                       if ( self->seekable && pkt.dts != AV_NOPTS_VALUE )
                                        {
                                                int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 );
                                                if ( context->start_time != AV_NOPTS_VALUE )
                                                        int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
-                                               last_position = this->last_position;
-                                               if ( int_position == last_position )
-                                                       int_position = last_position + 1;
+                                               if ( int_position == self->last_position )
+                                                       int_position = self->last_position + 1;
                                        }
                                        else
                                        {
                                                int_position = req_position;
                                        }
-                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.dts %llu req_pos %d cur_pos %d pkt_pos %d\n",
-                                               pkt.dts, req_position, this->current_position, int_position );
+                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.dts %"PRId64" req_pos %d cur_pos %d pkt_pos %d\n",
+                                               pkt.dts, req_position, self->current_position, int_position );
                                        // Make a dumb assumption on streams that contain wild timestamps
                                        if ( abs( req_position - int_position ) > 999 )
                                        {
@@ -1102,30 +1449,30 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                                mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " WILD TIMESTAMP!" );
                                        }
                                }
-                               this->last_position = int_position;
+                               self->last_position = int_position;
 
                                // Decode the image
                                if ( must_decode || int_position >= req_position )
                                {
 #ifdef VDPAU
-                                       if ( g_vdpau && this->vdpau )
+                                       if ( g_vdpau && self->vdpau )
                                        {
-                                               if ( g_vdpau->producer != this )
+                                               if ( g_vdpau->producer != self )
                                                {
                                                        vdpau_decoder_close();
-                                                       vdpau_decoder_init( this );
+                                                       vdpau_decoder_init( self );
                                                }
-                                               if ( this->vdpau )
-                                                       this->vdpau->is_decoded = 0;
+                                               if ( self->vdpau )
+                                                       self->vdpau->is_decoded = 0;
                                        }
 #endif
                                        codec_context->reordered_opaque = pkt.pts;
                                        if ( int_position >= req_position )
                                                codec_context->skip_loop_filter = AVDISCARD_NONE;
 #if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
-                                       ret = avcodec_decode_video2( codec_context, this->av_frame, &got_picture, &pkt );
+                                       ret = avcodec_decode_video2( codec_context, self->av_frame, &got_picture, &pkt );
 #else
-                                       ret = avcodec_decode_video( codec_context, this->av_frame, &got_picture, pkt.data, pkt.size );
+                                       ret = avcodec_decode_video( codec_context, self->av_frame, &got_picture, pkt.data, pkt.size );
 #endif
                                        // Note: decoding may fail at the beginning of an MPEG file (B-frames referencing frames before the first I-frame), so allow a few errors.
                                        if ( ret < 0 )
@@ -1144,13 +1491,13 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        if ( use_new_seek )
                                        {
                                                // Determine time code of the packet
-                                               int64_t pts = this->av_frame->reordered_opaque;
-                                               if ( this->first_pts > 0 )
-                                                       pts -= this->first_pts;
+                                               int64_t pts = self->av_frame->reordered_opaque;
+                                               if ( self->first_pts > 0 )
+                                                       pts -= self->first_pts;
                                                else if ( context->start_time != AV_NOPTS_VALUE )
                                                        pts -= context->start_time;
                                                int_position = ( int )( av_q2d( stream->time_base) * pts * source_fps + 0.1 );
-                                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "got frame %d, key %d\n", int_position, this->av_frame->key_frame );
+                                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "got frame %d, key %d\n", int_position, self->av_frame->key_frame );
                                        }
                                        // Handle ignore
                                        if ( int_position < req_position )
@@ -1169,94 +1516,136 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        }
                                }
                                mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, pkt.flags & PKT_FLAG_KEY );
-                               av_free_packet( &pkt );
-                       }
-                       else if ( ret >= 0 )
-                       {
-                               av_free_packet( &pkt );
                        }
 
                        // Now handle the picture if we have one
                        if ( got_picture )
                        {
-                               if ( ( image_size = allocate_buffer( frame_properties, codec_context, buffer, format, width, height ) ) )
+                               if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
                                {
+                                       // Workaround 1088 encodings missing cropping info.
+                                       if ( *height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
+                                               *height = 1080;
 #ifdef VDPAU
-                                       if ( this->vdpau )
+                                       if ( self->vdpau )
                                        {
-                                               if ( this->vdpau->is_decoded )
+                                               if ( self->vdpau->is_decoded )
                                                {
-                                                       struct vdpau_render_state *render = (struct vdpau_render_state*) this->av_frame->data[0];
+                                                       struct vdpau_render_state *render = (struct vdpau_render_state*) self->av_frame->data[0];
                                                        void *planes[3];
                                                        uint32_t pitches[3];
                                                        VdpYCbCrFormat dest_format = VDP_YCBCR_FORMAT_YV12;
-                                                       AVPicture picture;
                                                        
-                                                       if ( !this->vdpau->buffer )
-                                                               this->vdpau->buffer = mlt_pool_alloc( codec_context->width * codec_context->height * 3 / 2 );
-                                                       picture.data[0] = planes[0] = this->vdpau->buffer;
-                                                       picture.data[2] = planes[1] = this->vdpau->buffer + codec_context->width * codec_context->height;
-                                                       picture.data[1] = planes[2] = this->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
-                                                       picture.linesize[0] = pitches[0] = codec_context->width;
-                                                       picture.linesize[1] = pitches[1] = codec_context->width / 2;
-                                                       picture.linesize[2] = pitches[2] = codec_context->width / 2;
+                                                       if ( !self->vdpau->buffer )
+                                                               self->vdpau->buffer = mlt_pool_alloc( codec_context->width * codec_context->height * 3 / 2 );
+                                                       self->av_frame->data[0] = planes[0] = self->vdpau->buffer;
+                                                       self->av_frame->data[2] = planes[1] = self->vdpau->buffer + codec_context->width * codec_context->height;
+                                                       self->av_frame->data[1] = planes[2] = self->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
+                                                       self->av_frame->linesize[0] = pitches[0] = codec_context->width;
+                                                       self->av_frame->linesize[1] = pitches[1] = codec_context->width / 2;
+                                                       self->av_frame->linesize[2] = pitches[2] = codec_context->width / 2;
 
                                                        VdpStatus status = vdp_surface_get_bits( render->surface, dest_format, planes, pitches );
                                                        if ( status == VDP_STATUS_OK )
                                                        {
-                                                               convert_image( (AVFrame*) &picture, *buffer, PIX_FMT_YUV420P, format, *width, *height );
+                                                               convert_image( self->av_frame, *buffer, PIX_FMT_YUV420P,
+                                                                       format, *width, *height, self->colorspace );
                                                        }
                                                        else
                                                        {
                                                                mlt_log_error( MLT_PRODUCER_SERVICE(producer), "VDPAU Error: %s\n", vdp_get_error_string( status ) );
-                                                               this->vdpau->is_decoded = 0;
+                                                               image_size = self->vdpau->is_decoded = 0;
                                                        }
                                                }
                                                else
                                                {
                                                        mlt_log_error( MLT_PRODUCER_SERVICE(producer), "VDPAU error in VdpDecoderRender\n" );
-                                                       got_picture = 0;
+                                                       image_size = got_picture = 0;
                                                }
                                        }
                                        else
 #endif
-                                       convert_image( this->av_frame, *buffer, codec_context->pix_fmt, format, *width, *height );
-                                       this->top_field_first |= this->av_frame->top_field_first;
-                                       this->current_position = int_position;
-                                       this->got_picture = 1;
+                                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
+                                               format, *width, *height, self->colorspace );
+                                       self->top_field_first |= self->av_frame->top_field_first;
+                                       self->current_position = int_position;
+                                       self->got_picture = 1;
                                }
                                else
                                {
                                        got_picture = 0;
                                }
                        }
+                       if ( self->seekable || pkt.stream_index != self->audio_index )
+                               av_free_packet( &pkt );
                }
        }
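
For non-seekable sources the video path can no longer discard audio packets it happens to read (and vice versa), since there is no way to go back for them; instead each side duplicates the stray packet and queues it for the other under packets_mutex. The queueing half of that pattern, reduced to two illustrative helpers built from the same calls the patch uses (malloc/free come from the file's existing includes):

    // Take ownership of a packet that belongs to the other stream and queue it.
    static void queue_packet( mlt_deque queue, AVPacket *pkt )
    {
        if ( !av_dup_packet( pkt ) )      // make the payload self-owned so it survives the next read
        {
            AVPacket *copy = malloc( sizeof( AVPacket ) );
            *copy = *pkt;
            mlt_deque_push_back( queue, copy );
        }
    }

    // Pop a previously queued packet, if any; returns 1 when pkt was filled in.
    static int dequeue_packet( mlt_deque queue, AVPacket *pkt )
    {
        AVPacket *head = (AVPacket*) mlt_deque_pop_front( queue );
        if ( !head )
            return 0;
        *pkt = *head;
        free( head );
        return 1;
    }

This is also why the loop above only calls av_free_packet() when the packet was not handed over to the audio queue.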
 
-       avformat_unlock();
-
-       if ( this->got_picture && image_size > 0 && this->image_cache )
+       if ( self->got_picture && image_size > 0 && self->image_cache )
        {
                // Copy buffer to image cache   
                uint8_t *image = mlt_pool_alloc( image_size );
                memcpy( image, *buffer, image_size );
-               mlt_cache_put( this->image_cache, (void*) position, image, *format, mlt_pool_release );
+               mlt_cache_put( self->image_cache, (void*) position, image, *format, mlt_pool_release );
+       }
+       // Try to duplicate the last image if there was a decoding failure
+       else if ( !image_size && self->av_frame && self->av_frame->linesize[0] )
+       {
+               // Duplicate it
+               if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
+               {
+                       // Workaround 1088 encodings missing cropping info.
+                       if ( *height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
+                               *height = 1080;
+#ifdef VDPAU
+                       if ( self->vdpau && self->vdpau->buffer )
+                       {
+                               AVPicture picture;
+                               picture.data[0] = self->vdpau->buffer;
+                               picture.data[2] = self->vdpau->buffer + codec_context->width * codec_context->height;
+                               picture.data[1] = self->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
+                               picture.linesize[0] = codec_context->width;
+                               picture.linesize[1] = codec_context->width / 2;
+                               picture.linesize[2] = codec_context->width / 2;
+                               convert_image( (AVFrame*) &picture, *buffer,
+                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace );
+                       }
+                       else
+#endif
+                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
+                               format, *width, *height, self->colorspace );
+                       self->got_picture = 1;
+               }
+               else
+                       mlt_frame_get_image( frame, buffer, format, width, height, writable );
        }
 
+       // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
+       self->video_expected = position + 1;
+
 exit_get_image:
+
+       pthread_mutex_unlock( &self->video_mutex );
+
        // Set the progressive flag
        if ( mlt_properties_get( properties, "force_progressive" ) )
                mlt_properties_set_int( frame_properties, "progressive", !!mlt_properties_get_int( properties, "force_progressive" ) );
-       else if ( this->av_frame )
-               mlt_properties_set_int( frame_properties, "progressive", !this->av_frame->interlaced_frame );
+       else if ( self->av_frame )
+               mlt_properties_set_int( frame_properties, "progressive", !self->av_frame->interlaced_frame );
 
        // Set the field order property for this frame
-       mlt_properties_set_int( frame_properties, "top_field_first", this->top_field_first );
+       if ( mlt_properties_get( properties, "force_tff" ) )
+               mlt_properties_set_int( frame_properties, "top_field_first", !!mlt_properties_get_int( properties, "force_tff" ) );
+       else
+               mlt_properties_set_int( frame_properties, "top_field_first", self->top_field_first );
 
-       // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
-       this->video_expected = position + 1;
+       // Record the selected track's (possibly overridden) immutable source attributes on the producer.
+       mlt_service_lock( MLT_PRODUCER_SERVICE( producer ) );
+       mlt_properties_set_int( properties, "meta.media.top_field_first", self->top_field_first );
+       mlt_properties_set_int( properties, "meta.media.progressive", mlt_properties_get_int( frame_properties, "progressive" ) );
+       mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
 
-       return !this->got_picture;
+       return !self->got_picture;
 }
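
Producer properties whose names match AVOptions are forwarded to the decoder context by apply_properties() below, and with libavcodec major version > 52 the pass is repeated for the codec's private options. As a purely illustrative example, something along the lines of

    melt avformat:clip.ts skip_loop_filter=all

would reach the decoder through this mechanism, assuming the option name and value are ones the installed libavcodec recognises.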
 
 /** Process properties as AVOptions and apply to AV context obj
@@ -1287,13 +1676,13 @@ static void apply_properties( void *obj, mlt_properties properties, int flags )
 /** Initialize the video codec context.
  */
 
-static int video_codec_init( producer_avformat this, int index, mlt_properties properties )
+static int video_codec_init( producer_avformat self, int index, mlt_properties properties )
 {
        // Initialise the codec if necessary
-       if ( !this->video_codec )
+       if ( !self->video_codec )
        {
                // Get the video stream
-               AVStream *stream = this->video_format->streams[ index ];
+               AVStream *stream = self->video_format->streams[ index ];
 
                // Get codec context
                AVCodecContext *codec_context = stream->codec;
@@ -1305,14 +1694,14 @@ static int video_codec_init( producer_avformat this, int index, mlt_properties p
                {
                        if ( ( codec = avcodec_find_decoder_by_name( "h264_vdpau" ) ) )
                        {
-                               if ( vdpau_init( this ) )
+                               if ( vdpau_init( self ) )
                                {
-                                       this->video_codec = codec_context;
-                                       if ( !vdpau_decoder_init( this ) )
+                                       self->video_codec = codec_context;
+                                       if ( !vdpau_decoder_init( self ) )
                                                vdpau_decoder_close();
                                }
                        }
-                       if ( !this->vdpau )
+                       if ( !self->vdpau )
                                codec = avcodec_find_decoder( codec_context->codec_id );
                }
 #endif
@@ -1322,74 +1711,139 @@ static int video_codec_init( producer_avformat this, int index, mlt_properties p
                if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
                        thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
                if ( thread_count > 1 )
-               {
-                       avcodec_thread_init( codec_context, thread_count );
                        codec_context->thread_count = thread_count;
-               }
 
                // If we don't have a codec and we can't initialise it, we can't do much more...
                avformat_lock( );
                if ( codec && avcodec_open( codec_context, codec ) >= 0 )
                {
                        // Now store the codec with its destructor
-                       this->video_codec = codec_context;
+                       self->video_codec = codec_context;
                }
                else
                {
                        // Remember that we can't use this later
-                       this->video_index = -1;
+                       self->video_index = -1;
+                       avformat_unlock( );
+                       return 0;
                }
                avformat_unlock( );
 
                // Process properties as AVOptions
                apply_properties( codec_context, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
+#if LIBAVCODEC_VERSION_MAJOR > 52
+               if ( codec->priv_class && codec_context->priv_data )
+                       apply_properties( codec_context->priv_data, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
+#endif
 
                // Reset some image properties
-               mlt_properties_set_int( properties, "width", this->video_codec->width );
-               mlt_properties_set_int( properties, "height", this->video_codec->height );
+               mlt_properties_set_int( properties, "width", self->video_codec->width );
+               mlt_properties_set_int( properties, "height", self->video_codec->height );
                // For DV, we'll just use the saved aspect ratio
                if ( codec_context->codec_id != CODEC_ID_DVVIDEO )
-                       mlt_properties_set_double( properties, "aspect_ratio", get_aspect_ratio( stream, this->video_codec, NULL ) );
+                       get_aspect_ratio( properties, stream, self->video_codec, NULL );
 
                // Determine the fps first from the codec
-               double source_fps = (double) this->video_codec->time_base.den /
-                                                                  ( this->video_codec->time_base.num == 0 ? 1 : this->video_codec->time_base.num );
+               double source_fps = (double) self->video_codec->time_base.den /
+                                                                  ( self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
                
                if ( mlt_properties_get( properties, "force_fps" ) )
                {
                        source_fps = mlt_properties_get_double( properties, "force_fps" );
-                       stream->time_base = av_d2q( source_fps, 255 );
+                       stream->time_base = av_d2q( source_fps, 1024 );
+                       mlt_properties_set_int( properties, "meta.media.frame_rate_num", stream->time_base.num );
+                       mlt_properties_set_int( properties, "meta.media.frame_rate_den", stream->time_base.den );
                }
                else
                {
                        // If the muxer reports a frame rate different than the codec
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
+                       double muxer_fps = av_q2d( stream->avg_frame_rate );
+                       if ( isnan( muxer_fps ) || muxer_fps == 0 )
+                               muxer_fps = av_q2d( stream->r_frame_rate );
+#else
                        double muxer_fps = av_q2d( stream->r_frame_rate );
+#endif
                        // Choose the lesser - the wrong one tends to be off by some multiple of 10
                        source_fps = FFMIN( source_fps, muxer_fps );
+                       if ( source_fps >= 1.0 && ( source_fps < muxer_fps || isnan( muxer_fps ) ) )
+                       {
+                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", self->video_codec->time_base.den );
+                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
+                       }
+                       else if ( muxer_fps > 0 )
+                       {
+                               AVRational frame_rate = stream->r_frame_rate;
+                               // With my samples, when r_frame_rate != 1000 but avg_frame_rate is valid,
+                               // avg_frame_rate gives an approximate value that does not match the media well.
+                               // Also, on my sample where r_frame_rate = 1000, using avg_frame_rate directly
+                               // results in very choppy output, but a slightly different value works great.
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
+                               if ( av_q2d( stream->r_frame_rate ) >= 1000 && av_q2d( stream->avg_frame_rate ) > 0 )
+                                       frame_rate = av_d2q( av_q2d( stream->avg_frame_rate ), 1024 );
+#endif
+                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
+                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
+                       }
+                       else
+                       {
+                               source_fps = mlt_producer_get_fps( self->parent );
+                               AVRational frame_rate = av_d2q( source_fps, 255 );
+                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
+                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
+                       }
                }
 
-               // We'll use fps if it's available
+               // source_fps is deprecated in favor of meta.media.frame_rate_num and .frame_rate_den
                if ( source_fps > 0 )
                        mlt_properties_set_double( properties, "source_fps", source_fps );
                else
-                       mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( this->parent ) );
+                       mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( self->parent ) );
+
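
The frame-rate branch above folds several heuristics into one decision: trust the codec time base when it disagrees downward with the muxer, otherwise prefer r_frame_rate, and only reach for avg_frame_rate when r_frame_rate reports the bogus 1000 fps. The following standalone sketch of that precedence uses plain doubles and hypothetical values instead of the real AVStream fields; it is illustrative only and not part of this patch.

#include <math.h>
#include <stdio.h>

/* Illustrative only: mirrors the precedence used above with plain doubles
 * instead of AVRational fields. Not part of the patch. */
static double choose_source_fps( double codec_fps, double avg_fps,
                                 double r_fps, double profile_fps )
{
    double muxer_fps = ( isnan( avg_fps ) || avg_fps == 0 ) ? r_fps : avg_fps;
    /* FFMIN-style: a NaN muxer_fps keeps codec_fps */
    double fps = codec_fps > muxer_fps ? muxer_fps : codec_fps;

    if ( fps >= 1.0 && ( fps < muxer_fps || isnan( muxer_fps ) ) )
        return fps;          /* the codec time base wins */
    else if ( muxer_fps > 0 )
        /* prefer r_frame_rate unless it is the bogus 1000 fps case */
        return ( r_fps >= 1000 && avg_fps > 0 ) ? avg_fps : r_fps;
    else
        return profile_fps;  /* nothing usable - fall back to the profile */
}

int main( void )
{
    /* hypothetical values: a 25 fps PAL stream whose muxer reports 1000 fps */
    printf( "%g\n", choose_source_fps( 25.0, 25.0, 1000.0, 25.0 ) );
    return 0;
}
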
+               // Set the YUV colorspace from override or detect
+               self->colorspace = mlt_properties_get_int( properties, "force_colorspace" );
+#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)              
+               if ( ! self->colorspace )
+               {
+                       switch ( self->video_codec->colorspace )
+                       {
+                       case AVCOL_SPC_SMPTE240M:
+                               self->colorspace = 240;
+                               break;
+                       case AVCOL_SPC_BT470BG:
+                       case AVCOL_SPC_SMPTE170M:
+                               self->colorspace = 601;
+                               break;
+                       case AVCOL_SPC_BT709:
+                               self->colorspace = 709;
+                               break;
+                       default:
+                               // This is a heuristic Charles Poynton suggests in "Digital Video and HDTV"
+                               self->colorspace = self->video_codec->width * self->video_codec->height > 750000 ? 709 : 601;
+                               break;
+                       }
+               }
+#endif
+               // Let apps get the chosen colorspace
+               mlt_properties_set_int( properties, "meta.media.colorspace", self->colorspace );
        }
-       return this->video_codec && this->video_index > -1;
+       return self->video_codec && self->video_index > -1;
 }
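
When the codec does not tag a colorspace, the default case above falls back to the size heuristic Poynton describes: anything bigger than roughly standard definition is assumed to be Rec. 709. Here is a small illustrative sketch with a few common frame sizes; the names and example values are mine, not part of the patch.

#include <stdio.h>

/* Guess the YUV colorspace from the picture size when it is untagged. */
static int guess_colorspace( int width, int height )
{
    return width * height > 750000 ? 709 : 601;
}

int main( void )
{
    printf( "720x576   -> %d\n", guess_colorspace( 720, 576 ) );   /* 601 */
    printf( "1280x720  -> %d\n", guess_colorspace( 1280, 720 ) );  /* 709 */
    printf( "1920x1080 -> %d\n", guess_colorspace( 1920, 1080 ) ); /* 709 */
    return 0;
}
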
 
 /** Set up video handling.
 */
 
-static void producer_set_up_video( producer_avformat this, mlt_frame frame )
+static void producer_set_up_video( producer_avformat self, mlt_frame frame )
 {
        // Get the producer
-       mlt_producer producer = this->parent;
+       mlt_producer producer = self->parent;
 
        // Get the properties
        mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
 
        // Fetch the video format context
-       AVFormatContext *context = this->video_format;
+       AVFormatContext *context = self->video_format;
 
        // Get the video_index
        int index = mlt_properties_get_int( properties, "video_index" );
@@ -1397,23 +1851,9 @@ static void producer_set_up_video( producer_avformat this, mlt_frame frame )
        // Reopen the file if necessary
        if ( !context && index > -1 )
        {
-               mlt_events_block( properties, producer );
-               producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
+               producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
                        mlt_properties_get( properties, "resource" ) );
-               context = this->video_format;
-               if ( this->dummy_context )
-               {
-                       avformat_lock();
-                       av_close_input_file( this->dummy_context );
-                       avformat_unlock();
-               }
-               this->dummy_context = NULL;
-               mlt_events_unblock( properties, producer );
-               if ( this->audio_format )
-                       get_audio_streams_info( this );
-
-               // Process properties as AVOptions
-               apply_properties( context, properties, AV_OPT_FLAG_DECODING_PARAM );
+               context = self->video_format;
        }
 
        // Exception handling for video_index
@@ -1433,24 +1873,24 @@ static void producer_set_up_video( producer_avformat this, mlt_frame frame )
        }
 
        // Update the video properties if the index changed
-       if ( index != this->video_index )
+       if ( index != self->video_index )
        {
                // Reset the video properties if the index changed
-               this->video_index = index;
-               if ( this->video_codec )
+               self->video_index = index;
+               if ( self->video_codec )
                {
                        avformat_lock();
-                       avcodec_close( this->video_codec );
+                       avcodec_close( self->video_codec );
                        avformat_unlock();
                }
-               this->video_codec = NULL;
+               self->video_codec = NULL;
        }
 
        // Get the frame properties
        mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
        // Get the codec
-       if ( context && index > -1 && video_codec_init( this, index, properties ) )
+       if ( context && index > -1 && video_codec_init( self, index, properties ) )
        {
                // Set the frame properties
                double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" );
@@ -1458,14 +1898,25 @@ static void producer_set_up_video( producer_avformat this, mlt_frame frame )
                        force_aspect_ratio : mlt_properties_get_double( properties, "aspect_ratio" );
 
                // Set the width and height
-               mlt_properties_set_int( frame_properties, "width", this->video_codec->width );
-               mlt_properties_set_int( frame_properties, "height", this->video_codec->height );
-               mlt_properties_set_int( frame_properties, "real_width", this->video_codec->width );
-               mlt_properties_set_int( frame_properties, "real_height", this->video_codec->height );
+               mlt_properties_set_int( frame_properties, "width", self->video_codec->width );
+               mlt_properties_set_int( frame_properties, "height", self->video_codec->height );
+               // real_width and real_height are deprecated in favor of meta.media.width and .height
+               mlt_properties_set_int( properties, "meta.media.width", self->video_codec->width );
+               mlt_properties_set_int( properties, "meta.media.height", self->video_codec->height );
+               mlt_properties_set_int( frame_properties, "real_width", self->video_codec->width );
+               mlt_properties_set_int( frame_properties, "real_height", self->video_codec->height );
                mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
+               mlt_properties_set_int( frame_properties, "colorspace", self->colorspace );
+
+               // Work around 1088-line encodings missing cropping info.
+               if ( self->video_codec->height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
+               {
+                       mlt_properties_set_int( properties, "meta.media.height", 1080 );
+                       mlt_properties_set_int( frame_properties, "real_height", 1080 );
+               }
 
                // Add our image operation
-               mlt_frame_push_service( frame, this );
+               mlt_frame_push_service( frame, self );
                mlt_frame_push_get_image( frame, producer_get_image );
        }
        else
@@ -1475,26 +1926,26 @@ static void producer_set_up_video( producer_avformat this, mlt_frame frame )
        }
 }
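
The 1088 workaround above exists because macroblock codecs round the coded height up to the next multiple of 16, so 1080 active lines are stored as 1088, and some encodings fail to carry the cropping information that would restore the display height. A tiny sketch of that rounding, illustrative only and not part of the patch.

#include <stdio.h>

/* Coded height = display height rounded up to a whole number of macroblocks. */
static int coded_height( int display_height, int mb_size )
{
    return ( display_height + mb_size - 1 ) / mb_size * mb_size;
}

int main( void )
{
    printf( "%d\n", coded_height( 1080, 16 ) ); /* prints 1088 */
    return 0;
}
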
 
-static int seek_audio( producer_avformat this, mlt_position position, double timecode, int *ignore )
+static int seek_audio( producer_avformat self, mlt_position position, double timecode, int *ignore )
 {
        int paused = 0;
 
        // Seek if necessary
-       if ( position != this->audio_expected )
+       if ( self->seekable && position != self->audio_expected )
        {
-               if ( position + 1 == this->audio_expected )
+               if ( position + 1 == self->audio_expected )
                {
                        // We're paused - silence required
                        paused = 1;
                }
-               else if ( !this->seekable && position > this->audio_expected && ( position - this->audio_expected ) < 250 )
+               else if ( !self->seekable && position > self->audio_expected && ( position - self->audio_expected ) < 250 )
                {
                        // Fast forward - seeking is inefficient for small distances - just ignore following frames
-                       *ignore = position - this->audio_expected;
+                       *ignore = position - self->audio_expected;
                }
-               else if ( position < this->audio_expected || position - this->audio_expected >= 12 )
+               else if ( position < self->audio_expected || position - self->audio_expected >= 12 )
                {
-                       AVFormatContext *context = this->audio_format;
+                       AVFormatContext *context = self->audio_format;
                        int64_t timestamp = ( int64_t )( timecode * AV_TIME_BASE + 0.5 );
                        if ( context->start_time != AV_NOPTS_VALUE )
                                timestamp += context->start_time;
@@ -1508,42 +1959,52 @@ static int seek_audio( producer_avformat this, mlt_position position, double tim
                        // Clear the usage in the audio buffer
                        int i = MAX_AUDIO_STREAMS + 1;
                        while ( --i )
-                               this->audio_used[i - 1] = 0;
+                               self->audio_used[i - 1] = 0;
                }
        }
        return paused;
 }
 
-static int decode_audio( producer_avformat this, int *ignore, AVPacket *pkt, int channels, int samples, double timecode, double fps )
+static int sample_bytes( AVCodecContext *context )
+{
+#if LIBAVCODEC_VERSION_MAJOR > 52
+       return av_get_bits_per_sample_fmt( context->sample_fmt ) / 8;
+#else
+       return av_get_bits_per_sample_format( context->sample_fmt ) / 8;
+#endif
+}
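
Since the audio buffers are now raw uint8_t rather than int16_t, every offset below is scaled by the decoded format's bytes per sample, which is what sample_bytes() supplies. The stand-in below is limited to the formats this producer actually emits; the enum and function names are local to the sketch, not libavcodec's.

#include <stdio.h>

/* Bytes per sample for the three formats handled here. */
enum sketch_sample_fmt { FMT_S16, FMT_S32, FMT_FLT };

static int bytes_per_sample( enum sketch_sample_fmt fmt )
{
    switch ( fmt )
    {
    case FMT_S16: return 2;
    case FMT_S32: return 4;
    case FMT_FLT: return 4;
    }
    return 0;
}

int main( void )
{
    /* a 1024-sample stereo S32 block occupies 1024 * 2 * 4 = 8192 bytes */
    printf( "%d\n", 1024 * 2 * bytes_per_sample( FMT_S32 ) );
    return 0;
}
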
+
+static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int channels, int samples, double timecode, double fps )
 {
        // Fetch the audio_format
-       AVFormatContext *context = this->audio_format;
+       AVFormatContext *context = self->audio_format;
 
        // Get the current stream index
-       int index = pkt->stream_index;
+       int index = pkt.stream_index;
 
        // Get codec context
-       AVCodecContext *codec_context = this->audio_codec[ index ];
+       AVCodecContext *codec_context = self->audio_codec[ index ];
 
        // Obtain the resample context if it exists (not always needed)
-       ReSampleContext *resample = this->audio_resample[ index ];
+       ReSampleContext *resample = self->audio_resample[ index ];
 
        // Obtain the audio buffers
-       int16_t *audio_buffer = this->audio_buffer[ index ];
-       int16_t *decode_buffer = this->decode_buffer[ index ];
+       uint8_t *audio_buffer = self->audio_buffer[ index ];
+       uint8_t *decode_buffer = self->decode_buffer[ index ];
 
-       int audio_used = this->audio_used[ index ];
-       uint8_t *ptr = pkt->data;
-       int len = pkt->size;
+       int audio_used = self->audio_used[ index ];
+       uint8_t *ptr = pkt.data;
+       int len = pkt.size;
        int ret = 0;
 
        while ( ptr && ret >= 0 && len > 0 )
        {
-               int data_size = sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE;
+               int sizeof_sample = resample? sizeof( int16_t ) : sample_bytes( codec_context );
+               int data_size = self->audio_buffer_size[ index ];
 
                // Decode the audio
 #if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
-               ret = avcodec_decode_audio3( codec_context, decode_buffer, &data_size, pkt );
+               ret = avcodec_decode_audio3( codec_context, (int16_t*) decode_buffer, &data_size, &pkt );
 #elif (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
                ret = avcodec_decode_audio2( codec_context, decode_buffer, &data_size, ptr, len );
 #else
@@ -1551,37 +2012,37 @@ static int decode_audio( producer_avformat this, int *ignore, AVPacket *pkt, int
 #endif
                if ( ret < 0 )
                {
-                       mlt_log_warning( MLT_PRODUCER_SERVICE(this->parent), "audio decoding error\n", ret );
+                       mlt_log_warning( MLT_PRODUCER_SERVICE(self->parent), "audio decoding error %d\n", ret );
                        break;
                }
 
-               len -= ret;
-               ptr += ret;
+               pkt.size = len -= ret;
+               pkt.data = ptr += ret;
 
                // If decoded successfully
                if ( data_size > 0 )
                {
                        // Figure out how many samples will be needed after resampling
-                       int convert_samples = data_size / codec_context->channels / ( av_get_bits_per_sample_format( codec_context->sample_fmt ) / 8 );
-                       int samples_needed = this->resample_factor * convert_samples + 1;
-                       
+                       int convert_samples = data_size / codec_context->channels / sample_bytes( codec_context );
+                       int samples_needed = self->resample_factor * convert_samples;
+
                        // Resize audio buffer to prevent overflow
-                       if ( audio_used * channels + samples_needed > this->audio_buffer_size[ index ] )
+                       if ( ( audio_used + samples_needed ) * channels * sizeof_sample > self->audio_buffer_size[ index ] )
                        {
-                               this->audio_buffer_size[ index ] *= 2;
-                               audio_buffer = this->audio_buffer[ index ] = mlt_pool_realloc( audio_buffer, this->audio_buffer_size[ index ] * sizeof(int16_t) );
+                               self->audio_buffer_size[ index ] = ( audio_used + samples_needed * 2 ) * channels * sizeof_sample;
+                               audio_buffer = self->audio_buffer[ index ] = mlt_pool_realloc( audio_buffer, self->audio_buffer_size[ index ] );
                        }
                        if ( resample )
                        {
                                // Copy to audio buffer while resampling
-                               int16_t *source = decode_buffer;
-                               int16_t *dest = &audio_buffer[ audio_used * channels ];
-                               audio_used += audio_resample( resample, dest, source, convert_samples );
+                               uint8_t *source = decode_buffer;
+                               uint8_t *dest = &audio_buffer[ audio_used * channels * sizeof_sample ];
+                               audio_used += audio_resample( resample, (short*) dest, (short*) source, convert_samples );
                        }
                        else
                        {
                                // Straight copy to audio buffer
-                               memcpy( &audio_buffer[ audio_used * codec_context->channels ], decode_buffer, data_size );
+                               memcpy( &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ], decode_buffer, data_size );
                                audio_used += convert_samples;
                        }
 
@@ -1590,121 +2051,135 @@ static int decode_audio( producer_avformat this, int *ignore, AVPacket *pkt, int
                        {
                                *ignore -= 1;
                                audio_used -= samples;
-                               memmove( audio_buffer, &audio_buffer[ samples * (resample? channels : codec_context->channels) ],
-                                        audio_used * sizeof( int16_t ) );
+                               memmove( audio_buffer, &audio_buffer[ samples * (resample? channels : codec_context->channels) * sizeof_sample ],
+                                                audio_used * sizeof_sample );
                        }
                }
        }
 
        // If we're behind, ignore this packet
-       if ( pkt->pts >= 0 )
+       if ( pkt.pts >= 0 )
        {
-               double current_pts = av_q2d( context->streams[ index ]->time_base ) * pkt->pts;
+               double current_pts = av_q2d( context->streams[ index ]->time_base ) * pkt.pts;
                int req_position = ( int )( timecode * fps + 0.5 );
                int int_position = ( int )( current_pts * fps + 0.5 );
                if ( context->start_time != AV_NOPTS_VALUE )
                        int_position -= ( int )( fps * context->start_time / AV_TIME_BASE + 0.5 );
 
-               if ( this->seekable && *ignore == 0 )
+               if ( self->seekable && *ignore == 0 )
                {
                        if ( int_position < req_position )
                                // We are behind, so skip some
                                *ignore = 1;
-                       else if ( int_position > req_position + 2 )
+
+                       // We use nb_streams in this test because the tolerance depends on
+                       // the interleaving of all streams, especially when there is more
+                       // than one audio stream.
+                       else if ( int_position > req_position + context->nb_streams )
                                // We are ahead, so seek backwards some more
-                               seek_audio( this, req_position, timecode - 1.0, ignore );
+                               seek_audio( self, req_position, timecode - 1.0, ignore );
                }
        }
 
-       this->audio_used[ index ] = audio_used;
+       self->audio_used[ index ] = audio_used;
 
        return ret;
 }
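
The catch-up test near the end of decode_audio converts the packet PTS into a frame position and compares it with the requested one, allowing a tolerance of nb_streams frames to absorb interleaving. Below is a worked example with hypothetical numbers (a 1/90000 time base and a 25 fps project); it is not part of the patch.

#include <stdio.h>

int main( void )
{
    long long pts = 3600000;                /* from a hypothetical packet */
    double time_base = 1.0 / 90000.0;
    double fps = 25.0;
    double timecode = 40.0;                 /* requested position in seconds */
    int nb_streams = 3;                     /* say, 1 video + 2 audio */

    double current_pts = pts * time_base;                 /* 40.0 s */
    int req_position = (int)( timecode * fps + 0.5 );     /* frame 1000 */
    int int_position = (int)( current_pts * fps + 0.5 );  /* frame 1000 */

    if ( int_position < req_position )
        printf( "behind: skip these samples\n" );
    else if ( int_position > req_position + nb_streams )
        printf( "ahead: seek backwards and retry\n" );
    else
        printf( "close enough: keep the samples\n" );
    return 0;
}
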
 
 /** Get the audio from a frame.
 */
-
 static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples )
 {
        // Get the producer
-       producer_avformat this = mlt_frame_pop_audio( frame );
+       producer_avformat self = mlt_frame_pop_audio( frame );
 
+       pthread_mutex_lock( &self->audio_mutex );
+       
        // Obtain the frame number of this frame
        mlt_position position = mlt_properties_get_position( MLT_FRAME_PROPERTIES( frame ), "avformat_position" );
 
        // Calculate the real time code
-       double real_timecode = producer_time_of_frame( this->parent, position );
+       double real_timecode = producer_time_of_frame( self->parent, position );
 
        // Get the producer fps
-       double fps = mlt_producer_get_fps( this->parent );
+       double fps = mlt_producer_get_fps( self->parent );
 
        // Number of frames to ignore (for ffwd)
-       int ignore = 0;
+       int ignore[ MAX_AUDIO_STREAMS ] = { 0 };
 
        // Flag for paused (silence)
-       int paused = seek_audio( this, position, real_timecode, &ignore );
+       int paused = seek_audio( self, position, real_timecode, &ignore[0] );
+
+       // Initialize ignore for all streams from the seek return value
+       int i = MAX_AUDIO_STREAMS;
+       while ( i-- )
+               ignore[i] = ignore[0];
 
        // Fetch the audio_format
-       AVFormatContext *context = this->audio_format;
+       AVFormatContext *context = self->audio_format;
+
+       int sizeof_sample = sizeof( int16_t );
        
        // Determine the tracks to use
-       int index = this->audio_index;
-       int index_max = this->audio_index + 1;
-       if ( this->audio_index == INT_MAX )
+       int index = self->audio_index;
+       int index_max = self->audio_index + 1;
+       if ( self->audio_index == INT_MAX )
        {
                index = 0;
                index_max = context->nb_streams;
-               *channels = this->total_channels;
-               *frequency = this->max_frequency;
+               *channels = self->total_channels;
+               *samples = *samples * FFMAX( self->max_frequency, *frequency ) / *frequency;
+               *frequency = FFMAX( self->max_frequency, *frequency );
        }
 
        // Initialize the resamplers and buffers
        for ( ; index < index_max; index++ )
        {
                // Get codec context
-               AVCodecContext *codec_context = this->audio_codec[ index ];
+               AVCodecContext *codec_context = self->audio_codec[ index ];
 
-               if ( codec_context && !this->audio_buffer[ index ] )
+               if ( codec_context && !self->audio_buffer[ index ] )
                {
                        // Check for resample and create if necessary
                        if ( codec_context->channels <= 2 )
                        {
                                // Determine by how much resampling will increase the number of samples
-                               double resample_factor = this->audio_index == INT_MAX ? 1 : (double) *channels / codec_context->channels;
+                               double resample_factor = self->audio_index == INT_MAX ? 1 : (double) *channels / codec_context->channels;
                                resample_factor *= (double) *frequency / codec_context->sample_rate;
-                               if ( resample_factor > this->resample_factor )
-                                       this->resample_factor = resample_factor;
+                               if ( resample_factor > self->resample_factor )
+                                       self->resample_factor = resample_factor;
                                
                                // Create the resampler
 #if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(15<<8)+0))
-                               this->audio_resample[ index ] = av_audio_resample_init(
-                                       this->audio_index == INT_MAX ? codec_context->channels : *channels,
+                               self->audio_resample[ index ] = av_audio_resample_init(
+                                       self->audio_index == INT_MAX ? codec_context->channels : *channels,
                                        codec_context->channels, *frequency, codec_context->sample_rate,
                                        SAMPLE_FMT_S16, codec_context->sample_fmt, 16, 10, 0, 0.8 );
 #else
-                               this->audio_resample[ index ] = audio_resample_init(
-                                       this->audio_index == INT_MAX ? codec_context->channels : *channels,
+                               self->audio_resample[ index ] = audio_resample_init(
+                                       self->audio_index == INT_MAX ? codec_context->channels : *channels,
                                        codec_context->channels, *frequency, codec_context->sample_rate );
 #endif
                        }
                        else
                        {
-                               codec_context->request_channels = this->audio_index == INT_MAX ? codec_context->channels : *channels;
+                               codec_context->request_channels = self->audio_index == INT_MAX ? codec_context->channels : *channels;
+                               sizeof_sample = sample_bytes( codec_context );
                        }
 
                        // Check for audio buffer and create if necessary
-                       this->audio_buffer_size[ index ] = AVCODEC_MAX_AUDIO_FRAME_SIZE;
-                       this->audio_buffer[ index ] = mlt_pool_alloc( this->audio_buffer_size[ index ] * sizeof( int16_t ) );
+                       self->audio_buffer_size[ index ] = AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof_sample;
+                       self->audio_buffer[ index ] = mlt_pool_alloc( self->audio_buffer_size[ index ] );
 
                        // Check for decoder buffer and create if necessary
-                       this->decode_buffer[ index ] = av_malloc( AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof( int16_t ) );
+                       self->decode_buffer[ index ] = av_malloc( self->audio_buffer_size[ index ] );
                }
        }
 
        // Get the audio if required
        if ( !paused )
        {
-               int ret = 0;
+               int ret = 0;
                int got_audio = 0;
                AVPacket pkt;
 
@@ -1713,96 +2188,147 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
                // If not resampling, give the consumer more than requested.
                // It requested a number of samples based on the requested frame rate.
                // Do not clean this up with a samples *= ...!
-               if ( this->audio_index != INT_MAX && ! this->audio_resample[ this->audio_index ] )
-                       *samples = *samples * this->audio_codec[ this->audio_index ]->sample_rate / *frequency;
+               if ( self->audio_index != INT_MAX && ! self->audio_resample[ self->audio_index ] )
+                       *samples = *samples * self->audio_codec[ self->audio_index ]->sample_rate / *frequency;
 
                while ( ret >= 0 && !got_audio )
                {
                        // Check if the buffer already contains the samples required
-                       if ( this->audio_index != INT_MAX && this->audio_used[ this->audio_index ] >= *samples && ignore == 0 )
+                       if ( self->audio_index != INT_MAX &&
+                                self->audio_used[ self->audio_index ] >= *samples &&
+                                ignore[ self->audio_index ] == 0 )
                        {
                                got_audio = 1;
                                break;
                        }
+                       else if ( self->audio_index == INT_MAX )
+                       {
+                               // Check if there is enough audio for all streams
+                               got_audio = 1;
+                               for ( index = 0; got_audio && index < context->nb_streams; index++ )
+                                       if ( ( self->audio_codec[ index ] && self->audio_used[ index ] < *samples ) || ignore[ index ] )
+                                               got_audio = 0;
+                               if ( got_audio )
+                                       break;
+                       }
 
                        // Read a packet
-                       ret = av_read_frame( context, &pkt );
+                       pthread_mutex_lock( &self->packets_mutex );
+                       if ( mlt_deque_count( self->apackets ) )
+                       {
+                               AVPacket *tmp = (AVPacket*) mlt_deque_pop_front( self->apackets );
+                               pkt = *tmp;
+                               free( tmp );
+                       }
+                       else
+                       {
+                               ret = av_read_frame( context, &pkt );
+                               if ( ret >= 0 && !self->seekable && pkt.stream_index == self->video_index )
+                               {
+                                       if ( !av_dup_packet( &pkt ) )
+                                       {
+                                               AVPacket *tmp = malloc( sizeof(AVPacket) );
+                                               *tmp = pkt;
+                                               mlt_deque_push_back( self->vpackets, tmp );
+                                       }
+                               }
+                       }
+                       pthread_mutex_unlock( &self->packets_mutex );
 
                        // We only deal with audio from the selected audio index
-                       if ( ret >= 0 && pkt.data && pkt.size > 0 && ( pkt.stream_index == this->audio_index ||
-                                ( this->audio_index == INT_MAX && context->streams[ pkt.stream_index ]->codec->codec_type == CODEC_TYPE_AUDIO ) ) )
+                       index = pkt.stream_index;
+                       if ( ret >= 0 && pkt.data && pkt.size > 0 && ( index == self->audio_index ||
+                                ( self->audio_index == INT_MAX && context->streams[ index ]->codec->codec_type == CODEC_TYPE_AUDIO ) ) )
                        {
-                               int channels2 = this->audio_index == INT_MAX ? this->audio_codec[pkt.stream_index]->channels : *channels;
-                               ret = decode_audio( this, &ignore, &pkt, channels2, *samples, real_timecode, fps );
+                               int channels2 = ( self->audio_index == INT_MAX || !self->audio_resample[index] ) ?
+                                       self->audio_codec[index]->channels : *channels;
+                               ret = decode_audio( self, &ignore[index], pkt, channels2, *samples, real_timecode, fps );
                        }
-                       av_free_packet( &pkt );
 
-                       if ( this->audio_index == INT_MAX && ret >= 0 )
-                       {
-                               // Determine if there is enough audio for all streams
-                               got_audio = 1;
-                               for ( index = 0; index < context->nb_streams; index++ )
+                       if ( self->seekable || index != self->video_index )
+                               av_free_packet( &pkt );
+
+               }
+
+               // Set some additional return values
+               *format = mlt_audio_s16;
+               if ( self->audio_index != INT_MAX && !self->audio_resample[ self->audio_index ] )
+               {
+                       index = self->audio_index;
+                       *channels = self->audio_codec[ index ]->channels;
+                       *frequency = self->audio_codec[ index ]->sample_rate;
+                       *format = self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_S32 ? mlt_audio_s32le
+                               : self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_FLT ? mlt_audio_f32le
+                               : mlt_audio_s16;
+                       sizeof_sample = sample_bytes( self->audio_codec[ index ] );
+               }
+               else if ( self->audio_index == INT_MAX )
+               {
+                       // This only works if all audio tracks have the same sample format.
+                       for ( index = 0; index < index_max; index++ )
+                               if ( self->audio_codec[ index ] && !self->audio_resample[ index ] )
                                {
-                                       if ( this->audio_codec[ index ] && this->audio_used[ index ] < *samples )
-                                               got_audio = 0;
+                                       *format = self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_S32 ? mlt_audio_s32le
+                                               : self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_FLT ? mlt_audio_f32le
+                                               : mlt_audio_s16;
+                                       sizeof_sample = sample_bytes( self->audio_codec[ index ] );
+                                       break;
                                }
-                       }
                }
-               
+
                // Allocate and set the frame's audio buffer
-               int size = *samples * *channels * sizeof(int16_t);
+               int size = mlt_audio_format_size( *format, *samples, *channels );
                *buffer = mlt_pool_alloc( size );
-               *format = mlt_audio_s16;
                mlt_frame_set_audio( frame, *buffer, *format, size, mlt_pool_release );
 
                // Interleave tracks if audio_index=all
-               if ( this->audio_index == INT_MAX )
+               if ( self->audio_index == INT_MAX )
                {
-                       int16_t *dest = *buffer;
+                       uint8_t *dest = *buffer;
                        int i;
                        for ( i = 0; i < *samples; i++ )
                        {
                                for ( index = 0; index < index_max; index++ )
-                               if ( this->audio_codec[ index ] )
+                               if ( self->audio_codec[ index ] )
                                {
-                                       int current_channels = this->audio_codec[ index ]->channels;
-                                       int16_t *src = this->audio_buffer[ index ] + i * current_channels;
-                                       memcpy( dest, src, current_channels * sizeof(int16_t) );
-                                       dest += current_channels;
+                                       int current_channels = self->audio_codec[ index ]->channels;
+                                       uint8_t *src = self->audio_buffer[ index ] + i * current_channels * sizeof_sample;
+                                       memcpy( dest, src, current_channels * sizeof_sample );
+                                       dest += current_channels * sizeof_sample;
                                }
                        }
                        for ( index = 0; index < index_max; index++ )
-                       if ( this->audio_codec[ index ] && this->audio_used[ index ] >= *samples )
+                       if ( self->audio_codec[ index ] && self->audio_used[ index ] >= *samples )
                        {
-                               int current_channels = this->audio_codec[ index ]->channels;
-                               int16_t *src = this->audio_buffer[ index ] + *samples * current_channels;
-                               this->audio_used[index] -= *samples;
-                               memmove( this->audio_buffer[ index ], src, this->audio_used[ index ] * current_channels * sizeof(int16_t) );
+                               int current_channels = self->audio_codec[ index ]->channels;
+                               uint8_t *src = self->audio_buffer[ index ] + *samples * current_channels * sizeof_sample;
+                               self->audio_used[index] -= *samples;
+                               memmove( self->audio_buffer[ index ], src, self->audio_used[ index ] * current_channels * sizeof_sample );
                        }
                }
                // Copy a single track to the output buffer
                else
                {
-                       index = this->audio_index;
+                       index = self->audio_index;
 
                        // Now handle the audio if we have enough
-                       if ( this->audio_used[ index ] >= *samples )
+                       if ( self->audio_used[ index ] > 0 )
                        {
-                               int16_t *src = this->audio_buffer[ index ];
-                               memcpy( *buffer, src, *samples * *channels * sizeof(int16_t) );
-                               this->audio_used[ index ] -= *samples;
-                               memmove( src, &src[ *samples * *channels ], this->audio_used[ index ] * *channels * sizeof(int16_t) );
+                               uint8_t *src = self->audio_buffer[ index ];
+                               // copy samples from audio_buffer
+                               size = self->audio_used[ index ] < *samples ? self->audio_used[ index ] : *samples;
+                               memcpy( *buffer, src, size * *channels * sizeof_sample );
+                               // supply the remaining requested samples as silence
+                               if ( *samples > self->audio_used[ index ] )
+                                       memset( *buffer + size * *channels * sizeof_sample, 0, ( *samples - self->audio_used[ index ] ) * *channels * sizeof_sample );
+                               // reposition the samples within audio_buffer
+                               self->audio_used[ index ] -= size;
+                               memmove( src, src + size * *channels * sizeof_sample, self->audio_used[ index ] * *channels * sizeof_sample );
                        }
                        else
                        {
                                // Otherwise fill with silence
-                               memset( *buffer, 0, *samples * *channels * sizeof(int16_t) );
-                       }
-                       if ( !this->audio_resample[ index ] )
-                       {
-                               // TODO: uncomment and remove following line when full multi-channel support is ready
-                               // *channels = codec_context->channels;
-                               *frequency = this->audio_codec[ index ]->sample_rate;
+                               memset( *buffer, 0, *samples * *channels * sizeof_sample );
                        }
                }
        }
@@ -1814,7 +2340,9 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        
        // Regardless of speed (other than paused), we expect to get the next frame
        if ( !paused )
-               this->audio_expected = position + 1;
+               self->audio_expected = position + 1;
+
+       pthread_mutex_unlock( &self->audio_mutex );
 
        return 0;
 }
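
With audio_index=all, producer_get_audio keeps a separate buffer per stream and builds the output frame sample by sample across all streams, which is what keeps the tracks aligned. A minimal sketch of that interleave for two hypothetical mono 16-bit streams follows; the real loop walks variable channel counts and sample sizes and is not limited to this layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main( void )
{
    int16_t left[4]  = { 1, 2, 3, 4 };      /* stream 0, 1 channel */
    int16_t right[4] = { 10, 20, 30, 40 };  /* stream 1, 1 channel */
    int16_t out[8];
    int samples = 4, i;

    /* one sample from each stream per output frame position */
    for ( i = 0; i < samples; i++ )
    {
        memcpy( &out[ i * 2 + 0 ], &left[ i ],  sizeof( int16_t ) );
        memcpy( &out[ i * 2 + 1 ], &right[ i ], sizeof( int16_t ) );
    }
    for ( i = 0; i < 8; i++ )
        printf( "%d ", out[ i ] );
    printf( "\n" );                          /* 1 10 2 20 3 30 4 40 */
    return 0;
}
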
@@ -1822,13 +2350,13 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
 /** Initialize the audio codec context.
 */
 
-static int audio_codec_init( producer_avformat this, int index, mlt_properties properties )
+static int audio_codec_init( producer_avformat self, int index, mlt_properties properties )
 {
        // Initialise the codec if necessary
-       if ( !this->audio_codec[ index ] )
+       if ( !self->audio_codec[ index ] )
        {
                // Get codec context
-               AVCodecContext *codec_context = this->audio_format->streams[index]->codec;
+               AVCodecContext *codec_context = self->audio_format->streams[index]->codec;
 
                // Find the codec
                AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
@@ -1838,36 +2366,40 @@ static int audio_codec_init( producer_avformat this, int index, mlt_properties p
                if ( codec && avcodec_open( codec_context, codec ) >= 0 )
                {
                        // Now store the codec with its destructor
-                       if ( this->audio_codec[ index ] )
-                               avcodec_close( this->audio_codec[ index ] );
-                       this->audio_codec[ index ] = codec_context;
+                       if ( self->audio_codec[ index ] )
+                               avcodec_close( self->audio_codec[ index ] );
+                       self->audio_codec[ index ] = codec_context;
                }
                else
                {
-                       // Remember that we can't use this later
-                       this->audio_index = -1;
+                       // Remember that we can't use the audio later
+                       self->audio_index = -1;
                }
                avformat_unlock( );
 
                // Process properties as AVOptions
                apply_properties( codec_context, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
+#if LIBAVCODEC_VERSION_MAJOR > 52
+               if ( codec && codec->priv_class && codec_context->priv_data )
+                       apply_properties( codec_context->priv_data, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
+#endif
        }
-       return this->audio_codec[ index ] && this->audio_index > -1;
+       return self->audio_codec[ index ] && self->audio_index > -1;
 }
 
 /** Set up audio handling.
 */
 
-static void producer_set_up_audio( producer_avformat this, mlt_frame frame )
+static void producer_set_up_audio( producer_avformat self, mlt_frame frame )
 {
        // Get the producer
-       mlt_producer producer = this->parent;
+       mlt_producer producer = self->parent;
 
        // Get the properties
        mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
 
        // Fetch the audio format context
-       AVFormatContext *context = this->audio_format;
+       AVFormatContext *context = self->audio_format;
 
        mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
@@ -1875,27 +2407,17 @@ static void producer_set_up_audio( producer_avformat this, mlt_frame frame )
        int index = mlt_properties_get_int( properties, "audio_index" );
 
        // Handle all audio tracks
-       if ( this->audio_index > -1 &&
+       if ( self->audio_index > -1 &&
             mlt_properties_get( properties, "audio_index" ) &&
             !strcmp( mlt_properties_get( properties, "audio_index" ), "all" ) )
                index = INT_MAX;
 
        // Reopen the file if necessary
-       if ( !context && this->audio_index > -1 && index > -1 )
+       if ( !context && self->audio_index > -1 && index > -1 )
        {
-               mlt_events_block( properties, producer );
-               producer_open( this, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
+               producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
                        mlt_properties_get( properties, "resource" ) );
-               context = this->audio_format;
-               if ( this->dummy_context )
-               {
-                       avformat_lock();
-                       av_close_input_file( this->dummy_context );
-                       avformat_unlock();
-               }
-               this->dummy_context = NULL;
-               mlt_events_unblock( properties, producer );
-               get_audio_streams_info( this );
+               context = self->audio_format;
        }
 
        // Exception handling for audio_index
@@ -1909,50 +2431,50 @@ static void producer_set_up_audio( producer_avformat this, mlt_frame frame )
        if ( context && index > -1 && index < INT_MAX &&
                 context->streams[ index ]->codec->codec_type != CODEC_TYPE_AUDIO )
        {
-               index = this->audio_index;
+               index = self->audio_index;
                mlt_properties_set_int( properties, "audio_index", index );
        }
 
        // Update the audio properties if the index changed
-       if ( context && index > -1 && index != this->audio_index )
+       if ( context && index > -1 && index != self->audio_index )
        {
-               if ( this->audio_codec[ this->audio_index ] )
+               if ( self->audio_codec[ self->audio_index ] )
                {
                        avformat_lock();
-                       avcodec_close( this->audio_codec[ this->audio_index ] );
+                       avcodec_close( self->audio_codec[ self->audio_index ] );
                        avformat_unlock();
                }
-               this->audio_codec[ this->audio_index ] = NULL;
+               self->audio_codec[ self->audio_index ] = NULL;
        }
-       if ( this->audio_index != -1 )
-               this->audio_index = index;
+       if ( self->audio_index != -1 )
+               self->audio_index = index;
        else
                index = -1;
 
        // Get the codec(s)
        if ( context && index == INT_MAX )
        {
-               mlt_properties_set_int( frame_properties, "frequency", this->max_frequency );
-               mlt_properties_set_int( frame_properties, "channels", this->total_channels );
+               mlt_properties_set_int( frame_properties, "audio_frequency", self->max_frequency );
+               mlt_properties_set_int( frame_properties, "audio_channels", self->total_channels );
                for ( index = 0; index < context->nb_streams; index++ )
                {
                        if ( context->streams[ index ]->codec->codec_type == CODEC_TYPE_AUDIO )
-                               audio_codec_init( this, index, properties );
+                               audio_codec_init( self, index, properties );
                }
        }
-       else if ( context && index > -1 && audio_codec_init( this, index, properties ) )
+       else if ( context && index > -1 && audio_codec_init( self, index, properties ) )
        {
                // Set the frame properties
                if ( index < INT_MAX )
                {
-                       mlt_properties_set_int( frame_properties, "frequency", this->audio_codec[ index ]->sample_rate );
-                       mlt_properties_set_int( frame_properties, "channels", this->audio_codec[ index ]->channels );
+                       mlt_properties_set_int( frame_properties, "frequency", self->audio_codec[ index ]->sample_rate );
+                       mlt_properties_set_int( frame_properties, "channels", self->audio_codec[ index ]->channels );
                }
        }
        if ( context && index > -1 )
        {
                // Add our audio operation
-               mlt_frame_push_audio( frame, this );
+               mlt_frame_push_audio( frame, self );
                mlt_frame_push_audio( frame, producer_get_audio );
        }
 }
@@ -1965,15 +2487,15 @@ static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int i
        // Access the private data
        mlt_service service = MLT_PRODUCER_SERVICE( producer );
        mlt_cache_item cache_item = mlt_service_cache_get( service, "producer_avformat" );
-       producer_avformat this = mlt_cache_item_data( cache_item, NULL );
+       producer_avformat self = mlt_cache_item_data( cache_item, NULL );
 
        // If cache miss
-       if ( !this )
+       if ( !self )
        {
-               this = calloc( 1, sizeof( struct producer_avformat_s ) );
-               producer->child = this;
-               this->parent = producer;
-               mlt_service_cache_put( service, "producer_avformat", this, 0, (mlt_destructor) producer_avformat_close );
+               self = calloc( 1, sizeof( struct producer_avformat_s ) );
+               producer->child = self;
+               self->parent = producer;
+               mlt_service_cache_put( service, "producer_avformat", self, 0, (mlt_destructor) producer_avformat_close );
                cache_item = mlt_service_cache_get( service, "producer_avformat" );
        }
 
@@ -1997,10 +2519,10 @@ static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int i
        mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( producer ) );
        
        // Set up the video
-       producer_set_up_video( this, *frame );
+       producer_set_up_video( self, *frame );
 
        // Set up the audio
-       producer_set_up_audio( this, *frame );
+       producer_set_up_audio( self, *frame );
 
        // Calculate the next timecode
        mlt_producer_prepare_next( producer );
@@ -2008,41 +2530,65 @@ static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int i
        return 0;
 }
 
-static void producer_avformat_close( producer_avformat this )
+static void producer_avformat_close( producer_avformat self )
 {
        mlt_log_debug( NULL, "producer_avformat_close\n" );
-       // Close the file
-       av_free( this->av_frame );
+
+       // Clean up av contexts
+       av_free( self->av_frame );
        avformat_lock();
        int i;
        for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )
        {
-               if ( this->audio_resample[i] )
-                       audio_resample_close( this->audio_resample[i] );
-               mlt_pool_release( this->audio_buffer[i] );
-               av_free( this->decode_buffer[i] );
-               if ( this->audio_codec[i] )
-                       avcodec_close( this->audio_codec[i] );
-       }
-       if ( this->video_codec )
-               avcodec_close( this->video_codec );
-       if ( this->dummy_context )
-               av_close_input_file( this->dummy_context );
-       if ( this->audio_format )
-               av_close_input_file( this->audio_format );
-       if ( this->video_format )
-               av_close_input_file( this->video_format );
+               if ( self->audio_resample[i] )
+                       audio_resample_close( self->audio_resample[i] );
+               mlt_pool_release( self->audio_buffer[i] );
+               av_free( self->decode_buffer[i] );
+               if ( self->audio_codec[i] )
+                       avcodec_close( self->audio_codec[i] );
+       }
+       if ( self->video_codec )
+               avcodec_close( self->video_codec );
+       // Close the file
+       if ( self->dummy_context )
+               av_close_input_file( self->dummy_context );
+       if ( self->seekable && self->audio_format )
+               av_close_input_file( self->audio_format );
+       if ( self->video_format )
+               av_close_input_file( self->video_format );
        avformat_unlock();
 #ifdef VDPAU
-       vdpau_producer_close( this );
+       vdpau_producer_close( self );
 #endif
-       if ( this->image_cache )
-               mlt_cache_close( this->image_cache );
-       free( this );
+       if ( self->image_cache )
+               mlt_cache_close( self->image_cache );
+
+       // Clean up the mutexes
+       pthread_mutex_destroy( &self->audio_mutex );
+       pthread_mutex_destroy( &self->video_mutex );
+       pthread_mutex_destroy( &self->packets_mutex );
+
+       // Clean up the packet queues
+       AVPacket *pkt;
+       while ( ( pkt = mlt_deque_pop_back( self->apackets ) ) )
+       {
+               av_free_packet( pkt );
+               free( pkt );
+       }
+       while ( ( pkt = mlt_deque_pop_back( self->vpackets ) ) )
+       {
+               av_free_packet( pkt );
+               free( pkt );
+       }
+
+       free( self );
 }
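
For non-seekable sources the audio and video paths read from the same format context, so whichever get-function pulls a packet that belongs to the other stream parks it on a deque under packets_mutex, and producer_avformat_close drains whatever was never consumed. The toy stand-in below illustrates that hand-off; the fixed-size queue and all names are mine, not MLT's mlt_deque API.

#include <stdio.h>
#include <stdlib.h>

typedef struct { int stream_index; } toy_packet;

static toy_packet *parked[16];
static int parked_count = 0;

/* Save a packet for the other reader instead of dropping it. */
static void park( toy_packet pkt )
{
    toy_packet *copy = malloc( sizeof( toy_packet ) );
    *copy = pkt;
    parked[ parked_count++ ] = copy;
}

int main( void )
{
    /* an audio reader meeting interleaved packets on a live source */
    toy_packet stream[5] = { {0}, {1}, {1}, {0}, {1} }; /* 0 = video, 1 = audio */
    int i, decoded = 0;

    for ( i = 0; i < 5; i++ )
    {
        if ( stream[ i ].stream_index == 1 )
            decoded++;                /* decode it in this path */
        else
            park( stream[ i ] );      /* keep it for the video path */
    }
    printf( "decoded %d audio packets, parked %d video packets\n", decoded, parked_count );

    /* on close, free whatever the other path never consumed */
    while ( parked_count )
        free( parked[ --parked_count ] );
    return 0;
}
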
 
 static void producer_close( mlt_producer parent )
 {
+       // Remove this instance from the cache
+       mlt_service_cache_purge( MLT_PRODUCER_SERVICE(parent) );
+
        // Close the parent
        parent->close = NULL;
        mlt_producer_close( parent );