Make the Movit converter use the correct color primaries.

diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c
index 62d4dbe961cd89592869e30a3acf398eab3a2894..f2530308e8cfc553bb3e2dd2766691a96203d553 100644
--- a/src/modules/avformat/producer_avformat.c
+++ b/src/modules/avformat/producer_avformat.c
@@ -1,6 +1,6 @@
 /*
  * producer_avformat.c -- avformat producer
- * Copyright (C) 2003-2009 Ushodaya Enterprises Limited
+ * Copyright (C) 2003-2012 Ushodaya Enterprises Limited
  * Author: Charles Yates <charles.yates@pandora.be>
  * Author: Dan Dennedy <dan@dennedy.org>
  * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
 
 // ffmpeg Header files
 #include <libavformat/avformat.h>
-#ifdef SWSCALE
-#  include <libswscale/swscale.h>
-#endif
-#if LIBAVCODEC_VERSION_MAJOR > 52
+#include <libswscale/swscale.h>
 #include <libavutil/samplefmt.h>
-#elif (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
-const char *avcodec_get_sample_fmt_name(int sample_fmt);
-#endif
+#include <libavutil/pixdesc.h>
+
 #ifdef VDPAU
 #  include <libavcodec/vdpau.h>
 #endif
-#if (LIBAVUTIL_VERSION_INT > ((50<<16)+(7<<8)+0))
-#  include <libavutil/pixdesc.h>
+#if (LIBAVUTIL_VERSION_INT >= ((51<<16)+(8<<8)+0))
+#  include <libavutil/dict.h>
 #endif
 
 // System header files
@@ -52,12 +48,7 @@ const char *avcodec_get_sample_fmt_name(int sample_fmt);
 #include <pthread.h>
 #include <limits.h>
 
-#if LIBAVUTIL_VERSION_INT < (50<<16)
-#define PIX_FMT_RGB32 PIX_FMT_RGBA32
-#define PIX_FMT_YUYV422 PIX_FMT_YUV422
-#endif
-
-#if LIBAVCODEC_VERSION_MAJOR > 52
+#if LIBAVCODEC_VERSION_MAJOR >= 53
 #include <libavutil/opt.h>
 #define CODEC_TYPE_VIDEO      AVMEDIA_TYPE_VIDEO
 #define CODEC_TYPE_AUDIO      AVMEDIA_TYPE_AUDIO
@@ -66,14 +57,17 @@ const char *avcodec_get_sample_fmt_name(int sample_fmt);
 #include <libavcodec/opt.h>
 #endif
 
+#if LIBAVCODEC_VERSION_MAJOR < 55
+#define AV_CODEC_ID_DVVIDEO CODEC_ID_DVVIDEO
+#define AV_CODEC_ID_H264    CODEC_ID_H264
+#endif
+
 #define POSITION_INITIAL (-2)
 #define POSITION_INVALID (-1)
 
-#define MAX_AUDIO_STREAMS (10)
+#define MAX_AUDIO_STREAMS (32)
 #define MAX_VDPAU_SURFACES (10)
-
-void avformat_lock( );
-void avformat_unlock( );
+#define MAX_AUDIO_FRAME_SIZE (192000) // 1 second of 48khz 32bit audio
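For reference, the new MAX_AUDIO_FRAME_SIZE follows directly from its comment: one second of 48 kHz audio at 32 bits per sample is 48000 * 4 = 192000 bytes per channel. A minimal sketch of that sizing, with a hypothetical helper name that is not part of the patch:

/* Hypothetical helper (illustration only): bytes needed for one second
 * of single-channel audio. 48000 samples/s * 4 bytes/sample = 192000,
 * i.e. MAX_AUDIO_FRAME_SIZE. */
static inline size_t one_second_audio_bytes( int sample_rate, int bytes_per_sample )
{
	return (size_t) sample_rate * (size_t) bytes_per_sample;
}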
 
 struct producer_avformat_s
 {
@@ -83,18 +77,18 @@ struct producer_avformat_s
        AVFormatContext *video_format;
        AVCodecContext *audio_codec[ MAX_AUDIO_STREAMS ];
        AVCodecContext *video_codec;
-       AVFrame *av_frame;
-       ReSampleContext *audio_resample[ MAX_AUDIO_STREAMS ];
+       AVFrame *video_frame;
+       AVFrame *audio_frame;
+       AVPacket pkt;
        mlt_position audio_expected;
        mlt_position video_expected;
        int audio_index;
        int video_index;
-       double start_time;
-       int first_pts;
-       int last_position;
+       int64_t first_pts;
+       int64_t last_position;
        int seekable;
-       int current_position;
-       int got_picture;
+       int64_t current_position;
+       mlt_position nonseek_position;
        int top_field_first;
        uint8_t *audio_buffer[ MAX_AUDIO_STREAMS ];
        size_t audio_buffer_size[ MAX_AUDIO_STREAMS ];
@@ -106,11 +100,18 @@ struct producer_avformat_s
        int max_channel;
        int max_frequency;
        unsigned int invalid_pts_counter;
-       double resample_factor;
+       unsigned int invalid_dts_counter;
        mlt_cache image_cache;
-       int colorspace;
+       int yuv_colorspace, color_primaries;
+       int full_luma;
        pthread_mutex_t video_mutex;
        pthread_mutex_t audio_mutex;
+       mlt_deque apackets;
+       mlt_deque vpackets;
+       pthread_mutex_t packets_mutex;
+       pthread_mutex_t open_mutex;
+       int is_mutex_init;
+       AVRational video_time_base;
 #ifdef VDPAU
        struct
        {
@@ -123,6 +124,9 @@ struct producer_avformat_s
                int ip_age[2];
                int is_decoded;
                uint8_t *buffer;
+
+               VdpDevice device;
+               VdpDecoder decoder;
        } *vdpau;
 #endif
 };
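The struct now carries per-producer packet deques (apackets, vpackets) guarded by packets_mutex, so packets demuxed for the other stream type can be parked instead of dropped while one path owns the demuxer. A minimal sketch of that queue discipline, assuming MLT's mlt_deque push/pop calls and the fields added above; the helper names are hypothetical and not part of the patch:

/* Park a demuxed audio packet for later consumption by the audio path. */
static void queue_audio_packet( producer_avformat self, AVPacket *pkt )
{
	AVPacket *copy = malloc( sizeof( AVPacket ) );
	av_dup_packet( pkt ); /* ensure the payload outlives the demuxer's buffer */
	*copy = *pkt;
	pthread_mutex_lock( &self->packets_mutex );
	mlt_deque_push_back( self->apackets, copy );
	pthread_mutex_unlock( &self->packets_mutex );
}

/* Fetch a parked audio packet, if any; returns 1 when pkt was filled. */
static int dequeue_audio_packet( producer_avformat self, AVPacket *pkt )
{
	int got = 0;
	pthread_mutex_lock( &self->packets_mutex );
	AVPacket *queued = mlt_deque_pop_front( self->apackets );
	if ( queued )
	{
		*pkt = *queued;
		free( queued );
		got = 1;
	}
	pthread_mutex_unlock( &self->packets_mutex );
	return got;
}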
@@ -130,7 +134,7 @@ typedef struct producer_avformat_s *producer_avformat;
 
 // Forward references.
 static int list_components( char* file );
-static int producer_open( producer_avformat self, mlt_profile profile, char *file );
+static int producer_open( producer_avformat self, mlt_profile profile, const char *URL, int take_lock );
 static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int index );
 static void producer_avformat_close( producer_avformat );
 static void producer_close( mlt_producer parent );
@@ -138,6 +142,8 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame );
 static void producer_set_up_audio( producer_avformat self, mlt_frame frame );
 static void apply_properties( void *obj, mlt_properties properties, int flags );
 static int video_codec_init( producer_avformat self, int index, mlt_properties properties );
+static void get_audio_streams_info( producer_avformat self );
+static mlt_audio_format pick_audio_format( int sample_fmt );
 
 #ifdef VDPAU
 #include "vdpau.c"
@@ -176,38 +182,40 @@ mlt_producer producer_avformat_init( mlt_profile profile, const char *service, c
 
                        // Register our get_frame implementation
                        producer->get_frame = producer_get_frame;
-                       
+
                        if ( strcmp( service, "avformat-novalidate" ) )
                        {
                                // Open the file
-                               if ( producer_open( self, profile, file ) != 0 )
+                               if ( producer_open( self, profile, file, 1 ) != 0 )
                                {
                                        // Clean up
                                        mlt_producer_close( producer );
                                        producer = NULL;
+                                       producer_avformat_close( self );
                                }
-                               else
+                               else if ( self->seekable )
                                {
                                        // Close the file to release resources for large playlists - reopen later as needed
-                                       avformat_lock();
-                                       if ( self->dummy_context )
-                                               av_close_input_file( self->dummy_context );
-                                       self->dummy_context = NULL;
+#if LIBAVFORMAT_VERSION_INT >= ((53<<16)+(17<<8)+0)
+                                       if ( self->audio_format )
+                                               avformat_close_input( &self->audio_format );
+                                       if ( self->video_format )
+                                               avformat_close_input( &self->video_format );
+#else
                                        if ( self->audio_format )
                                                av_close_input_file( self->audio_format );
-                                       self->audio_format = NULL;
                                        if ( self->video_format )
                                                av_close_input_file( self->video_format );
+#endif
+                                       self->audio_format = NULL;
                                        self->video_format = NULL;
-                                       avformat_unlock();
-       
-                                       // Default the user-selectable indices from the auto-detected indices
-                                       mlt_properties_set_int( properties, "audio_index",  self->audio_index );
-                                       mlt_properties_set_int( properties, "video_index",  self->video_index );
                                }
                        }
                        if ( producer )
                        {
+                               // Default the user-selectable indices from the auto-detected indices
+                               mlt_properties_set_int( properties, "audio_index",  self->audio_index );
+                               mlt_properties_set_int( properties, "video_index",  self->video_index );
 #ifdef VDPAU
                                mlt_service_cache_set_size( MLT_PRODUCER_SERVICE(producer), "producer_avformat", 5 );
 #endif
@@ -256,14 +264,41 @@ int list_components( char* file )
        return skip;
 }
 
+static int first_video_index( producer_avformat self )
+{
+       AVFormatContext *context = self->video_format? self->video_format : self->audio_format;
+       int i = -1; // not found
+
+       if ( context ) {
+               for ( i = 0; i < context->nb_streams; i++ ) {
+                       if ( context->streams[i]->codec &&
+                            context->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
+                               break;
+               }
+               if ( i == context->nb_streams )
+                       i = -1;
+       }
+       return i;
+}
+
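A brief usage sketch for the helper above; the caller shown is hypothetical, but the -1 convention matches how the function reports a missing video stream:

	int index = first_video_index( self );
	if ( index < 0 )
		mlt_log_verbose( MLT_PRODUCER_SERVICE( self->parent ), "no video stream found\n" );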
 /** Find the default streams.
 */
 
-static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatContext *context, int *audio_index, int *video_index )
+static mlt_properties find_default_streams( producer_avformat self )
 {
        int i;
        char key[200];
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(8<<8)+0)
+       AVDictionaryEntry *tag = NULL;
+#else
        AVMetadataTag *tag = NULL;
+#endif
+       AVFormatContext *context = self->video_format;
+       mlt_properties meta_media = MLT_PRODUCER_PROPERTIES( self->parent );
+
+       // Default to the first audio and video streams found
+       self->audio_index = -1;
+       self->video_index = -1;
 
        mlt_properties_set_int( meta_media, "meta.media.nb_streams", context->nb_streams );
 
@@ -284,31 +319,35 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
                switch( codec_context->codec_type )
                {
                        case CODEC_TYPE_VIDEO:
-                               if ( *video_index < 0 )
-                                       *video_index = i;
+                               // Use first video stream
+                               if ( self->video_index < 0 )
+                                       self->video_index = i;
                                mlt_properties_set( meta_media, key, "video" );
                                snprintf( key, sizeof(key), "meta.media.%d.stream.frame_rate", i );
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
                                double ffmpeg_fps = av_q2d( context->streams[ i ]->avg_frame_rate );
+#if LIBAVFORMAT_VERSION_MAJOR < 55
                                if ( isnan( ffmpeg_fps ) || ffmpeg_fps == 0 )
                                        ffmpeg_fps = av_q2d( context->streams[ i ]->r_frame_rate );
-                               mlt_properties_set_double( meta_media, key, ffmpeg_fps );
-#else
-                               mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->r_frame_rate ) );
 #endif
+                               mlt_properties_set_double( meta_media, key, ffmpeg_fps );
 
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
                                snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
                                mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->sample_aspect_ratio ) );
-#endif
+                               snprintf( key, sizeof(key), "meta.media.%d.codec.width", i );
+                               mlt_properties_set_int( meta_media, key, codec_context->width );
+                               snprintf( key, sizeof(key), "meta.media.%d.codec.height", i );
+                               mlt_properties_set_int( meta_media, key, codec_context->height );
                                snprintf( key, sizeof(key), "meta.media.%d.codec.frame_rate", i );
-                               mlt_properties_set_double( meta_media, key, (double) codec_context->time_base.den /
-                                                                                  ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num ) );
+                               AVRational frame_rate = { codec_context->time_base.den, codec_context->time_base.num * codec_context->ticks_per_frame };
+                               mlt_properties_set_double( meta_media, key, av_q2d( frame_rate ) );
                                snprintf( key, sizeof(key), "meta.media.%d.codec.pix_fmt", i );
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(3<<8)+0)
+                               mlt_properties_set( meta_media, key, av_get_pix_fmt_name( codec_context->pix_fmt ) );
+#else
                                mlt_properties_set( meta_media, key, avcodec_get_pix_fmt_name( codec_context->pix_fmt ) );
+#endif
                                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_aspect_ratio", i );
                                mlt_properties_set_double( meta_media, key, av_q2d( codec_context->sample_aspect_ratio ) );
-#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)
                                snprintf( key, sizeof(key), "meta.media.%d.codec.colorspace", i );
                                switch ( codec_context->colorspace )
                                {
@@ -327,19 +366,17 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
                                        mlt_properties_set_int( meta_media, key, codec_context->width * codec_context->height > 750000 ? 709 : 601 );
                                        break;
                                }
-#endif
                                break;
                        case CODEC_TYPE_AUDIO:
-                               if ( *audio_index < 0 )
-                                       *audio_index = i;
+                               if ( !codec_context->channels )
+                                       break;
+                               // Use first audio stream
+                               if ( self->audio_index < 0 && pick_audio_format( codec_context->sample_fmt ) != mlt_audio_none )
+                                       self->audio_index = i;
+
                                mlt_properties_set( meta_media, key, "audio" );
-#if LIBAVCODEC_VERSION_MAJOR > 52
                                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
                                mlt_properties_set( meta_media, key, av_get_sample_fmt_name( codec_context->sample_fmt ) );
-#elif (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
-                               snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
-                               mlt_properties_set( meta_media, key, avcodec_get_sample_fmt_name( codec_context->sample_fmt ) );
-#endif
                                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_rate", i );
                                mlt_properties_set_int( meta_media, key, codec_context->sample_rate );
                                snprintf( key, sizeof(key), "meta.media.%d.codec.channels", i );
@@ -352,10 +389,8 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
 //             mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->time_base ) );
                snprintf( key, sizeof(key), "meta.media.%d.codec.name", i );
                mlt_properties_set( meta_media, key, codec->name );
-#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(55<<8)+0))
                snprintf( key, sizeof(key), "meta.media.%d.codec.long_name", i );
                mlt_properties_set( meta_media, key, codec->long_name );
-#endif
                snprintf( key, sizeof(key), "meta.media.%d.codec.bit_rate", i );
                mlt_properties_set_int( meta_media, key, codec_context->bit_rate );
 //             snprintf( key, sizeof(key), "meta.media.%d.codec.time_base", i );
@@ -366,8 +401,11 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
 //             mlt_properties_set_int( meta_media, key, codec_context->level );
 
                // Read Metadata
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(31<<8)+0)
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(8<<8)+0)
+               while ( ( tag = av_dict_get( stream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX ) ) )
+#else
                while ( ( tag = av_metadata_get( stream->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX ) ) )
+#endif
                {
                        if ( tag->value && strcmp( tag->value, "" ) && strcmp( tag->value, "und" ) )
                        {
@@ -375,10 +413,12 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
                                mlt_properties_set( meta_media, key, tag->value );
                        }
                }
-#endif
        }
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(31<<8)+0)
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(8<<8)+0)
+       while ( ( tag = av_dict_get( context->metadata, "", tag, AV_DICT_IGNORE_SUFFIX ) ) )
+#else
        while ( ( tag = av_metadata_get( context->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX ) ) )
+#endif
        {
                if ( tag->value && strcmp( tag->value, "" ) && strcmp( tag->value, "und" ) )
                {
@@ -386,447 +426,555 @@ static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatC
                        mlt_properties_set( meta_media, key, tag->value );
                }
        }
-#else
-       if ( context->title && strcmp( context->title, "" ) )
-               mlt_properties_set(properties, "meta.attr.title.markup", context->title );
-       if ( context->author && strcmp( context->author, "" ) )
-               mlt_properties_set(properties, "meta.attr.author.markup", context->author );
-       if ( context->copyright && strcmp( context->copyright, "" ) )
-               mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright );
-       if ( context->comment )
-               mlt_properties_set(properties, "meta.attr.comment.markup", context->comment );
-       if ( context->album )
-               mlt_properties_set(properties, "meta.attr.album.markup", context->album );
-       if ( context->year )
-               mlt_properties_set_int(properties, "meta.attr.year.markup", context->year );
-       if ( context->track )
-               mlt_properties_set_int(properties, "meta.attr.track.markup", context->track );
-#endif
 
        return meta_media;
 }
 
-static inline int dv_is_pal( AVPacket *pkt )
+static void get_aspect_ratio( mlt_properties properties, AVStream *stream, AVCodecContext *codec_context )
 {
-       return pkt->data[3] & 0x80;
+       AVRational sar = stream->sample_aspect_ratio;
+       if ( sar.num <= 0 || sar.den <= 0 )
+               sar = codec_context->sample_aspect_ratio;
+       if ( sar.num <= 0 || sar.den <= 0 )
+               sar.num = sar.den = 1;
+       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", sar.num );
+       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", sar.den );
+       mlt_properties_set_double( properties, "aspect_ratio", av_q2d( sar ) );
 }
 
-static int dv_is_wide( AVPacket *pkt )
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+static char* parse_url( mlt_profile profile, const char* URL, AVInputFormat **format, AVDictionary **params )
+#else
+static char* parse_url( mlt_profile profile, const char* URL, AVInputFormat **format, AVFormatParameters *params )
+#endif
 {
-       int i = 80 /* block size */ *3 /* VAUX starts at block 3 */ +3 /* skip block header */;
+       if ( !URL ) return NULL;
+
+       char *result = NULL;
+       char *protocol = strdup( URL );
+       char *url = strchr( protocol, ':' );
 
-       for ( ; i < pkt->size; i += 5 /* packet size */ )
+       // Only if there is not a protocol specification that avformat can handle
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+       if ( url && avio_check( URL, 0 ) < 0 )
+#else
+       if ( url && !url_exist( URL ) )
+#endif
        {
-               if ( pkt->data[ i ] == 0x61 )
-               {
-                       uint8_t x = pkt->data[ i + 2 ] & 0x7;
-                       return ( x == 2 ) || ( x == 7 );
-               }
-       }
-       return 0;
-}
+               // Truncate protocol string
+               url[0] = 0;
+               mlt_log_debug( NULL, "%s: protocol=%s resource=%s\n", __FUNCTION__, protocol, url + 1 );
 
-static double get_aspect_ratio( mlt_properties properties, AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
-{
-       double aspect_ratio = 1.0;
+               // Lookup the format
+               *format = av_find_input_format( protocol );
 
-       if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
-       {
-               if ( pkt )
+               // Eat the format designator
+               result = ++url;
+
+               if ( *format )
                {
-                       if ( dv_is_pal( pkt ) )
-                       {
-                               if ( dv_is_wide( pkt ) )
-                               {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 64 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 45 );
-                               }
-                               else
-                               {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 16 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 15 );
-                               }
-                       }
-                       else
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+                       // support for legacy width and height parameters
+                       char *width = NULL;
+                       char *height = NULL;
+#else
+                       // These are required by video4linux2 (defaults)
+                       params->width = profile->width;
+                       params->height = profile->height;
+                       if ( !strstr( URL, "&frame_rate" ) )
+                               params->time_base = (AVRational){ profile->frame_rate_den, profile->frame_rate_num };
+                       params->channels = 2;
+                       params->sample_rate = 48000;
+#endif
+
+                       // Parse out params
+                       url = strchr( url, '?' );
+                       while ( url )
                        {
-                               if ( dv_is_wide( pkt ) )
-                               {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 32 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 27 );
-                               }
-                               else
+                               url[0] = 0;
+                               char *name = strdup( ++url );
+                               char *value = strchr( name, '=' );
+                               if ( !value )
+                                       // Also accept : as delimiter for backwards compatibility.
+                                       value = strchr( name, ':' );
+                               if ( value )
                                {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 8 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 9 );
-                               }
-                       }
-               }
-               else
-               {
-                       AVRational ar =
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
-                               stream->sample_aspect_ratio;
+                                       value[0] = 0;
+                                       value++;
+                                       char *t = strchr( value, '&' );
+                                       if ( t )
+                                               t[0] = 0;
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+                                       // translate old parameters to new av_dict names
+                                       if ( !strcmp( name, "frame_rate" ) )
+                                               av_dict_set( params, "framerate", value, 0 );
+                                       else if ( !strcmp( name, "pix_fmt" ) )
+                                               av_dict_set( params, "pixel_format", value, 0 );
+                                       else if ( !strcmp( name, "width" ) )
+                                               width = strdup( value );
+                                       else if ( !strcmp( name, "height" ) )
+                                               height = strdup( value );
+                                       else
+                                               // generic demux/device option support
+                                               av_dict_set( params, name, value, 0 );
 #else
-                               codec_context->sample_aspect_ratio;
+                                       if ( !strcmp( name, "frame_rate" ) )
+                                               params->time_base.den = atoi( value );
+                                       else if ( !strcmp( name, "frame_rate_base" ) )
+                                               params->time_base.num = atoi( value );
+                                       else if ( !strcmp( name, "sample_rate" ) )
+                                               params->sample_rate = atoi( value );
+                                       else if ( !strcmp( name, "channel" ) )
+                                               params->channel = atoi( value );
+                                       else if ( !strcmp( name, "channels" ) )
+                                               params->channels = atoi( value );
+                                       else if ( !strcmp( name, "pix_fmt" ) )
+                                               params->pix_fmt = av_get_pix_fmt( value );
+                                       else if ( !strcmp( name, "width" ) )
+                                               params->width = atoi( value );
+                                       else if ( !strcmp( name, "height" ) )
+                                               params->height = atoi( value );
+                                       else if ( !strcmp( name, "standard" ) )
+                                               params->standard = strdup( value );
 #endif
-                       // Override FFmpeg's notion of DV aspect ratios, which are
-                       // based upon a width of 704. Since we do not have a normaliser
-                       // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
-                       // we just coerce the values to facilitate a passive behaviour through
-                       // the rescale normaliser when using equivalent producers and consumers.
-                       // = display_aspect / (width * height)
-                       if ( ar.num == 10 && ar.den == 11 )
-                       {
-                               // 4:3 NTSC
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 8 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 9 );
-                       }
-                       else if ( ar.num == 59 && ar.den == 54 )
-                       {
-                               // 4:3 PAL
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 16 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 15 );
-                       }
-                       else if ( ar.num == 40 && ar.den == 33 )
-                       {
-                               // 16:9 NTSC
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 32 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 27 );
+                               }
+                               free( name );
+                               url = strchr( url, '&' );
                        }
-                       else if ( ar.num == 118 && ar.den == 81 )
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+                       // continued support for legacy width and height parameters
+                       if ( width && height )
                        {
-                               // 16:9 PAL
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 64 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 45 );
+                               char *s = malloc( strlen( width ) + strlen( height ) + 2 );
+                               strcpy( s, width );
+                               strcat( s, "x");
+                               strcat( s, height );
+                               av_dict_set( params, "video_size", s, 0 );
+                               free( s );
                        }
+                       if ( width ) free( width );
+                       if ( height ) free ( height );
+#endif
                }
+               result = strdup( result );
        }
        else
        {
-               AVRational codec_sar = codec_context->sample_aspect_ratio;
-               AVRational stream_sar =
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
-                       stream->sample_aspect_ratio;
-#else
-                       { 0, 1 };
-#endif
-               if ( codec_sar.num > 0 )
-               {
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", codec_sar.num );
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", codec_sar.den );
-               }
-               else if ( stream_sar.num > 0 )
+               result = strdup( URL );
+       }
+       free( protocol );
+       return result;
+}
+
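A usage sketch for parse_url() above, following the newer AVDictionary branch; the MRL is hypothetical, and the legacy width/height parameters are folded into a single "video_size" entry exactly as in the loop above:

	AVInputFormat *format = NULL;
	AVDictionary *params = NULL;
	char *filename = parse_url( profile, "video4linux2:/dev/video0?width=640&height=480", &format, &params );
	/* format   -> the video4linux2 demuxer (if compiled in)
	 * params   -> video_size=640x480
	 * filename -> "/dev/video0" (protocol and query stripped) */
	int error = avformat_open_input( &self->video_format, filename, format, &params ) < 0;
	av_dict_free( &params );
	free( filename );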
+static int get_basic_info( producer_avformat self, mlt_profile profile, const char *filename )
+{
+       int error = 0;
+
+       // Get the properties
+       mlt_properties properties = MLT_PRODUCER_PROPERTIES( self->parent );
+
+       AVFormatContext *format = self->video_format;
+
+       // Get the duration
+       if ( !mlt_properties_get_int( properties, "_length_computed" ) )
+       {
+               // The _length_computed flag prevents overwriting explicitly set length/out/eof properties
+               // when producer_open is called again after the initial call, e.g. when restoring or resetting the producer.
+               if ( format->duration != AV_NOPTS_VALUE )
                {
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", stream_sar.num );
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", stream_sar.den );
+                       // This isn't going to be accurate for all formats
+                       // We will treat everything with the producer fps.
+                       mlt_position frames = ( mlt_position )( int )( format->duration *
+                               profile->frame_rate_num / profile->frame_rate_den / AV_TIME_BASE);
+                       mlt_properties_set_position( properties, "out", frames - 1 );
+                       mlt_properties_set_position( properties, "length", frames );
+                       mlt_properties_set_int( properties, "_length_computed", 1 );
                }
                else
                {
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 1 );
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 1 );
+                       // Set live sources to run forever
+                       mlt_properties_set_position( properties, "length", INT_MAX );
+                       mlt_properties_set_position( properties, "out", INT_MAX - 1 );
+                       mlt_properties_set( properties, "eof", "loop" );
+                       mlt_properties_set_int( properties, "_length_computed", 1 );
                }
        }
-       AVRational ar = { mlt_properties_get_double( properties, "meta.media.sample_aspect_num" ), mlt_properties_get_double( properties, "meta.media.sample_aspect_den" ) };
-       aspect_ratio = av_q2d( ar );
-       mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
 
-       return aspect_ratio;
+       // Check if we're seekable
+       // avdevices are typically AVFMT_NOFILE and not seekable
+       self->seekable = !format->iformat || !( format->iformat->flags & AVFMT_NOFILE );
+       if ( format->pb )
+       {
+               // protocols can indicate if they support seeking
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+               self->seekable = format->pb->seekable;
+#else
+               URLContext *uc = url_fileno( format->pb );
+               if ( uc )
+                       self->seekable = !uc->is_streamed;
+#endif
+       }
+       if ( self->seekable )
+       {
+               // Do a more rigorous test of seekable on a disposable context
+               self->seekable = av_seek_frame( format, -1, format->start_time, AVSEEK_FLAG_BACKWARD ) >= 0;
+               mlt_properties_set_int( properties, "seekable", self->seekable );
+               self->dummy_context = format;
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+               self->video_format = NULL;
+               avformat_open_input( &self->video_format, filename, NULL, NULL );
+               avformat_find_stream_info( self->video_format, NULL );
+#else
+               av_open_input_file( &self->video_format, filename, NULL, 0, NULL );
+               av_find_stream_info( self->video_format );
+#endif
+               format = self->video_format;
+       }
+
+       // Fetch the width, height and aspect ratio
+       if ( self->video_index != -1 )
+       {
+               AVCodecContext *codec_context = format->streams[ self->video_index ]->codec;
+               mlt_properties_set_int( properties, "width", codec_context->width );
+               mlt_properties_set_int( properties, "height", codec_context->height );
+               get_aspect_ratio( properties, format->streams[ self->video_index ], codec_context );
+
+               // Verify that we can convert this to YUV 4:2:2
+               // TODO: we can now also return RGB and RGBA and quite possibly more in the future.
+               struct SwsContext *context = sws_getContext( codec_context->width, codec_context->height, codec_context->pix_fmt,
+                       codec_context->width, codec_context->height, PIX_FMT_YUYV422, SWS_BILINEAR, NULL, NULL, NULL);
+               if ( context )
+                       sws_freeContext( context );
+               else
+                       error = 1;
+       }
+       return error;
 }
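A worked example of the duration-to-frames conversion above, using invented numbers: with AV_TIME_BASE = 1000000 and a 25/1 profile, a stream reporting format->duration = 10140000 (10.14 seconds) yields

	frames = 10140000 * 25 / 1 / 1000000 = 253   (integer truncation)

so "length" becomes 253 and "out" becomes 252; any fractional frame is simply dropped.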
 
 /** Open the file.
 */
 
-static int producer_open( producer_avformat self, mlt_profile profile, char *file )
+static int producer_open( producer_avformat self, mlt_profile profile, const char *URL, int take_lock )
 {
        // Return an error code (0 == no error)
        int error = 0;
-
-       // Context for avformat
-       AVFormatContext *context = NULL;
-
-       // Get the properties
        mlt_properties properties = MLT_PRODUCER_PROPERTIES( self->parent );
 
-       // We will treat everything with the producer fps
-       double fps = mlt_profile_fps( profile );
+       if ( !self->is_mutex_init )
+       {
+               pthread_mutex_init( &self->audio_mutex, NULL );
+               pthread_mutex_init( &self->video_mutex, NULL );
+               pthread_mutex_init( &self->packets_mutex, NULL );
+               pthread_mutex_init( &self->open_mutex, NULL );
+               self->is_mutex_init = 1;
+       }
 
        // Lock the service
-       pthread_mutex_init( &self->audio_mutex, NULL );
-       pthread_mutex_init( &self->video_mutex, NULL );
-       pthread_mutex_lock( &self->audio_mutex );
-       pthread_mutex_lock( &self->video_mutex );
+       if ( take_lock )
+       {
+               pthread_mutex_lock( &self->audio_mutex );
+               pthread_mutex_lock( &self->video_mutex );
+       }
+       mlt_events_block( properties, self->parent );
 
-       // If "MRL", then create AVInputFormat
+       // Parse URL
        AVInputFormat *format = NULL;
-       AVFormatParameters *params = NULL;
-       char *standard = NULL;
-       char *mrl = strchr( file, ':' );
-
-       // AV option (0 = both, 1 = video, 2 = audio)
-       int av = 0;
-
-       // Only if there is not a protocol specification that avformat can handle
-#if LIBAVFORMAT_VERSION_MAJOR > 52
-       if ( mrl && avio_check( file, 0 ) < 0 )
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+       AVDictionary *params = NULL;
 #else
-       if ( mrl && !url_exist( file ) )
+       AVFormatParameters params;
+       memset( &params, 0, sizeof(params) );
 #endif
-       {
-               // 'file' becomes format abbreviation
-               mrl[0] = 0;
-
-               // Lookup the format
-               format = av_find_input_format( file );
-
-               // Eat the format designator
-               file = ++mrl;
-
-               if ( format )
-               {
-                       // Allocate params
-                       params = calloc( sizeof( AVFormatParameters ), 1 );
-
-                       // These are required by video4linux (defaults)
-                       params->width = 640;
-                       params->height = 480;
-                       params->time_base= (AVRational){1,25};
-                       params->channels = 2;
-                       params->sample_rate = 48000;
-               }
-
-               // Parse out params
-               mrl = strchr( file, '?' );
-               while ( mrl )
-               {
-                       mrl[0] = 0;
-                       char *name = strdup( ++mrl );
-                       char *value = strchr( name, ':' );
-                       if ( value )
-                       {
-                               value[0] = 0;
-                               value++;
-                               char *t = strchr( value, '&' );
-                               if ( t )
-                                       t[0] = 0;
-                               if ( !strcmp( name, "frame_rate" ) )
-                                       params->time_base.den = atoi( value );
-                               else if ( !strcmp( name, "frame_rate_base" ) )
-                                       params->time_base.num = atoi( value );
-                               else if ( !strcmp( name, "sample_rate" ) )
-                                       params->sample_rate = atoi( value );
-                               else if ( !strcmp( name, "channel" ) )
-                                       params->channel = atoi( value );
-                               else if ( !strcmp( name, "channels" ) )
-                                       params->channels = atoi( value );
-#if (LIBAVUTIL_VERSION_INT > ((50<<16)+(7<<8)+0))
-                               else if ( !strcmp( name, "pix_fmt" ) )
-                                       params->pix_fmt = av_get_pix_fmt( value );
+       char *filename = parse_url( profile, URL, &format, &params );
+
+       // Now attempt to open the file or device with filename
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+       error = avformat_open_input( &self->video_format, filename, format, &params ) < 0;
+       if ( error )
+               // If the URL is a network stream URL, then we probably need to open with full URL
+               error = avformat_open_input( &self->video_format, URL, format, &params ) < 0;
+#else
+       error = av_open_input_file( &self->video_format, filename, format, 0, &params ) < 0;
+       if ( error )
+               // If the URL is a network stream URL, then we probably need to open with full URL
+               error = av_open_input_file( &self->video_format, URL, format, 0, &params ) < 0;
 #endif
-                               else if ( !strcmp( name, "width" ) )
-                                       params->width = atoi( value );
-                               else if ( !strcmp( name, "height" ) )
-                                       params->height = atoi( value );
-                               else if ( !strcmp( name, "standard" ) )
-                               {
-                                       standard = strdup( value );
-                                       params->standard = standard;
-                               }
-                               else if ( !strcmp( name, "av" ) )
-                                       av = atoi( value );
-                       }
-                       free( name );
-                       mrl = strchr( mrl, '&' );
-               }
-       }
 
-       // Now attempt to open the file
-       error = av_open_input_file( &context, file, format, 0, params ) < 0;
+       // Set MLT properties onto video AVFormatContext
+       if ( !error && self->video_format )
+       {
+               apply_properties( self->video_format, properties, AV_OPT_FLAG_DECODING_PARAM );
+               if ( self->video_format->iformat && self->video_format->iformat->priv_class && self->video_format->priv_data )
+                       apply_properties( self->video_format->priv_data, properties, AV_OPT_FLAG_DECODING_PARAM );
+       }
 
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+       av_dict_free( &params );
+#else
        // Cleanup AVFormatParameters
-       free( standard );
-       free( params );
+       if ( params.standard )
+               free( (void*) params.standard );
+#endif
 
        // If successful, then try to get additional info
-       if ( !error )
+       if ( !error && self->video_format )
        {
                // Get the stream info
-               error = av_find_stream_info( context ) < 0;
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+               error = avformat_find_stream_info( self->video_format, NULL ) < 0;
+#else
+               error = av_find_stream_info( self->video_format ) < 0;
+#endif
 
                // Continue if no error
-               if ( !error )
+               if ( !error && self->video_format )
                {
-                       // We will default to the first audio and video streams found
-                       int audio_index = -1;
-                       int video_index = -1;
-
                        // Find default audio and video streams
-                       find_default_streams( properties, context, &audio_index, &video_index );
-
-                       // Now set properties where we can (use default unknowns if required)
-                       if ( context->duration != AV_NOPTS_VALUE )
-                       {
-                               // This isn't going to be accurate for all formats
-                               mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps );
-                               mlt_properties_set_position( properties, "out", frames - 1 );
-                               mlt_properties_set_position( properties, "length", frames );
-                       }
+                       find_default_streams( self );
+                       error = get_basic_info( self, profile, filename );
 
-                       if ( context->start_time != AV_NOPTS_VALUE )
-                               self->start_time = context->start_time;
-
-                       // Check if we're seekable
-                       // avdevices are typically AVFMT_NOFILE and not seekable
-                       self->seekable = !format || !( format->flags & AVFMT_NOFILE );
-                       if ( context->pb )
-                       {
-                               // protocols can indicate if they support seeking
-#if LIBAVFORMAT_VERSION_MAJOR > 52
-                               self->seekable = context->pb->seekable;
-#else
-                               URLContext *uc = url_fileno( context->pb );
-                               if ( uc )
-                                       self->seekable = !uc->is_streamed;
-#endif
-                       }
-                       if ( self->seekable )
-                       {
-                               self->seekable = av_seek_frame( context, -1, self->start_time, AVSEEK_FLAG_BACKWARD ) >= 0;
-                               mlt_properties_set_int( properties, "seekable", self->seekable );
-                               self->dummy_context = context;
-                               av_open_input_file( &context, file, NULL, 0, NULL );
-                               av_find_stream_info( context );
-                       }
-
-                       // Store selected audio and video indexes on properties
-                       self->audio_index = audio_index;
-                       self->video_index = video_index;
-                       self->first_pts = -1;
+                       // Initialize position info
+                       self->first_pts = AV_NOPTS_VALUE;
                        self->last_position = POSITION_INITIAL;
 
-                       // Fetch the width, height and aspect ratio
-                       if ( video_index != -1 )
+                       if ( !self->audio_format )
                        {
-                               AVCodecContext *codec_context = context->streams[ video_index ]->codec;
-                               mlt_properties_set_int( properties, "width", codec_context->width );
-                               mlt_properties_set_int( properties, "height", codec_context->height );
-
-                               if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
+                               // We're going to cheat here - for seekable A/V files, we will have separate contexts
+                               // to support independent seeking of audio from video.
+                               // TODO: Is this really necessary?
+                               if ( self->audio_index != -1 && self->video_index != -1 )
                                {
-                                       // Fetch the first frame of DV so we can read it directly
-                                       AVPacket pkt;
-                                       int ret = 0;
-                                       while ( ret >= 0 )
+                                       if ( self->seekable )
                                        {
-                                               ret = av_read_frame( context, &pkt );
-                                               if ( ret >= 0 && pkt.stream_index == video_index && pkt.size > 0 )
-                                               {
-                                                       get_aspect_ratio( properties, context->streams[ video_index ], codec_context, &pkt );
-                                                       break;
-                                               }
+                                               // And open again for our audio context
+#if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
+                                               avformat_open_input( &self->audio_format, filename, NULL, NULL );
+                                               apply_properties( self->audio_format, properties, AV_OPT_FLAG_DECODING_PARAM );
+                                               if ( self->audio_format->iformat && self->audio_format->iformat->priv_class && self->audio_format->priv_data )
+                                                       apply_properties( self->audio_format->priv_data, properties, AV_OPT_FLAG_DECODING_PARAM );
+                                               avformat_find_stream_info( self->audio_format, NULL );
+#else
+                                               av_open_input_file( &self->audio_format, filename, NULL, 0, NULL );
+                                               apply_properties( self->audio_format, properties, AV_OPT_FLAG_DECODING_PARAM );
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(110<<8)+0)
+                                               if ( self->audio_format->iformat && self->audio_format->iformat->priv_class && self->audio_format->priv_data )
+                                                       apply_properties( self->audio_format->priv_data, properties, AV_OPT_FLAG_DECODING_PARAM );
+#endif
+                                               av_find_stream_info( self->audio_format );
+#endif
+                                       }
+                                       else
+                                       {
+                                               self->audio_format = self->video_format;
                                        }
                                }
-                               else
+                               else if ( self->audio_index != -1 )
                                {
-                                       get_aspect_ratio( properties, context->streams[ video_index ], codec_context, NULL );
+                                       // We only have an audio context
+                                       self->audio_format = self->video_format;
+                                       self->video_format = NULL;
                                }
-#ifdef SWSCALE
-                               struct SwsContext *context = sws_getContext( codec_context->width, codec_context->height, codec_context->pix_fmt,
-                                       codec_context->width, codec_context->height, PIX_FMT_YUYV422, SWS_BILINEAR, NULL, NULL, NULL);
-                               if ( context )
-                                       sws_freeContext( context );
-                               else
-                                       error = 1;
-#endif
-                       }
-
-                       // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later)
-                       if ( av == 0 && audio_index != -1 && video_index != -1 )
-                       {
-                               // We'll use the open one as our video_format
-                               self->video_format = context;
-
-                               // And open again for our audio context
-                               av_open_input_file( &context, file, NULL, 0, NULL );
-                               av_find_stream_info( context );
-
-                               // Audio context
-                               self->audio_format = context;
-                       }
-                       else if ( av != 2 && video_index != -1 )
-                       {
-                               // We only have a video context
-                               self->video_format = context;
-                       }
-                       else if ( audio_index != -1 )
-                       {
-                               // We only have an audio context
-                               self->audio_format = context;
-                       }
-                       else
-                       {
-                               // Something has gone wrong
-                               error = -1;
+                               else if ( self->video_index == -1 )
+                               {
+                                       // Something has gone wrong
+                                       error = -1;
+                               }
+                               if ( self->audio_format && !self->audio_streams )
+                                       get_audio_streams_info( self );
                        }
                }
        }
+       if ( filename )
+               free( filename );
+       if ( !error )
+       {
+               self->apackets = mlt_deque_init();
+               self->vpackets = mlt_deque_init();
+       }
+
+       if ( self->dummy_context )
+       {
+               pthread_mutex_lock( &self->open_mutex );
+#if LIBAVFORMAT_VERSION_INT >= ((53<<16)+(17<<8)+0)
+               avformat_close_input( &self->dummy_context );
+#else
+               av_close_input_file( self->dummy_context );
+#endif
+               self->dummy_context = NULL;
+               pthread_mutex_unlock( &self->open_mutex );
+       }
 
        // Unlock the service
-       pthread_mutex_unlock( &self->audio_mutex );
-       pthread_mutex_unlock( &self->video_mutex );
+       if ( take_lock )
+       {
+               pthread_mutex_unlock( &self->audio_mutex );
+               pthread_mutex_unlock( &self->video_mutex );
+       }
+       mlt_events_unblock( properties, self->parent );
 
        return error;
 }
 
-void reopen_video( producer_avformat self, mlt_producer producer, mlt_properties properties )
+static void prepare_reopen( producer_avformat self )
 {
-       mlt_service_lock( MLT_PRODUCER_SERVICE( producer ) );
+       mlt_service_lock( MLT_PRODUCER_SERVICE( self->parent ) );
        pthread_mutex_lock( &self->audio_mutex );
+       pthread_mutex_lock( &self->open_mutex );
 
-       if ( self->video_codec )
+       int i;
+       for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )
        {
-               avformat_lock();
-               avcodec_close( self->video_codec );
-               avformat_unlock();
+               mlt_pool_release( self->audio_buffer[i] );
+               self->audio_buffer[i] = NULL;
+               av_free( self->decode_buffer[i] );
+               self->decode_buffer[i] = NULL;
+               if ( self->audio_codec[i] )
+                       avcodec_close( self->audio_codec[i] );
+               self->audio_codec[i] = NULL;
        }
+       if ( self->video_codec )
+               avcodec_close( self->video_codec );
        self->video_codec = NULL;
-       if ( self->dummy_context )
-               av_close_input_file( self->dummy_context );
-       self->dummy_context = NULL;
+
+#if LIBAVFORMAT_VERSION_INT >= ((53<<16)+(17<<8)+0)
+       if ( self->seekable && self->audio_format )
+               avformat_close_input( &self->audio_format );
+       if ( self->video_format )
+               avformat_close_input( &self->video_format );
+#else
+       if ( self->seekable && self->audio_format )
+               av_close_input_file( self->audio_format );
        if ( self->video_format )
                av_close_input_file( self->video_format );
+#endif
+       self->audio_format = NULL;
        self->video_format = NULL;
+       pthread_mutex_unlock( &self->open_mutex );
 
-       int audio_index = self->audio_index;
-       int video_index = self->video_index;
-
-       mlt_events_block( properties, producer );
-       pthread_mutex_unlock( &self->audio_mutex );
-       pthread_mutex_unlock( &self->video_mutex );
-       producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
-               mlt_properties_get( properties, "resource" ) );
-       pthread_mutex_lock( &self->video_mutex );
-       pthread_mutex_lock( &self->audio_mutex );
-       if ( self->dummy_context )
+       // Cleanup the packet queues
+       AVPacket *pkt;
+       if ( self->apackets )
        {
-               av_close_input_file( self->dummy_context );
-               self->dummy_context = NULL;
+               while ( ( pkt = mlt_deque_pop_back( self->apackets ) ) )
+               {
+                       av_free_packet( pkt );
+                       free( pkt );
+               }
+               mlt_deque_close( self->apackets );
+               self->apackets = NULL;
        }
-       mlt_events_unblock( properties, producer );
-       apply_properties( self->video_format, properties, AV_OPT_FLAG_DECODING_PARAM );
-#if LIBAVFORMAT_VERSION_MAJOR > 52
-       if ( self->video_format->iformat && self->video_format->iformat->priv_class && self->video_format->priv_data )
-               apply_properties( self->video_format->priv_data, properties, AV_OPT_FLAG_DECODING_PARAM );
-#endif
+       if ( self->vpackets )
+       {
+               while ( ( pkt = mlt_deque_pop_back( self->vpackets ) ) )
+               {
+                       av_free_packet( pkt );
+                       free( pkt );
+               }
+               mlt_deque_close( self->vpackets );
+               self->vpackets = NULL;
+       }
+       pthread_mutex_unlock( &self->audio_mutex );
+       mlt_service_unlock( MLT_PRODUCER_SERVICE( self->parent ) );
+}
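/* Illustrative sketch, not part of this patch: prepare_reopen() only tears down
   the codecs, format contexts and queued packets; the next call into the
   producer is expected to reopen the resource. A caller opting into this
   behaviour for a non-seekable (live) source would set the properties that
   producer_get_image() consults on read errors, e.g.:

       mlt_properties props = MLT_PRODUCER_PROPERTIES( producer );
       mlt_properties_set_int( props, "reconnect", 1 );
       mlt_properties_set_int( props, "exit_on_disconnect", 0 );
*/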
 
-       self->audio_index = audio_index;
-       if ( self->video_format && video_index > -1 )
+static int64_t best_pts( producer_avformat self, int64_t pts, int64_t dts )
+{
+       self->invalid_pts_counter += pts == AV_NOPTS_VALUE;
+       self->invalid_dts_counter += dts == AV_NOPTS_VALUE;
+       if ( ( self->invalid_pts_counter <= self->invalid_dts_counter
+                  || dts == AV_NOPTS_VALUE ) && pts != AV_NOPTS_VALUE )
+               return pts;
+       else
+               return dts;
+}
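/* Worked example, not part of this patch: best_pts() prefers pts only while
   pts has been missing no more often than dts. For instance:
       fresh stream:            pts=300,   dts=280 -> returns 300 (pts)
       pts absent on a packet:  pts=NOPTS, dts=100 -> returns 100 (dts)
       next packet, pts back:   pts=300,   dts=280 -> returns 280, because
                                invalid_pts_counter (1) now exceeds
                                invalid_dts_counter (0), so dts is trusted. */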
+
+static void find_first_pts( producer_avformat self, int video_index )
+{
+       // find initial PTS
+       AVFormatContext *context = self->video_format? self->video_format : self->audio_format;
+       int ret = 0;
+       int toscan = 500;
+       AVPacket pkt;
+
+       while ( ret >= 0 && toscan-- > 0 )
        {
-               self->video_index = video_index;
-               video_codec_init( self, video_index, properties );
+               ret = av_read_frame( context, &pkt );
+               if ( ret >= 0 && pkt.stream_index == video_index && ( pkt.flags & PKT_FLAG_KEY ) )
+               {
+                       mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent),
+                               "first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n",
+                               pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
+                       self->first_pts = best_pts( self, pkt.pts, pkt.dts );
+                       if ( self->first_pts != AV_NOPTS_VALUE )
+                               toscan = 0;
+               }
+               av_free_packet( &pkt );
        }
+       av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
+}
 
-       pthread_mutex_unlock( &self->audio_mutex );
-       mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
+static int seek_video( producer_avformat self, mlt_position position,
+       int64_t req_position, int preseek )
+{
+       mlt_producer producer = self->parent;
+       int paused = 0;
+
+       if ( self->seekable && ( position != self->video_expected || self->last_position < 0 ) )
+       {
+               mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
+
+               // Fetch the video format context
+               AVFormatContext *context = self->video_format;
+
+               // Get the video stream
+               AVStream *stream = context->streams[ self->video_index ];
+
+               // Get codec context
+               AVCodecContext *codec_context = stream->codec;
+
+               // We may want to use the source fps if available
+               double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
+                       mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
+
+               if ( self->last_position == POSITION_INITIAL )
+                       find_first_pts( self, self->video_index );
+
+               if ( self->video_frame && position + 1 == self->video_expected )
+               {
+                       // We're paused - use last image
+                       paused = 1;
+               }
+               else if ( self->seekable && ( position < self->video_expected || position - self->video_expected >= 12 || self->last_position < 0 ) )
+               {
+                       // Calculate the timestamp for the requested frame
+                       int64_t timestamp = req_position / ( av_q2d( self->video_time_base ) * source_fps );
+                       if ( req_position <= 0 )
+                               timestamp = 0;
+                       else if ( self->first_pts != AV_NOPTS_VALUE )
+                               timestamp += self->first_pts;
+                       else if ( context->start_time != AV_NOPTS_VALUE )
+                               timestamp += context->start_time;
+                       if ( preseek && av_q2d( self->video_time_base ) != 0 )
+                               timestamp -= 2 / av_q2d( self->video_time_base );
+                       if ( timestamp < 0 )
+                               timestamp = 0;
+                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %"PRId64" position " MLT_POSITION_FMT " expected "MLT_POSITION_FMT" last_pos %"PRId64"\n",
+                               timestamp, position, self->video_expected, self->last_position );
+
+                       // Seek to the timestamp
+                       codec_context->skip_loop_filter = AVDISCARD_NONREF;
+                       av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
+
+                       // flush any pictures still in decode buffer
+                       avcodec_flush_buffers( codec_context );
+
+                       // Remove the cached info relating to the previous position
+                       self->current_position = POSITION_INVALID;
+                       self->last_position = POSITION_INVALID;
+                       av_freep( &self->video_frame );
+               }
+       }
+       return paused;
 }
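/* Worked example, not part of this patch, assuming a 25 fps stream with an
   MPEG-TS style time base of 1/90000:
       av_q2d( video_time_base ) = 1/90000
       timestamp = req_position / ( (1/90000) * 25 ) = req_position * 3600
   so requesting frame 250 (10 s in) seeks to timestamp 900000, shifted by
   first_pts (or the container start_time) when known. With preseek the target
   is moved back by 2 / av_q2d( video_time_base ) = 180000 ticks, i.e. two
   seconds, so that reference frames preceding the wanted frame get decoded. */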
 
 /** Convert a frame position to a time code.
@@ -855,8 +1003,12 @@ static void get_audio_streams_info( producer_avformat self )
                        AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
 
                        // If we don't have a codec and we can't initialise it, we can't do much more...
-                       avformat_lock( );
+                       pthread_mutex_lock( &self->open_mutex );
+#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
+                       if ( codec && avcodec_open2( codec_context, codec, NULL ) >= 0 )
+#else
                        if ( codec && avcodec_open( codec_context, codec ) >= 0 )
+#endif
                        {
                                self->audio_streams++;
                                self->audio_max_stream = i;
@@ -867,19 +1019,15 @@ static void get_audio_streams_info( producer_avformat self )
                                        self->max_frequency = codec_context->sample_rate;
                                avcodec_close( codec_context );
                        }
-                       avformat_unlock( );
+                       pthread_mutex_unlock( &self->open_mutex );
                }
        }
        mlt_log_verbose( NULL, "[producer avformat] audio: total_streams %d max_stream %d total_channels %d max_channels %d\n",
                self->audio_streams, self->audio_max_stream, self->total_channels, self->max_channel );
-       
-       // Other audio-specific initializations
-       self->resample_factor = 1.0;
 }
 
-static void set_luma_transfer( struct SwsContext *context, int colorspace, int use_full_range )
+static void set_luma_transfer( struct SwsContext *context, int yuv_colorspace, int use_full_range )
 {
-#if defined(SWSCALE) && (LIBSWSCALE_VERSION_INT >= ((0<<16)+(7<<8)+2))
        int *coefficients;
        const int *new_coefficients;
        int full_range;
@@ -891,7 +1039,7 @@ static void set_luma_transfer( struct SwsContext *context, int colorspace, int u
                // Don't change these from defaults unless explicitly told to.
                if ( use_full_range >= 0 )
                        full_range = use_full_range;
-               switch ( colorspace )
+               switch ( yuv_colorspace )
                {
                case 170:
                case 470:
@@ -912,15 +1060,67 @@ static void set_luma_transfer( struct SwsContext *context, int colorspace, int u
                sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range,
                        brightness, contrast, saturation );
        }
+}
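/* Illustrative sketch, not part of this patch: the numeric yuv_colorspace
   values above come from MLT's colorspace metadata (170/470/601 for the
   BT.601 family, 709 for BT.709, 240 for SMPTE 240M). The branches elided
   from this hunk presumably resolve them to libswscale coefficient tables,
   along the lines of:

       const int *coeffs;
       switch ( yuv_colorspace )
       {
       case 709:
           coeffs = sws_getCoefficients( SWS_CS_ITU709 );
           break;
       case 240:
           coeffs = sws_getCoefficients( SWS_CS_SMPTE240M );
           break;
       default:
           coeffs = sws_getCoefficients( SWS_CS_ITU601 );
           break;
       }
       sws_setColorspaceDetails( context, coeffs, full_range, coeffs,
           full_range, brightness, contrast, saturation );
*/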
+
+static mlt_image_format pick_pix_format( enum PixelFormat pix_fmt )
+{
+       switch ( pix_fmt )
+       {
+       case PIX_FMT_ARGB:
+       case PIX_FMT_RGBA:
+       case PIX_FMT_ABGR:
+       case PIX_FMT_BGRA:
+               return mlt_image_rgb24a;
+       case PIX_FMT_YUV420P:
+       case PIX_FMT_YUVJ420P:
+       case PIX_FMT_YUVA420P:
+               return mlt_image_yuv420p;
+       case PIX_FMT_RGB24:
+       case PIX_FMT_BGR24:
+       case PIX_FMT_GRAY8:
+       case PIX_FMT_MONOWHITE:
+       case PIX_FMT_MONOBLACK:
+       case PIX_FMT_RGB8:
+       case PIX_FMT_BGR8:
+               return mlt_image_rgb24;
+       default:
+               return mlt_image_yuv422;
+       }
+}
+
+static mlt_audio_format pick_audio_format( int sample_fmt )
+{
+       switch ( sample_fmt )
+       {
+       // interleaved
+       case AV_SAMPLE_FMT_U8:
+               return mlt_audio_u8;
+       case AV_SAMPLE_FMT_S16:
+               return mlt_audio_s16;
+       case AV_SAMPLE_FMT_S32:
+               return mlt_audio_s32le;
+       case AV_SAMPLE_FMT_FLT:
+               return mlt_audio_f32le;
+       // planar - this producer converts planar to interleaved
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
+       case AV_SAMPLE_FMT_U8P:
+               return mlt_audio_u8;
+       case AV_SAMPLE_FMT_S16P:
+               return mlt_audio_s16;
+       case AV_SAMPLE_FMT_S32P:
+               return mlt_audio_s32le;
+       case AV_SAMPLE_FMT_FLTP:
+               return mlt_audio_f32le;
 #endif
+       default:
+               return mlt_audio_none;
+       }
 }
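/* Illustrative sketch, not part of this patch: the planar-to-interleaved
   conversion mentioned in the comment above is, at its core, a per-sample
   transpose of the decoder's planes. The names dst, nb_samples, channels and
   bps are assumptions for illustration, not taken from this file; frame is a
   decoded AVFrame:

       int s, c;
       for ( s = 0; s < nb_samples; s++ )
           for ( c = 0; c < channels; c++ )
               memcpy( dst + ( s * channels + c ) * bps,
                       frame->extended_data[ c ] + s * bps, bps );
*/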
 
-static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
-       mlt_image_format *format, int width, int height, int colorspace )
+static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buffer, int pix_fmt,
+       mlt_image_format *format, int width, int height, uint8_t **alpha )
 {
-#ifdef SWSCALE
-       int full_range = -1;
-       int flags = SWS_BILINEAR | SWS_ACCURATE_RND;
+       int flags = SWS_BICUBIC | SWS_ACCURATE_RND;
 
 #ifdef USE_MMX
        flags |= SWS_CPU_CAPS_MMX;
@@ -929,19 +1129,26 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
        flags |= SWS_CPU_CAPS_MMX2;
 #endif
 
-       if ( pix_fmt == PIX_FMT_RGB32 )
+       // extract alpha from planar formats
+       if ( ( pix_fmt == PIX_FMT_YUVA420P
+#if defined(FFUDIV) && LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
+                       || pix_fmt == PIX_FMT_YUVA444P
+#endif
+                       ) &&
+               *format != mlt_image_rgb24a && *format != mlt_image_opengl &&
+               frame->data[3] && frame->linesize[3] )
        {
-               *format = mlt_image_rgb24a;
-               struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-                       width, height, PIX_FMT_RGBA, flags, NULL, NULL, NULL);
-               AVPicture output;
-               avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
-               set_luma_transfer( context, colorspace, full_range );
-               sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
-                       output.data, output.linesize);
-               sws_freeContext( context );
+               int i;
+               uint8_t *src, *dst;
+
+               dst = *alpha = mlt_pool_alloc( width * height );
+               src = frame->data[3];
+
+               for ( i = 0; i < height; dst += width, src += frame->linesize[3], i++ )
+                       memcpy( dst, src, FFMIN( width, frame->linesize[3] ) );
        }
-       else if ( *format == mlt_image_yuv420p )
+
+       if ( *format == mlt_image_yuv420p )
        {
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
                        width, height, PIX_FMT_YUV420P, flags, NULL, NULL, NULL);
@@ -952,7 +1159,7 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                output.linesize[0] = width;
                output.linesize[1] = width >> 1;
                output.linesize[2] = width >> 1;
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->yuv_colorspace, -1 );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -963,7 +1170,7 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                        width, height, PIX_FMT_RGB24, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->yuv_colorspace, self->full_luma );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -974,7 +1181,7 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                        width, height, PIX_FMT_RGBA, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->yuv_colorspace, self->full_luma );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -985,42 +1192,11 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
                        width, height, PIX_FMT_YUYV422, flags | SWS_FULL_CHR_H_INP, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
-               set_luma_transfer( context, colorspace, full_range );
+               set_luma_transfer( context, self->yuv_colorspace, -1 );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
        }
-#else
-       if ( *format == mlt_image_yuv420p )
-       {
-               AVPicture pict;
-               pict.data[0] = buffer;
-               pict.data[1] = buffer + width * height;
-               pict.data[2] = buffer + ( 5 * width * height ) / 4;
-               pict.linesize[0] = width;
-               pict.linesize[1] = width >> 1;
-               pict.linesize[2] = width >> 1;
-               img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height );
-       }
-       else if ( *format == mlt_image_rgb24 )
-       {
-               AVPicture output;
-               avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
-               img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
-       }
-       else if ( format == mlt_image_rgb24a || format == mlt_image_opengl )
-       {
-               AVPicture output;
-               avpicture_fill( &output, buffer, PIX_FMT_RGB32, width, height );
-               img_convert( &output, PIX_FMT_RGB32, (AVPicture *)frame, pix_fmt, width, height );
-       }
-       else
-       {
-               AVPicture output;
-               avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
-               img_convert( &output, PIX_FMT_YUYV422, (AVPicture *)frame, pix_fmt, width, height );
-       }
-#endif
 }
 
 /** Allocate the image buffer and set it on the frame.
@@ -1033,15 +1209,12 @@ static int allocate_buffer( mlt_frame frame, AVCodecContext *codec_context, uint
        if ( codec_context->width == 0 || codec_context->height == 0 )
                return size;
 
+       if ( *format == mlt_image_glsl )
+               *format = pick_pix_format( codec_context->pix_fmt );
+
        *width = codec_context->width;
        *height = codec_context->height;
-
-       if ( codec_context->pix_fmt == PIX_FMT_RGB32 )
-               size = *width * ( *height + 1 ) * 4;
-       else
-               size = mlt_image_format_size( *format, *width, *height, NULL );
-
-       // Construct the output image
+       size = mlt_image_format_size( *format, *width, *height, NULL );
        *buffer = mlt_pool_alloc( size );
        if ( *buffer )
                mlt_frame_set_image( frame, *buffer, size, mlt_pool_release );
@@ -1064,15 +1237,21 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
 
        // Obtain the frame number of this frame
-       mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
+       mlt_position position = mlt_frame_original_position( frame );
 
        // Get the producer properties
        mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
 
        pthread_mutex_lock( &self->video_mutex );
 
+       uint8_t *alpha = NULL;
+       int got_picture = 0;
+       int image_size = 0;
+
        // Fetch the video format context
        AVFormatContext *context = self->video_format;
+       if ( !context )
+               goto exit_get_image;
 
        // Get the video stream
        AVStream *stream = context->streams[ self->video_index ];
@@ -1081,14 +1260,43 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        AVCodecContext *codec_context = stream->codec;
 
        // Get the image cache
-       if ( ! self->image_cache && ! mlt_properties_get_int( properties, "noimagecache" ) )
-               self->image_cache = mlt_cache_init();
+       if ( ! self->image_cache )
+       {
+               // if cache size supplied by environment variable
+               int cache_supplied = getenv( "MLT_AVFORMAT_CACHE" ) != NULL;
+               int cache_size = cache_supplied? atoi( getenv( "MLT_AVFORMAT_CACHE" ) ) : 0;
+
+               // cache size supplied via property
+               if ( mlt_properties_get( properties, "cache" ) )
+               {
+                       cache_supplied = 1;
+                       cache_size = mlt_properties_get_int( properties, "cache" );
+               }
+               if ( mlt_properties_get_int( properties, "noimagecache" ) )
+                       cache_size = 0;
+               // create cache if not disabled
+               if ( !cache_supplied || cache_size > 0 )
+                       self->image_cache = mlt_cache_init();
+               // set cache size if supplied
+               if ( self->image_cache && cache_supplied )
+                       mlt_cache_set_size( self->image_cache, cache_size );
+       }
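/* Illustrative sketch, not part of this patch: the block above lets the image
   cache size come either from the environment or from a producer property,
   with the property taking precedence, e.g.:

       shell:             MLT_AVFORMAT_CACHE=4 melt clip.mov
       application code:  mlt_properties_set_int(
                              MLT_PRODUCER_PROPERTIES( producer ), "cache", 4 );
*/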
        if ( self->image_cache )
        {
-               mlt_cache_item item = mlt_cache_get( self->image_cache, (void*) position );
-               uint8_t *original = mlt_cache_item_data( item, (int*) format );
+               mlt_frame original = mlt_cache_get_frame( self->image_cache, position );
                if ( original )
                {
+                       mlt_properties orig_props = MLT_FRAME_PROPERTIES( original );
+                       int size = 0;
+
+                       *buffer = mlt_properties_get_data( orig_props, "alpha", &size );
+                       if (*buffer)
+                               mlt_frame_set_alpha( frame, *buffer, size, NULL );
+                       *buffer = mlt_properties_get_data( orig_props, "image", &size );
+                       mlt_frame_set_image( frame, *buffer, size, NULL );
+                       mlt_properties_set_data( frame_properties, "avformat.image_cache", original, 0, (mlt_destructor) mlt_frame_close, NULL );
+                       *format = mlt_properties_get_int( orig_props, "format" );
+
                        // Set the resolution
                        *width = codec_context->width;
                        *height = codec_context->height;
@@ -1097,162 +1305,53 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                        if ( *height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
                                *height = 1080;
 
-                       // Cache hit
-                       int size = mlt_image_format_size( *format, *width, *height, NULL );
-                       if ( writable )
-                       {
-                               *buffer = mlt_pool_alloc( size );
-                               mlt_frame_set_image( frame, *buffer, size, mlt_pool_release );
-                               memcpy( *buffer, original, size );
-                               mlt_cache_item_close( item );
-                       }
-                       else
-                       {
-                               *buffer = original;
-                               mlt_properties_set_data( frame_properties, "avformat.image_cache", item, 0, ( mlt_destructor )mlt_cache_item_close, NULL );
-                               mlt_frame_set_image( frame, *buffer, size, NULL );
-                       }
-                       self->got_picture = 1;
-
+                       got_picture = 1;
                        goto exit_get_image;
                }
-       }
-       // Cache miss
-       int image_size = 0;
-
-       // Packet
-       AVPacket pkt;
-
-       // Special case pause handling flag
-       int paused = 0;
-
-       // Special case ffwd handling
-       int ignore = 0;
-
-       // We may want to use the source fps if available
-       double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
-               mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
-       double fps = mlt_producer_get_fps( producer );
-
-       // This is the physical frame position in the source
-       int req_position = ( int )( position / fps * source_fps + 0.5 );
-
-       // Determines if we have to decode all frames in a sequence
-       // Temporary hack to improve intra frame only
-       int must_decode = strcmp( codec_context->codec->name, "dnxhd" ) &&
-                                 strcmp( codec_context->codec->name, "dvvideo" ) &&
-                                 strcmp( codec_context->codec->name, "huffyuv" ) &&
-                                 strcmp( codec_context->codec->name, "mjpeg" ) &&
-                                 strcmp( codec_context->codec->name, "rawvideo" );
-
-       int last_position = self->last_position;
-
-       // Turn on usage of new seek API and PTS for seeking
-       int use_new_seek = codec_context->codec_id == CODEC_ID_H264 && !strcmp( context->iformat->name, "mpegts" );
-       if ( mlt_properties_get( properties, "new_seek" ) )
-               use_new_seek = mlt_properties_get_int( properties, "new_seek" );
-
-       // Seek if necessary
-       if ( position != self->video_expected || last_position < 0 )
-       {
-               if ( self->av_frame && position + 1 == self->video_expected )
-               {
-                       // We're paused - use last image
-                       paused = 1;
-               }
-               else if ( !self->seekable && position > self->video_expected && ( position - self->video_expected ) < 250 )
-               {
-                       // Fast forward - seeking is inefficient for small distances - just ignore following frames
-                       ignore = ( int )( ( position - self->video_expected ) / fps * source_fps );
-                       codec_context->skip_loop_filter = AVDISCARD_NONREF;
-               }
-               else if ( self->seekable && ( position < self->video_expected || position - self->video_expected >= 12 || last_position < 0 ) )
-               {
-                       if ( use_new_seek && last_position == POSITION_INITIAL )
-                       {
-                               // find first key frame
-                               int ret = 0;
-                               int toscan = 100;
+       }
+       // Cache miss
 
-                               while ( ret >= 0 && toscan-- > 0 )
-                               {
-                                       ret = av_read_frame( context, &pkt );
-                                       if ( ret >= 0 && ( pkt.flags & PKT_FLAG_KEY ) && pkt.stream_index == self->video_index )
-                                       {
-                                               mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n", pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
-                                               self->first_pts = pkt.pts;
-                                               toscan = 0;
-                                       }
-                                       av_free_packet( &pkt );
-                               }
-                               // Rewind
-                               av_seek_frame( context, -1, 0, AVSEEK_FLAG_BACKWARD );
-                       }
+       // We may want to use the source fps if available
+       double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
+               mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
 
-                       // Calculate the timestamp for the requested frame
-                       int64_t timestamp;
-                       if ( use_new_seek )
-                       {
-                               timestamp = ( req_position - 0.1 / source_fps ) /
-                                       ( av_q2d( stream->time_base ) * source_fps );
-                               mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "pos %d pts %"PRId64" ", req_position, timestamp );
-                               if ( self->first_pts > 0 )
-                                       timestamp += self->first_pts;
-                               else if ( context->start_time != AV_NOPTS_VALUE )
-                                       timestamp += context->start_time;
-                       }
-                       else
-                       {
-                               timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 );
-                               if ( context->start_time != AV_NOPTS_VALUE )
-                                       timestamp += context->start_time;
-                       }
-                       if ( must_decode )
-                               timestamp -= AV_TIME_BASE;
-                       if ( timestamp < 0 )
-                               timestamp = 0;
-                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %"PRId64" position %d expected %d last_pos %d\n",
-                               timestamp, position, self->video_expected, last_position );
+       // This is the physical frame position in the source
+       int64_t req_position = ( int64_t )( position / mlt_producer_get_fps( producer ) * source_fps + 0.5 );
 
-                       // Seek to the timestamp
-                       if ( use_new_seek )
-                       {
-                               codec_context->skip_loop_filter = AVDISCARD_NONREF;
-                               av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
-                       }
-                       else if ( req_position > 0 || last_position <= 0 )
-                       {
-                               av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD );
-                       }
-                       else
-                       {
-                               // Re-open video stream when rewinding to beginning from somewhere else.
-                               // This is rather ugly, and I prefer not to do it this way, but ffmpeg is
-                               // not reliably seeking to the first frame across formats.
-                               reopen_video( self, producer, properties );
-                               context = self->video_format;
-                               stream = context->streams[ self->video_index ];
-                               codec_context = stream->codec;
-                       }
+       // Determines if we have to decode all frames in a sequence
+       // Temporary hack to improve intra frame only
+       int must_decode = !( codec_context->codec && codec_context->codec->name ) || (
+                                 strcmp( codec_context->codec->name, "dnxhd" ) &&
+                                 strcmp( codec_context->codec->name, "dvvideo" ) &&
+                                 strcmp( codec_context->codec->name, "huffyuv" ) &&
+                                 strcmp( codec_context->codec->name, "mjpeg" ) &&
+                                 strcmp( codec_context->codec->name, "rawvideo" ) );
 
-                       // Remove the cached info relating to the previous position
-                       self->current_position = POSITION_INVALID;
-                       self->last_position = POSITION_INVALID;
-                       av_freep( &self->av_frame );
+       double delay = mlt_properties_get_double( properties, "video_delay" );
 
-                       if ( use_new_seek )
-                       {
-                               // flush any pictures still in decode buffer
-                               avcodec_flush_buffers( codec_context );
-                       }
-               }
-       }
+       // Seek if necessary
+       const char *interp = mlt_properties_get( frame_properties, "rescale.interp" );
+       int preseek = must_decode
+#if defined(FFUDIV) && LIBAVFORMAT_VERSION_INT >= ((53<<16)+(24<<8)+2)
+               && ( interp && strcmp( interp, "nearest" ) )
+#endif
+               && codec_context->has_b_frames;
+       int paused = seek_video( self, position, req_position, preseek );
+
+       // Seek might have reopened the file
+       context = self->video_format;
+       stream = context->streams[ self->video_index ];
+       codec_context = stream->codec;
+       if ( *format == mlt_image_none ||
+                       codec_context->pix_fmt == PIX_FMT_ARGB ||
+                       codec_context->pix_fmt == PIX_FMT_RGBA ||
+                       codec_context->pix_fmt == PIX_FMT_ABGR ||
+                       codec_context->pix_fmt == PIX_FMT_BGRA )
+               *format = pick_pix_format( codec_context->pix_fmt );
 
        // Duplicate the last image if necessary
-       if ( self->av_frame && self->av_frame->linesize[0] && self->got_picture && self->seekable
-                && ( paused
-                         || self->current_position == req_position
-                         || ( !use_new_seek && self->current_position > req_position ) ) )
+       if ( self->video_frame && self->video_frame->linesize[0]
+                && ( paused || self->current_position >= req_position ) )
        {
                // Duplicate it
                if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
@@ -1270,119 +1369,127 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[0] = codec_context->width;
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
-                               convert_image( (AVFrame*) &picture, *buffer,
-                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace );
+                               convert_image( self, (AVFrame*) &picture, *buffer,
+                                       PIX_FMT_YUV420P, format, *width, *height, &alpha );
                        }
                        else
 #endif
-                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                               format, *width, *height, self->colorspace );
+                       convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+                               format, *width, *height, &alpha );
+                       got_picture = 1;
                }
-               else
-                       mlt_frame_get_image( frame, buffer, format, width, height, writable );
        }
        else
        {
                int ret = 0;
-               int int_position = 0;
+               int64_t int_position = 0;
                int decode_errors = 0;
-               int got_picture = 0;
-
-               av_init_packet( &pkt );
 
                // Construct an AVFrame for YUV422 conversion
-               if ( !self->av_frame )
-                       self->av_frame = avcodec_alloc_frame( );
+               if ( !self->video_frame )
+                       self->video_frame = avcodec_alloc_frame( );
 
                while( ret >= 0 && !got_picture )
                {
                        // Read a packet
-                       ret = av_read_frame( context, &pkt );
-
-                       // We only deal with video from the selected video_index
-                       if ( ret >= 0 && pkt.stream_index == self->video_index && pkt.size > 0 )
+                       if ( self->pkt.stream_index == self->video_index )
+                               av_free_packet( &self->pkt );
+                       av_init_packet( &self->pkt );
+                       pthread_mutex_lock( &self->packets_mutex );
+                       if ( mlt_deque_count( self->vpackets ) )
                        {
-                               // Determine time code of the packet
-                               if ( use_new_seek )
+                               AVPacket *tmp = (AVPacket*) mlt_deque_pop_front( self->vpackets );
+                               self->pkt = *tmp;
+                               free( tmp );
+                       }
+                       else
+                       {
+                               ret = av_read_frame( context, &self->pkt );
+                               if ( ret >= 0 && !self->seekable && self->pkt.stream_index == self->audio_index )
                                {
-                                       int64_t pts = pkt.pts;
-                                       if ( self->first_pts > 0 )
-                                               pts -= self->first_pts;
-                                       else if ( context->start_time != AV_NOPTS_VALUE )
-                                               pts -= context->start_time;
-                                       int_position = ( int )( av_q2d( stream->time_base ) * pts * source_fps + 0.1 );
-                                       if ( pkt.pts == AV_NOPTS_VALUE )
-                                       {
-                                               self->invalid_pts_counter++;
-                                               if ( self->invalid_pts_counter > 20 )
-                                               {
-                                                       mlt_log_panic( MLT_PRODUCER_SERVICE(producer), "\ainvalid PTS; DISABLING NEW_SEEK!\n" );
-                                                       mlt_properties_set_int( properties, "new_seek", 0 );
-                                                       int_position = req_position;
-                                                       use_new_seek = 0;
-                                               }
-                                       }
-                                       else
+                                       if ( !av_dup_packet( &self->pkt ) )
                                        {
-                                               self->invalid_pts_counter = 0;
+                                               AVPacket *tmp = malloc( sizeof(AVPacket) );
+                                               *tmp = self->pkt;
+                                               mlt_deque_push_back( self->apackets, tmp );
                                        }
-                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.pts %"PRId64" req_pos %d cur_pos %d pkt_pos %d\n",
-                                               pkt.pts, req_position, self->current_position, int_position );
                                }
-                               else
+                               else if ( ret < 0 )
                                {
-                                       if ( self->seekable && pkt.dts != AV_NOPTS_VALUE )
+                                       mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "av_read_frame returned error %d inside get_image\n", ret );
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "reconnect" ) )
                                        {
-                                               int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 );
-                                               if ( context->start_time != AV_NOPTS_VALUE )
-                                                       int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 );
-                                               last_position = self->last_position;
-                                               if ( int_position == last_position )
-                                                       int_position = last_position + 1;
+                                               // Try to reconnect to live sources by closing the context and codecs,
+                                               // and letting the next call to get_frame() reopen them.
+                                               prepare_reopen( self );
+                                               pthread_mutex_unlock( &self->packets_mutex );
+                                               goto exit_get_image;
                                        }
-                                       else
-                                       {
-                                               int_position = req_position;
-                                       }
-                                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.dts %"PRId64" req_pos %d cur_pos %d pkt_pos %d\n",
-                                               pkt.dts, req_position, self->current_position, int_position );
-                                       // Make a dumb assumption on streams that contain wild timestamps
-                                       if ( abs( req_position - int_position ) > 999 )
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "exit_on_disconnect" ) )
                                        {
-                                               int_position = req_position;
-                                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " WILD TIMESTAMP!" );
+                                               mlt_log_fatal( MLT_PRODUCER_SERVICE(producer), "Exiting with error due to disconnected source.\n" );
+                                               exit( EXIT_FAILURE );
                                        }
                                }
+                       }
+                       pthread_mutex_unlock( &self->packets_mutex );
+
+                       // We only deal with video from the selected video_index
+                       if ( ret >= 0 && self->pkt.stream_index == self->video_index && self->pkt.size > 0 )
+                       {
+                               int64_t pts = best_pts( self, self->pkt.pts, self->pkt.dts );
+                               if ( pts != AV_NOPTS_VALUE )
+                               {
+                                       if ( !self->seekable && self->first_pts == AV_NOPTS_VALUE )
+                                               self->first_pts = pts;
+                                       if ( self->first_pts != AV_NOPTS_VALUE )
+                                               pts -= self->first_pts;
+                                       else if ( context->start_time != AV_NOPTS_VALUE )
+                                               pts -= context->start_time;
+                                       int_position = ( int64_t )( ( av_q2d( self->video_time_base ) * pts + delay ) * source_fps + 0.5 );
+                                       if ( int_position == self->last_position )
+                                               int_position = self->last_position + 1;
+                               }
+                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer),
+                                       "V pkt.pts %"PRId64" pkt.dts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
+                                       self->pkt.pts, self->pkt.dts, req_position, self->current_position, int_position );
+
+                               // Make a dumb assumption on streams that contain wild timestamps
+                               if ( abs( req_position - int_position ) > 999 )
+                               {
+                                       int_position = req_position;
+                                       mlt_log_warning( MLT_PRODUCER_SERVICE(producer), " WILD TIMESTAMP!\n" );
+                               }
                                self->last_position = int_position;
 
                                // Decode the image
                                if ( must_decode || int_position >= req_position )
                                {
 #ifdef VDPAU
-                                       if ( g_vdpau && self->vdpau )
+                                       if ( self->vdpau )
                                        {
-                                               if ( g_vdpau->producer != self )
+                                               if ( self->vdpau->decoder == VDP_INVALID_HANDLE )
                                                {
-                                                       vdpau_decoder_close();
                                                        vdpau_decoder_init( self );
                                                }
-                                               if ( self->vdpau )
-                                                       self->vdpau->is_decoded = 0;
+                                               self->vdpau->is_decoded = 0;
                                        }
 #endif
-                                       codec_context->reordered_opaque = pkt.pts;
+                                       codec_context->reordered_opaque = int_position;
                                        if ( int_position >= req_position )
                                                codec_context->skip_loop_filter = AVDISCARD_NONE;
 #if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
-                                       ret = avcodec_decode_video2( codec_context, self->av_frame, &got_picture, &pkt );
+                                       ret = avcodec_decode_video2( codec_context, self->video_frame, &got_picture, &self->pkt );
 #else
-                                       ret = avcodec_decode_video( codec_context, self->av_frame, &got_picture, pkt.data, pkt.size );
+                                       ret = avcodec_decode_video( codec_context, self->video_frame, &got_picture, self->pkt.data, self->pkt.size );
 #endif
                                        // Note: decoding may fail at the beginning of an MPEG file (B-frames referencing frames before the first I-frame), so allow a few errors.
                                        if ( ret < 0 )
                                        {
                                                if ( ++decode_errors <= 10 )
                                                        ret = 0;
+                                               else
+                                                       mlt_log_warning( MLT_PRODUCER_SERVICE(producer), "video decoding error %d\n", ret );
                                        }
                                        else
                                        {
@@ -1392,34 +1499,26 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 
                                if ( got_picture )
                                {
-                                       if ( use_new_seek )
+                                       // Get position of reordered frame
+                                       int_position = self->video_frame->reordered_opaque;
+#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(106<<8)+0))
+                                       pts = best_pts( self, self->video_frame->pkt_pts, self->video_frame->pkt_dts );
+                                       if ( pts != AV_NOPTS_VALUE )
                                        {
-                                               // Determine time code of the packet
-                                               int64_t pts = self->av_frame->reordered_opaque;
-                                               if ( self->first_pts > 0 )
+                                               if ( self->first_pts != AV_NOPTS_VALUE )
                                                        pts -= self->first_pts;
                                                else if ( context->start_time != AV_NOPTS_VALUE )
                                                        pts -= context->start_time;
-                                               int_position = ( int )( av_q2d( stream->time_base) * pts * source_fps + 0.1 );
-                                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "got frame %d, key %d\n", int_position, self->av_frame->key_frame );
+                                               int_position = ( int64_t )( ( av_q2d( self->video_time_base ) * pts + delay ) * source_fps + 0.5 );
                                        }
-                                       // Handle ignore
+#endif
+
                                        if ( int_position < req_position )
-                                       {
-                                               ignore = 0;
                                                got_picture = 0;
-                                       }
                                        else if ( int_position >= req_position )
-                                       {
-                                               ignore = 0;
                                                codec_context->skip_loop_filter = AVDISCARD_NONE;
-                                       }
-                                       else if ( ignore -- )
-                                       {
-                                               got_picture = 0;
-                                       }
                                }
-                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, pkt.flags & PKT_FLAG_KEY );
+                               mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, self->pkt.flags & PKT_FLAG_KEY );
                        }
 
                        // Now handle the picture if we have one
@@ -1435,25 +1534,25 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        {
                                                if ( self->vdpau->is_decoded )
                                                {
-                                                       struct vdpau_render_state *render = (struct vdpau_render_state*) self->av_frame->data[0];
+                                                       struct vdpau_render_state *render = (struct vdpau_render_state*) self->video_frame->data[0];
                                                        void *planes[3];
                                                        uint32_t pitches[3];
                                                        VdpYCbCrFormat dest_format = VDP_YCBCR_FORMAT_YV12;
                                                        
                                                        if ( !self->vdpau->buffer )
                                                                self->vdpau->buffer = mlt_pool_alloc( codec_context->width * codec_context->height * 3 / 2 );
-                                                       self->av_frame->data[0] = planes[0] = self->vdpau->buffer;
-                                                       self->av_frame->data[2] = planes[1] = self->vdpau->buffer + codec_context->width * codec_context->height;
-                                                       self->av_frame->data[1] = planes[2] = self->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
-                                                       self->av_frame->linesize[0] = pitches[0] = codec_context->width;
-                                                       self->av_frame->linesize[1] = pitches[1] = codec_context->width / 2;
-                                                       self->av_frame->linesize[2] = pitches[2] = codec_context->width / 2;
+                                                       self->video_frame->data[0] = planes[0] = self->vdpau->buffer;
+                                                       self->video_frame->data[2] = planes[1] = self->vdpau->buffer + codec_context->width * codec_context->height;
+                                                       self->video_frame->data[1] = planes[2] = self->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
+                                                       self->video_frame->linesize[0] = pitches[0] = codec_context->width;
+                                                       self->video_frame->linesize[1] = pitches[1] = codec_context->width / 2;
+                                                       self->video_frame->linesize[2] = pitches[2] = codec_context->width / 2;
 
                                                        VdpStatus status = vdp_surface_get_bits( render->surface, dest_format, planes, pitches );
                                                        if ( status == VDP_STATUS_OK )
                                                        {
-                                                               convert_image( self->av_frame, *buffer, PIX_FMT_YUV420P,
-                                                                       format, *width, *height, self->colorspace );
+                                                               convert_image( self, self->video_frame, *buffer, PIX_FMT_YUV420P,
+                                                                       format, *width, *height, &alpha );
                                                        }
                                                        else
                                                        {
@@ -1469,31 +1568,38 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        }
                                        else
 #endif
-                                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                                               format, *width, *height, self->colorspace );
-                                       self->top_field_first |= self->av_frame->top_field_first;
+                                       convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+                                               format, *width, *height, &alpha );
+                                       self->top_field_first |= self->video_frame->top_field_first;
                                        self->current_position = int_position;
-                                       self->got_picture = 1;
                                }
                                else
                                {
                                        got_picture = 0;
                                }
                        }
-                       if ( ret >= 0 )
-                               av_free_packet( &pkt );
+
+                       // Free packet data if not video and not live audio packet
+                       if ( self->pkt.stream_index != self->video_index &&
+                                !( !self->seekable && self->pkt.stream_index == self->audio_index ) )
+                               av_free_packet( &self->pkt );
                }
        }
 
-       if ( self->got_picture && image_size > 0 && self->image_cache )
+       // set alpha
+       if ( alpha )
+               mlt_frame_set_alpha( frame, alpha, (*width) * (*height), mlt_pool_release );
+
+       if ( image_size > 0 && self->image_cache )
        {
-               // Copy buffer to image cache   
-               uint8_t *image = mlt_pool_alloc( image_size );
-               memcpy( image, *buffer, image_size );
-               mlt_cache_put( self->image_cache, (void*) position, image, *format, mlt_pool_release );
+               mlt_properties_set_int( frame_properties, "format", *format );
+               mlt_cache_put_frame( self->image_cache, frame );
        }
+
        // Try to duplicate last image if there was a decoding failure
-       else if ( !image_size && self->av_frame && self->av_frame->linesize[0] )
+       // TODO: with multithreaded decoding, a partial frame decode that fails
+       // also resets av_frame, making the test below fail.
+       if ( !image_size && self->video_frame && self->video_frame->linesize[0] )
        {
                // Duplicate it
                if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
@@ -1511,17 +1617,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[0] = codec_context->width;
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
-                               convert_image( (AVFrame*) &picture, *buffer,
-                                       PIX_FMT_YUV420P, format, *width, *height, self->colorspace );
+                               convert_image( self, (AVFrame*) &picture, *buffer,
+                                       PIX_FMT_YUV420P, format, *width, *height, &alpha );
                        }
                        else
 #endif
-                       convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
-                               format, *width, *height, self->colorspace );
-                       self->got_picture = 1;
+                       convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+                               format, *width, *height, &alpha );
+                       got_picture = 1;
                }
-               else
-                       mlt_frame_get_image( frame, buffer, format, width, height, writable );
        }
 
        // Regardless of speed, we expect to get the next frame (cos we ain't too bright)
@@ -1534,8 +1638,8 @@ exit_get_image:
        // Set the progressive flag
        if ( mlt_properties_get( properties, "force_progressive" ) )
                mlt_properties_set_int( frame_properties, "progressive", !!mlt_properties_get_int( properties, "force_progressive" ) );
-       else if ( self->av_frame )
-               mlt_properties_set_int( frame_properties, "progressive", !self->av_frame->interlaced_frame );
+       else if ( self->video_frame )
+               mlt_properties_set_int( frame_properties, "progressive", !self->video_frame->interlaced_frame );
 
        // Set the field order property for this frame
        if ( mlt_properties_get( properties, "force_tff" ) )
@@ -1549,7 +1653,12 @@ exit_get_image:
        mlt_properties_set_int( properties, "meta.media.progressive", mlt_properties_get_int( frame_properties, "progressive" ) );
        mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
 
-       return !self->got_picture;
+       // If we already have RGB, then the full range processing either happened already
+       // or does not apply (RGB source).
+       if ( *format == mlt_image_rgb24 || *format == mlt_image_rgb24a || *format == mlt_image_opengl )
+               mlt_properties_set( frame_properties, "force_full_luma", NULL );
+
+       return !got_picture;
 }
 
 /** Process properties as AVOptions and apply to AV context obj
@@ -1562,11 +1671,17 @@ static void apply_properties( void *obj, mlt_properties properties, int flags )
        for ( i = 0; i < count; i++ )
        {
                const char *opt_name = mlt_properties_get_name( properties, i );
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(10<<8)+0)
+               const AVOption *opt = av_opt_find( obj, opt_name, NULL, flags, flags );
+#else
                const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
+#endif
                if ( opt_name && mlt_properties_get( properties, opt_name ) )
                {
                        if ( opt )
-#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0)
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(12<<8)+0)
+                               av_opt_set( obj, opt_name, mlt_properties_get( properties, opt_name), 0 );
+#elif LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0)
                                av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), 0, NULL );
 #elif LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0)
                                av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 );
@@ -1594,7 +1709,7 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                // Find the codec
                AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
 #ifdef VDPAU
-               if ( codec_context->codec_id == CODEC_ID_H264 )
+               if ( codec_context->codec_id == AV_CODEC_ID_H264 )
                {
                        if ( ( codec = avcodec_find_decoder_by_name( "h264_vdpau" ) ) )
                        {
@@ -1602,7 +1717,7 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                                {
                                        self->video_codec = codec_context;
                                        if ( !vdpau_decoder_init( self ) )
-                                               vdpau_decoder_close();
+                                               vdpau_fini( self );
                                }
                        }
                        if ( !self->vdpau )
@@ -1618,8 +1733,12 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                        codec_context->thread_count = thread_count;
 
                // If we don't have a codec and we can't initialise it, we can't do much more...
-               avformat_lock( );
+               pthread_mutex_lock( &self->open_mutex );
+#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
+               if ( codec && avcodec_open2( codec_context, codec, NULL ) >= 0 )
+#else
                if ( codec && avcodec_open( codec_context, codec ) >= 0 )
+#endif
                {
                        // Now store the codec with its destructor
                        self->video_codec = codec_context;
@@ -1628,109 +1747,131 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                {
                        // Remember that we can't use this later
                        self->video_index = -1;
-                       avformat_unlock( );
+                       pthread_mutex_unlock( &self->open_mutex );
                        return 0;
                }
-               avformat_unlock( );
+               pthread_mutex_unlock( &self->open_mutex );
 
                // Process properties as AVOptions
                apply_properties( codec_context, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
-#if LIBAVCODEC_VERSION_MAJOR > 52
+#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(122<<8)+0)
                if ( codec->priv_class && codec_context->priv_data )
                        apply_properties( codec_context->priv_data, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
 #endif
 
                // Reset some image properties
-               mlt_properties_set_int( properties, "width", self->video_codec->width );
-               mlt_properties_set_int( properties, "height", self->video_codec->height );
-               // For DV, we'll just use the saved aspect ratio
-               if ( codec_context->codec_id != CODEC_ID_DVVIDEO )
-                       get_aspect_ratio( properties, stream, self->video_codec, NULL );
-
-               // Determine the fps first from the codec
-               double source_fps = (double) self->video_codec->time_base.den /
-                                                                  ( self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
-               
-               if ( mlt_properties_get( properties, "force_fps" ) )
+               if ( self->video_codec )
                {
-                       source_fps = mlt_properties_get_double( properties, "force_fps" );
-                       stream->time_base = av_d2q( source_fps, 1024 );
-                       mlt_properties_set_int( properties, "meta.media.frame_rate_num", stream->time_base.num );
-                       mlt_properties_set_int( properties, "meta.media.frame_rate_den", stream->time_base.den );
+                       mlt_properties_set_int( properties, "width", self->video_codec->width );
+                       mlt_properties_set_int( properties, "height", self->video_codec->height );
+                       get_aspect_ratio( properties, stream, self->video_codec );
                }
-               else
-               {
-                       // If the muxer reports a frame rate different than the codec
+
+               // Start with the muxer frame rate.
 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
-                       double muxer_fps = av_q2d( stream->avg_frame_rate );
-                       if ( isnan( muxer_fps ) || muxer_fps == 0 )
-                               muxer_fps = av_q2d( stream->r_frame_rate );
+               AVRational frame_rate = stream->avg_frame_rate;
 #else
-                       double muxer_fps = av_q2d( stream->r_frame_rate );
+               AVRational frame_rate = stream->r_frame_rate;
 #endif
-                       // Choose the lesser - the wrong tends to be off by some multiple of 10
-                       source_fps = FFMIN( source_fps, muxer_fps );
-                       if ( source_fps >= 1.0 && ( source_fps < muxer_fps || isnan( muxer_fps ) ) )
-                       {
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", self->video_codec->time_base.den );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
-                       }
-                       else if ( muxer_fps > 0 )
-                       {
-                               AVRational frame_rate = stream->r_frame_rate;
-                               // With my samples when r_frame_rate != 1000 but avg_frame_rate is valid,
-                               // avg_frame_rate gives some approximate value that does not well match the media.
-                               // Also, on my sample where r_frame_rate = 1000, using avg_frame_rate directly
-                               // results in some very choppy output, but some value slightly different works
-                               // great.
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
-                               if ( av_q2d( stream->r_frame_rate ) >= 1000 && av_q2d( stream->avg_frame_rate ) > 0 )
-                                       frame_rate = av_d2q( av_q2d( stream->avg_frame_rate ), 1024 );
+               double fps = av_q2d( frame_rate );
+
+#if LIBAVFORMAT_VERSION_MAJOR < 55
+               // Verify and sanitize the muxer frame rate.
+               if ( isnan( fps ) || isinf( fps ) || fps == 0 )
+               {
+                       frame_rate = stream->r_frame_rate;
+                       fps = av_q2d( frame_rate );
+               }
 #endif
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
-                       }
-                       else
-                       {
-                               source_fps = mlt_producer_get_fps( self->parent );
-                               AVRational frame_rate = av_d2q( source_fps, 255 );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
-                       }
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0) && LIBAVFORMAT_VERSION_MAJOR < 55
+               // With my samples, when r_frame_rate != 1000 but avg_frame_rate is valid,
+               // avg_frame_rate gives an approximate value that does not match the media well.
+               // Also, on my sample where r_frame_rate = 1000, using avg_frame_rate directly
+               // results in very choppy output, but a slightly different value works great.
+               if ( av_q2d( stream->r_frame_rate ) >= 1000 && av_q2d( stream->avg_frame_rate ) > 0 )
+               {
+                       frame_rate = av_d2q( av_q2d( stream->avg_frame_rate ), 1024 );
+                       fps = av_q2d( frame_rate );
+               }
+#endif
+               // XXX frame rates less than 1 fps are not considered sane
+               if ( isnan( fps ) || isinf( fps ) || fps < 1.0 )
+               {
+                       // Get the frame rate from the codec.
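+                       // The codec time base counts ticks rather than frames; for codecs
+                       // such as H.264 and MPEG-2, ticks_per_frame is 2 (field-based),
+                       // so e.g. a 1/50 time base with ticks_per_frame 2 gives 50/(1*2) = 25 fps.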
+                       frame_rate.num = self->video_codec->time_base.den;
+                       frame_rate.den = self->video_codec->time_base.num * self->video_codec->ticks_per_frame;
+                       fps = av_q2d( frame_rate );
+               }
+               if ( isnan( fps ) || isinf( fps ) || fps < 1.0 )
+               {
+                       // Use the profile frame rate if all else fails.
+                       mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self->parent ) );
+                       frame_rate.num = profile->frame_rate_num;
+                       frame_rate.den = profile->frame_rate_den;
                }
 
-               // source_fps is deprecated in favor of meta.media.frame_rate_num and .frame_rate_den
-               if ( source_fps > 0 )
-                       mlt_properties_set_double( properties, "source_fps", source_fps );
-               else
-                       mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( self->parent ) );
+               self->video_time_base = stream->time_base;
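+               // When force_fps overrides the detected frame rate, rescale the stream
+               // time base by the ratio detected/forced so timestamps are interpreted
+               // at the forced rate. E.g. a 30000/1001 fps stream forced to 25 fps
+               // scales the time base by (30000/1001)/(25/1).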
+               if ( mlt_properties_get( properties, "force_fps" ) )
+               {
+                       AVRational force_fps = av_d2q( mlt_properties_get_double( properties, "force_fps" ), 1024 );
+                       self->video_time_base = av_mul_q( stream->time_base, av_div_q( frame_rate, force_fps ) );
+                       frame_rate = force_fps;
+               }
+               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
+               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
 
                // Set the YUV colorspace from override or detect
-               self->colorspace = mlt_properties_get_int( properties, "force_colorspace" );
-#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)              
-               if ( ! self->colorspace )
+               self->yuv_colorspace = mlt_properties_get_int( properties, "force_colorspace" );
+#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)
+               if ( ! self->yuv_colorspace )
                {
                        switch ( self->video_codec->colorspace )
                        {
                        case AVCOL_SPC_SMPTE240M:
-                               self->colorspace = 240;
+                               self->yuv_colorspace = 240;
                                break;
                        case AVCOL_SPC_BT470BG:
                        case AVCOL_SPC_SMPTE170M:
-                               self->colorspace = 601;
+                               self->yuv_colorspace = 601;
                                break;
                        case AVCOL_SPC_BT709:
-                               self->colorspace = 709;
+                               self->yuv_colorspace = 709;
                                break;
                        default:
                                // This is a heuristic Charles Poynton suggests in "Digital Video and HDTV"
-                               self->colorspace = self->video_codec->width * self->video_codec->height > 750000 ? 709 : 601;
+                               self->yuv_colorspace = self->video_codec->width * self->video_codec->height > 750000 ? 709 : 601;
                                break;
                        }
                }
 #endif
                // Let apps get chosen colorspace
-               mlt_properties_set_int( properties, "meta.media.colorspace", self->colorspace );
+               mlt_properties_set_int( properties, "meta.media.colorspace", self->yuv_colorspace );
+
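+               // Record the primaries as a simple code: 601625 = BT.601 625-line (BT.470BG),
+               // 601525 = BT.601 525-line (SMPTE 170M/240M), 709 = BT.709, which is also
+               // the default for unspecified sources.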
+               switch ( self->video_codec->color_primaries )
+               {
+               case AVCOL_PRI_BT470BG:
+                       self->color_primaries = 601625;
+                       break;
+               case AVCOL_PRI_SMPTE170M:
+               case AVCOL_PRI_SMPTE240M:
+                       self->color_primaries = 601525;
+                       break;
+               case AVCOL_PRI_BT709:
+               case AVCOL_PRI_UNSPECIFIED:
+               default:
+                       self->color_primaries = 709;
+                       break;
+               }
+
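+               // full_luma: -1 = unknown, 1 = full-range (0-255) YUV as signalled by the
+               // codec's color_range (AVCOL_RANGE_JPEG); it may be overridden below with
+               // the set.force_full_luma property.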
+               self->full_luma = -1;
+#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(72<<8)+2)
+               mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "color_range %d\n", codec_context->color_range );
+               if ( codec_context->color_range == AVCOL_RANGE_JPEG )
+                       self->full_luma = 1;
+#endif
+               if ( mlt_properties_get( properties, "set.force_full_luma" ) )
+                       self->full_luma = mlt_properties_get_int( properties, "set.force_full_luma" );
        }
        return self->video_codec && self->video_index > -1;
 }
@@ -1752,31 +1893,16 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
        // Get the video_index
        int index = mlt_properties_get_int( properties, "video_index" );
 
+       int unlock_needed = 0;
+
        // Reopen the file if necessary
        if ( !context && index > -1 )
        {
-               mlt_events_block( properties, producer );
+               unlock_needed = 1;
+               pthread_mutex_lock( &self->video_mutex );
                producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
-                       mlt_properties_get( properties, "resource" ) );
+                       mlt_properties_get( properties, "resource" ), 0 );
                context = self->video_format;
-               if ( self->dummy_context )
-               {
-                       av_close_input_file( self->dummy_context );
-                       self->dummy_context = NULL;
-               }
-               mlt_events_unblock( properties, producer );
-               if ( self->audio_format && !self->audio_streams )
-                       get_audio_streams_info( self );
-
-               // Process properties as AVOptions
-               if ( context )
-               {
-                       apply_properties( context, properties, AV_OPT_FLAG_DECODING_PARAM );
-#if LIBAVFORMAT_VERSION_MAJOR > 52
-                       if ( context->iformat && context->iformat->priv_class && context->priv_data )
-                               apply_properties( context->priv_data, properties, AV_OPT_FLAG_DECODING_PARAM );
-#endif
-               }
        }
 
        // Exception handling for video_index
@@ -1800,13 +1926,11 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
        {
                // Reset the video properties if the index changed
                self->video_index = index;
+               pthread_mutex_lock( &self->open_mutex );
                if ( self->video_codec )
-               {
-                       avformat_lock();
                        avcodec_close( self->video_codec );
-                       avformat_unlock();
-               }
                self->video_codec = NULL;
+               pthread_mutex_unlock( &self->open_mutex );
        }
 
        // Get the frame properties
@@ -1823,19 +1947,17 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
                // Set the width and height
                mlt_properties_set_int( frame_properties, "width", self->video_codec->width );
                mlt_properties_set_int( frame_properties, "height", self->video_codec->height );
-               // real_width and real_height are deprecated in favor of meta.media.width and .height
                mlt_properties_set_int( properties, "meta.media.width", self->video_codec->width );
                mlt_properties_set_int( properties, "meta.media.height", self->video_codec->height );
-               mlt_properties_set_int( frame_properties, "real_width", self->video_codec->width );
-               mlt_properties_set_int( frame_properties, "real_height", self->video_codec->height );
                mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
-               mlt_properties_set_int( frame_properties, "colorspace", self->colorspace );
+               mlt_properties_set_int( frame_properties, "colorspace", self->yuv_colorspace );
+               mlt_properties_set_int( frame_properties, "color_primaries", self->color_primaries );
+               mlt_properties_set_int( frame_properties, "full_luma", self->full_luma );
 
                // Workaround 1088 encodings missing cropping info.
                if ( self->video_codec->height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
                {
                        mlt_properties_set_int( properties, "meta.media.height", 1080 );
-                       mlt_properties_set_int( frame_properties, "real_height", 1080 );
                }
 
                // Add our image operation
@@ -1847,25 +1969,31 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
                // If something failed, use test card image
                mlt_properties_set_int( frame_properties, "test_image", 1 );
        }
+       if ( unlock_needed )
+               pthread_mutex_unlock( &self->video_mutex );
 }
 
-static int seek_audio( producer_avformat self, mlt_position position, double timecode, int *ignore )
+static int seek_audio( producer_avformat self, mlt_position position, double timecode )
 {
        int paused = 0;
 
        // Seek if necessary
-       if ( position != self->audio_expected )
+       if ( self->seekable && ( position != self->audio_expected || self->last_position < 0 ) )
        {
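+               // On the first seek, locate the first pts using the video stream (if any)
+               // so that audio and video positions share the same zero point.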
+               if ( self->last_position == POSITION_INITIAL )
+               {
+                       int video_index = self->video_index;
+                       if ( video_index == -1 )
+                               video_index = first_video_index( self );
+                       if ( video_index >= 0 )
+                               find_first_pts( self, video_index );
+               }
+
                if ( position + 1 == self->audio_expected )
                {
                        // We're paused - silence required
                        paused = 1;
                }
-               else if ( !self->seekable && position > self->audio_expected && ( position - self->audio_expected ) < 250 )
-               {
-                       // Fast forward - seeking is inefficient for small distances - just ignore following frames
-                       *ignore = position - self->audio_expected;
-               }
                else if ( position < self->audio_expected || position - self->audio_expected >= 12 )
                {
                        AVFormatContext *context = self->audio_format;
@@ -1890,13 +2018,43 @@ static int seek_audio( producer_avformat self, mlt_position position, double tim
 
 static int sample_bytes( AVCodecContext *context )
 {
-#if LIBAVCODEC_VERSION_MAJOR > 52
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(8<<8)+0)
+       return av_get_bytes_per_sample( context->sample_fmt );
+#elif LIBAVCODEC_VERSION_MAJOR >= 53
        return av_get_bits_per_sample_fmt( context->sample_fmt ) / 8;
 #else
        return av_get_bits_per_sample_format( context->sample_fmt ) / 8;
 #endif
 }
 
+#if LIBAVCODEC_VERSION_MAJOR >= 55
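+// Convert planar audio (one plane per channel) to the interleaved layout MLT expects:
+// for each sample the channels are copied back to back (c0 c1 ... cN). This variant
+// reads directly from the decoded AVFrame planes; the pre-55 variant below assumes
+// the channels are laid out consecutively in a single buffer.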
+static void planar_to_interleaved( uint8_t *dest, AVFrame *src, int samples, int channels, int bytes_per_sample )
+{
+       int s, c;
+       for ( s = 0; s < samples; s++ )
+       {
+               for ( c = 0; c < channels; c++ )
+               {
+                       memcpy( dest, &src->data[c][s * bytes_per_sample], bytes_per_sample );
+                       dest += bytes_per_sample;
+               }
+       }
+}
+#else
+static void planar_to_interleaved( uint8_t *dest, uint8_t *src, int samples, int channels, int bytes_per_sample )
+{
+       int s, c;
+       for ( s = 0; s < samples; s++ )
+       {
+               for ( c = 0; c < channels; c++ )
+               {
+                       memcpy( dest, src + ( c * samples + s ) * bytes_per_sample, bytes_per_sample );
+                       dest += bytes_per_sample;
+               }
+       }
+}
+#endif
+
 static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int channels, int samples, double timecode, double fps )
 {
        // Fetch the audio_format
@@ -1908,9 +2066,6 @@ static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int
        // Get codec context
        AVCodecContext *codec_context = self->audio_codec[ index ];
 
-       // Obtain the resample context if it exists (not always needed)
-       ReSampleContext *resample = self->audio_resample[ index ];
-
        // Obtain the audio buffers
        uint8_t *audio_buffer = self->audio_buffer[ index ];
        uint8_t *decode_buffer = self->decode_buffer[ index ];
@@ -1922,16 +2077,23 @@ static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int
 
        while ( ptr && ret >= 0 && len > 0 )
        {
-               int sizeof_sample = resample? sizeof( int16_t ) : sample_bytes( codec_context );
+               int sizeof_sample = sample_bytes( codec_context );
                int data_size = self->audio_buffer_size[ index ];
 
                // Decode the audio
-#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
-               ret = avcodec_decode_audio3( codec_context, (int16_t*) decode_buffer, &data_size, &pkt );
-#elif (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
-               ret = avcodec_decode_audio2( codec_context, decode_buffer, &data_size, ptr, len );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+               if ( !self->audio_frame )
+                       self->audio_frame = avcodec_alloc_frame();
+               else
+                       avcodec_get_frame_defaults( self->audio_frame );
+               ret = avcodec_decode_audio4( codec_context, self->audio_frame, &data_size, &pkt );
+               if ( data_size ) {
+                       data_size = av_samples_get_buffer_size( NULL, codec_context->channels,
+                               self->audio_frame->nb_samples, codec_context->sample_fmt, 1 );
+                       decode_buffer = self->audio_frame->data[0];
+               }
 #else
-               ret = avcodec_decode_audio( codec_context, decode_buffer, &data_size, ptr, len );
+               ret = avcodec_decode_audio3( codec_context, (int16_t*) decode_buffer, &data_size, &pkt );
 #endif
                if ( ret < 0 )
                {
@@ -1947,57 +2109,74 @@ static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int
                {
                        // Figure out how many samples will be needed after resampling
                        int convert_samples = data_size / codec_context->channels / sample_bytes( codec_context );
-                       int samples_needed = self->resample_factor * convert_samples;
 
                        // Resize audio buffer to prevent overflow
-                       if ( ( audio_used + samples_needed ) * channels * sizeof_sample > self->audio_buffer_size[ index ] )
+                       if ( ( audio_used + convert_samples ) * channels * sizeof_sample > self->audio_buffer_size[ index ] )
                        {
-                               self->audio_buffer_size[ index ] = ( audio_used + samples_needed * 2 ) * channels * sizeof_sample;
+                               self->audio_buffer_size[ index ] = ( audio_used + convert_samples * 2 ) * channels * sizeof_sample;
                                audio_buffer = self->audio_buffer[ index ] = mlt_pool_realloc( audio_buffer, self->audio_buffer_size[ index ] );
                        }
-                       if ( resample )
-                       {
-                               // Copy to audio buffer while resampling
-                               uint8_t *source = decode_buffer;
-                               uint8_t *dest = &audio_buffer[ audio_used * channels * sizeof_sample ];
-                               audio_used += audio_resample( resample, (short*) dest, (short*) source, convert_samples );
-                       }
-                       else
+                       uint8_t *dest = &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ];
+                       switch ( codec_context->sample_fmt )
                        {
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
+                       case AV_SAMPLE_FMT_U8P:
+                       case AV_SAMPLE_FMT_S16P:
+                       case AV_SAMPLE_FMT_S32P:
+                       case AV_SAMPLE_FMT_FLTP:
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+                               planar_to_interleaved( dest, self->audio_frame, convert_samples, codec_context->channels, sizeof_sample );
+#else
+                               planar_to_interleaved( dest, decode_buffer, convert_samples, codec_context->channels, sizeof_sample );
+#endif
+                               break;
+#endif
+                       default:
                                // Straight copy to audio buffer
-                               memcpy( &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ], decode_buffer, data_size );
-                               audio_used += convert_samples;
+                               memcpy( dest, decode_buffer, data_size );
                        }
+                       audio_used += convert_samples;
 
                        // Handle ignore
-                       while ( *ignore && audio_used > samples )
+                       while ( *ignore && audio_used )
                        {
                                *ignore -= 1;
-                               audio_used -= samples;
-                               memmove( audio_buffer, &audio_buffer[ samples * (resample? channels : codec_context->channels) * sizeof_sample ],
+                               audio_used -= audio_used > samples ? samples : audio_used;
+                               memmove( audio_buffer, &audio_buffer[ samples * codec_context->channels * sizeof_sample ],
                                                 audio_used * sizeof_sample );
                        }
                }
        }
 
        // If we're behind, ignore this packet
-       if ( pkt.pts >= 0 )
+       // Skip this on non-seekable, audio-only inputs.
+       if ( pkt.pts >= 0 && ( self->seekable || self->video_format ) && *ignore == 0 && audio_used > samples / 2 )
        {
-               double current_pts = av_q2d( context->streams[ index ]->time_base ) * pkt.pts;
-               int req_position = ( int )( timecode * fps + 0.5 );
-               int int_position = ( int )( current_pts * fps + 0.5 );
-               if ( context->start_time != AV_NOPTS_VALUE )
-                       int_position -= ( int )( fps * context->start_time / AV_TIME_BASE + 0.5 );
-
-               if ( self->seekable && *ignore == 0 )
+               int64_t pts = pkt.pts;
+               if ( self->first_pts != AV_NOPTS_VALUE )
+                       pts -= self->first_pts;
+               else if ( context->start_time != AV_NOPTS_VALUE )
+                       pts -= context->start_time;
+               double timebase = av_q2d( context->streams[ index ]->time_base );
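+               // Convert the packet pts to a frame position: seconds = pts * time_base,
+               // frame = seconds * fps. E.g. with a 1/90000 time base, pts 180000 is 2.0 s,
+               // which at 25 fps is frame 50.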
+               int64_t int_position = ( int64_t )( timebase * pts * fps + 0.5 );
+               int64_t req_position = ( int64_t )( timecode * fps + 0.5 );
+
+               mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent),
+                       "A pkt.pts %"PRId64" pkt.dts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
+                       pkt.pts, pkt.dts, req_position, self->current_position, int_position );
+
+               if ( int_position > 0 )
                {
                        if ( int_position < req_position )
                                // We are behind, so skip some
-                               *ignore = 1;
-                       else if ( int_position > req_position + 2 )
+                               *ignore = req_position - int_position;
+                       else if ( self->audio_index != INT_MAX && int_position > req_position + 2 )
                                // We are ahead, so seek backwards some more
-                               seek_audio( self, req_position, timecode - 1.0, ignore );
+                               seek_audio( self, req_position, timecode - 1.0 );
                }
+               // Cancel the find_first_pts() in seek_audio()
+               if ( self->video_index == -1 && self->last_position == POSITION_INITIAL )
+                       self->last_position = int_position;
        }
 
        self->audio_used[ index ] = audio_used;
@@ -2015,7 +2194,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        pthread_mutex_lock( &self->audio_mutex );
        
        // Obtain the frame number of this frame
-       mlt_position position = mlt_properties_get_position( MLT_FRAME_PROPERTIES( frame ), "avformat_position" );
+       mlt_position position = mlt_frame_original_position( frame );
 
        // Calculate the real time code
        double real_timecode = producer_time_of_frame( self->parent, position );
@@ -2024,13 +2203,20 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        double fps = mlt_producer_get_fps( self->parent );
 
        // Number of frames to ignore (for ffwd)
-       int ignore = 0;
+       int ignore[ MAX_AUDIO_STREAMS ] = { 0 };
 
        // Flag for paused (silence)
-       int paused = seek_audio( self, position, real_timecode, &ignore );
+       int paused = seek_audio( self, position, real_timecode );
+
+       // Initialize ignore for all streams from the seek return value
+       int i = MAX_AUDIO_STREAMS;
+       while ( i-- )
+               ignore[i] = ignore[0];
 
        // Fetch the audio_format
        AVFormatContext *context = self->audio_format;
+       if ( !context )
+               goto exit_get_audio;
 
        int sizeof_sample = sizeof( int16_t );
        
@@ -2040,49 +2226,25 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        if ( self->audio_index == INT_MAX )
        {
                index = 0;
-               index_max = context->nb_streams;
+               index_max = FFMIN( MAX_AUDIO_STREAMS, context->nb_streams );
                *channels = self->total_channels;
-               *samples = *samples * FFMAX( self->max_frequency, *frequency ) / *frequency;
-               *frequency = FFMAX( self->max_frequency, *frequency );
+               *samples = mlt_sample_calculator( fps, self->max_frequency, position );
+               *frequency = self->max_frequency;
        }
 
-       // Initialize the resamplers and buffers
-       for ( ; index < index_max; index++ )
+       // Initialize the buffers
+       for ( ; index < index_max && index < MAX_AUDIO_STREAMS; index++ )
        {
                // Get codec context
                AVCodecContext *codec_context = self->audio_codec[ index ];
 
                if ( codec_context && !self->audio_buffer[ index ] )
                {
-                       // Check for resample and create if necessary
-                       if ( codec_context->channels <= 2 )
-                       {
-                               // Determine by how much resampling will increase number of samples
-                               double resample_factor = self->audio_index == INT_MAX ? 1 : (double) *channels / codec_context->channels;
-                               resample_factor *= (double) *frequency / codec_context->sample_rate;
-                               if ( resample_factor > self->resample_factor )
-                                       self->resample_factor = resample_factor;
-                               
-                               // Create the resampler
-#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(15<<8)+0))
-                               self->audio_resample[ index ] = av_audio_resample_init(
-                                       self->audio_index == INT_MAX ? codec_context->channels : *channels,
-                                       codec_context->channels, *frequency, codec_context->sample_rate,
-                                       SAMPLE_FMT_S16, codec_context->sample_fmt, 16, 10, 0, 0.8 );
-#else
-                               self->audio_resample[ index ] = audio_resample_init(
-                                       self->audio_index == INT_MAX ? codec_context->channels : *channels,
-                                       codec_context->channels, *frequency, codec_context->sample_rate );
-#endif
-                       }
-                       else
-                       {
-                               codec_context->request_channels = self->audio_index == INT_MAX ? codec_context->channels : *channels;
-                               sizeof_sample = sample_bytes( codec_context );
-                       }
+                       codec_context->request_channels = self->audio_index == INT_MAX ? codec_context->channels : *channels;
+                       sizeof_sample = sample_bytes( codec_context );
 
                        // Check for audio buffer and create if necessary
-                       self->audio_buffer_size[ index ] = AVCODEC_MAX_AUDIO_FRAME_SIZE * sizeof_sample;
+                       self->audio_buffer_size[ index ] = MAX_AUDIO_FRAME_SIZE * sizeof_sample;
                        self->audio_buffer[ index ] = mlt_pool_alloc( self->audio_buffer_size[ index ] );
 
                        // Check for decoder buffer and create if necessary
@@ -2091,7 +2253,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        }
 
        // Get the audio if required
-       if ( !paused )
+       if ( !paused && *frequency > 0 )
        {
                int ret = 0;
                int got_audio = 0;
@@ -2099,68 +2261,104 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
 
                av_init_packet( &pkt );
                
-               // If not resampling, give consumer more than requested.
-               // It requested number samples based on requested frame rate.
-               // Do not clean this up with a samples *= ...!
-               if ( self->audio_index != INT_MAX && ! self->audio_resample[ self->audio_index ] )
-                       *samples = *samples * self->audio_codec[ self->audio_index ]->sample_rate / *frequency;
+               // The caller requested a sample count based on the requested sample rate.
+               if ( self->audio_index != INT_MAX )
+                       *samples = mlt_sample_calculator( fps, self->audio_codec[ self->audio_index ]->sample_rate, position );
 
                while ( ret >= 0 && !got_audio )
                {
                        // Check if the buffer already contains the samples required
-                       if ( self->audio_index != INT_MAX && self->audio_used[ self->audio_index ] >= *samples && ignore == 0 )
+                       if ( self->audio_index != INT_MAX &&
+                                self->audio_used[ self->audio_index ] >= *samples &&
+                                ignore[ self->audio_index ] == 0 )
                        {
                                got_audio = 1;
                                break;
                        }
+                       else if ( self->audio_index == INT_MAX )
+                       {
+                               // Check if there is enough audio for all streams
+                               got_audio = 1;
+                               for ( index = 0; got_audio && index < index_max; index++ )
+                                       if ( ( self->audio_codec[ index ] && self->audio_used[ index ] < *samples ) || ignore[ index ] )
+                                               got_audio = 0;
+                               if ( got_audio )
+                                       break;
+                       }
 
                        // Read a packet
-                       ret = av_read_frame( context, &pkt );
+                       pthread_mutex_lock( &self->packets_mutex );
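+                       // A non-seekable source cannot be rewound, so any video packet read
+                       // here is duplicated and queued on vpackets for the video decoder,
+                       // while previously queued audio packets (apackets) are consumed
+                       // before reading more from the context.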
+                       if ( mlt_deque_count( self->apackets ) )
+                       {
+                               AVPacket *tmp = (AVPacket*) mlt_deque_pop_front( self->apackets );
+                               pkt = *tmp;
+                               free( tmp );
+                       }
+                       else
+                       {
+                               ret = av_read_frame( context, &pkt );
+                               if ( ret >= 0 && !self->seekable && pkt.stream_index == self->video_index )
+                               {
+                                       if ( !av_dup_packet( &pkt ) )
+                                       {
+                                               AVPacket *tmp = malloc( sizeof(AVPacket) );
+                                               *tmp = pkt;
+                                               mlt_deque_push_back( self->vpackets, tmp );
+                                       }
+                               }
+                               else if ( ret < 0 )
+                               {
+                                       mlt_producer producer = self->parent;
+                                       mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
+                                       mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "av_read_frame returned error %d inside get_audio\n", ret );
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "reconnect" ) )
+                                       {
+                                               // Try to reconnect to live sources by closing context and codecs,
+                                               // and letting next call to get_frame() reopen.
+                                               prepare_reopen( self );
+                                               pthread_mutex_unlock( &self->packets_mutex );
+                                               goto exit_get_audio;
+                                       }
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "exit_on_disconnect" ) )
+                                       {
+                                               mlt_log_fatal( MLT_PRODUCER_SERVICE(producer), "Exiting with error due to disconnected source.\n" );
+                                               exit( EXIT_FAILURE );
+                                       }
+                               }
+                       }
+                       pthread_mutex_unlock( &self->packets_mutex );
 
                        // We only deal with audio from the selected audio index
                        index = pkt.stream_index;
-                       if ( ret >= 0 && pkt.data && pkt.size > 0 && ( index == self->audio_index ||
+                       if ( index < MAX_AUDIO_STREAMS && ret >= 0 && pkt.data && pkt.size > 0 && ( index == self->audio_index ||
                                 ( self->audio_index == INT_MAX && context->streams[ index ]->codec->codec_type == CODEC_TYPE_AUDIO ) ) )
                        {
-                               int channels2 = ( self->audio_index == INT_MAX || !self->audio_resample[index] ) ?
-                                       self->audio_codec[index]->channels : *channels;
-                               ret = decode_audio( self, &ignore, pkt, channels2, *samples, real_timecode, fps );
+                               int channels2 = self->audio_codec[index]->channels;
+                               ret = decode_audio( self, &ignore[index], pkt, channels2, *samples, real_timecode, fps );
                        }
-                       av_free_packet( &pkt );
 
-                       if ( self->audio_index == INT_MAX && ret >= 0 )
-                       {
-                               // Determine if there is enough audio for all streams
-                               got_audio = 1;
-                               for ( index = 0; index < context->nb_streams; index++ )
-                               {
-                                       if ( self->audio_codec[ index ] && self->audio_used[ index ] < *samples )
-                                               got_audio = 0;
-                               }
-                       }
+                       if ( self->seekable || index != self->video_index )
+                               av_free_packet( &pkt );
+
                }
 
                // Set some additional return values
                *format = mlt_audio_s16;
-               if ( self->audio_index != INT_MAX && !self->audio_resample[ self->audio_index ] )
+               if ( self->audio_index != INT_MAX )
                {
                        index = self->audio_index;
                        *channels = self->audio_codec[ index ]->channels;
                        *frequency = self->audio_codec[ index ]->sample_rate;
-                       *format = self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_S32 ? mlt_audio_s32le
-                               : self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_FLT ? mlt_audio_f32le
-                               : mlt_audio_s16;
+                       *format = pick_audio_format( self->audio_codec[ index ]->sample_fmt );
                        sizeof_sample = sample_bytes( self->audio_codec[ index ] );
                }
                else if ( self->audio_index == INT_MAX )
                {
-                       // This only works if all audio tracks have the same sample format.
                        for ( index = 0; index < index_max; index++ )
-                               if ( self->audio_codec[ index ] && !self->audio_resample[ index ] )
+                               if ( self->audio_codec[ index ] )
                                {
-                                       *format = self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_S32 ? mlt_audio_s32le
-                                               : self->audio_codec[ index ]->sample_fmt == SAMPLE_FMT_FLT ? mlt_audio_f32le
-                                               : mlt_audio_s16;
+                                       // XXX: This only works if all audio tracks have the same sample format.
+                                       *format = pick_audio_format( self->audio_codec[ index ]->sample_fmt );
                                        sizeof_sample = sample_bytes( self->audio_codec[ index ] );
                                        break;
                                }
@@ -2224,6 +2422,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        }
        else
        {
+exit_get_audio:
                // Get silence and don't touch the context
                mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
        }
@@ -2252,8 +2451,12 @@ static int audio_codec_init( producer_avformat self, int index, mlt_properties p
                AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
 
                // If we don't have a codec and we can't initialise it, we can't do much more...
-               avformat_lock( );
+               pthread_mutex_lock( &self->open_mutex );
+#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
+               if ( codec && avcodec_open2( codec_context, codec, NULL ) >= 0 )
+#else
                if ( codec && avcodec_open( codec_context, codec ) >= 0 )
+#endif
                {
                        // Now store the codec with its destructor
                        if ( self->audio_codec[ index ] )
@@ -2265,14 +2468,12 @@ static int audio_codec_init( producer_avformat self, int index, mlt_properties p
                        // Remember that we can't use self later
                        self->audio_index = -1;
                }
-               avformat_unlock( );
+               pthread_mutex_unlock( &self->open_mutex );
 
                // Process properties as AVOptions
                apply_properties( codec_context, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
-#if LIBAVCODEC_VERSION_MAJOR > 52
                if ( codec && codec->priv_class && codec_context->priv_data )
                        apply_properties( codec_context->priv_data, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM );
-#endif
        }
        return self->audio_codec[ index ] && self->audio_index > -1;
 }
@@ -2305,18 +2506,9 @@ static void producer_set_up_audio( producer_avformat self, mlt_frame frame )
        // Reopen the file if necessary
        if ( !context && self->audio_index > -1 && index > -1 )
        {
-               mlt_events_block( properties, producer );
                producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
-                       mlt_properties_get( properties, "resource" ) );
+                       mlt_properties_get( properties, "resource" ), 1 );
                context = self->audio_format;
-               if ( self->dummy_context )
-               {
-                       av_close_input_file( self->dummy_context );
-                       self->dummy_context = NULL;
-               }
-               mlt_events_unblock( properties, producer );
-               if ( self->audio_format )
-                       get_audio_streams_info( self );
        }
 
        // Exception handling for audio_index
@@ -2333,17 +2525,20 @@ static void producer_set_up_audio( producer_avformat self, mlt_frame frame )
                index = self->audio_index;
                mlt_properties_set_int( properties, "audio_index", index );
        }
+       if ( context && index > -1 && index < INT_MAX &&
+                pick_audio_format( context->streams[ index ]->codec->sample_fmt ) == mlt_audio_none )
+       {
+               index = -1;
+       }
 
        // Update the audio properties if the index changed
        if ( context && index > -1 && index != self->audio_index )
        {
+               pthread_mutex_lock( &self->open_mutex );
                if ( self->audio_codec[ self->audio_index ] )
-               {
-                       avformat_lock();
                        avcodec_close( self->audio_codec[ self->audio_index ] );
-                       avformat_unlock();
-               }
                self->audio_codec[ self->audio_index ] = NULL;
+               pthread_mutex_unlock( &self->open_mutex );
        }
        if ( self->audio_index != -1 )
                self->audio_index = index;
@@ -2361,14 +2556,11 @@ static void producer_set_up_audio( producer_avformat self, mlt_frame frame )
                                audio_codec_init( self, index, properties );
                }
        }
-       else if ( context && index > -1 && audio_codec_init( self, index, properties ) )
+       else if ( context && index > -1 && index < MAX_AUDIO_STREAMS &&
+               audio_codec_init( self, index, properties ) )
        {
-               // Set the frame properties
-               if ( index < INT_MAX )
-               {
-                       mlt_properties_set_int( frame_properties, "frequency", self->audio_codec[ index ]->sample_rate );
-                       mlt_properties_set_int( frame_properties, "channels", self->audio_codec[ index ]->channels );
-               }
+               mlt_properties_set_int( frame_properties, "audio_frequency", self->audio_codec[ index ]->sample_rate );
+               mlt_properties_set_int( frame_properties, "audio_channels", self->audio_codec[ index ]->channels );
        }
        if ( context && index > -1 )
        {
@@ -2414,15 +2606,16 @@ static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int i
        // Update timecode on the frame we're creating
        mlt_frame_set_position( *frame, mlt_producer_position( producer ) );
 
-       // Set the position of this producer
-       mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( producer ) );
-       
        // Set up the video
        producer_set_up_video( self, *frame );
 
        // Set up the audio
        producer_set_up_audio( self, *frame );
 
+       // Set the position of this producer
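+       // For non-seekable (e.g. live) sources the producer position cannot be mapped
+       // back into the stream, so frames are simply counted with nonseek_position.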
+       mlt_position position = self->seekable ? mlt_producer_frame( producer ) : self->nonseek_position++;
+       mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "original_position", position );
+
        // Calculate the next timecode
        mlt_producer_prepare_next( producer );
 
@@ -2432,35 +2625,81 @@ static int producer_get_frame( mlt_producer producer, mlt_frame_ptr frame, int i
 static void producer_avformat_close( producer_avformat self )
 {
        mlt_log_debug( NULL, "producer_avformat_close\n" );
-       // Close the file
-       av_free( self->av_frame );
-       avformat_lock();
+
+       // Cleanup av contexts
+       av_free_packet( &self->pkt );
+       av_free( self->video_frame );
+       av_free( self->audio_frame );
+       if ( self->is_mutex_init )
+               pthread_mutex_lock( &self->open_mutex );
        int i;
        for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )
        {
-               if ( self->audio_resample[i] )
-                       audio_resample_close( self->audio_resample[i] );
                mlt_pool_release( self->audio_buffer[i] );
                av_free( self->decode_buffer[i] );
                if ( self->audio_codec[i] )
                        avcodec_close( self->audio_codec[i] );
+               self->audio_codec[i] = NULL;
        }
        if ( self->video_codec )
                avcodec_close( self->video_codec );
+       self->video_codec = NULL;
+       // Close the file
+#if LIBAVFORMAT_VERSION_INT >= ((53<<16)+(17<<8)+0)
+       if ( self->dummy_context )
+               avformat_close_input( &self->dummy_context );
+       if ( self->seekable && self->audio_format )
+               avformat_close_input( &self->audio_format );
+       if ( self->video_format )
+               avformat_close_input( &self->video_format );
+#else
        if ( self->dummy_context )
                av_close_input_file( self->dummy_context );
-       if ( self->audio_format )
+       if ( self->seekable && self->audio_format )
                av_close_input_file( self->audio_format );
        if ( self->video_format )
                av_close_input_file( self->video_format );
-       avformat_unlock();
+#endif
+       if ( self->is_mutex_init )
+               pthread_mutex_unlock( &self->open_mutex );
 #ifdef VDPAU
        vdpau_producer_close( self );
 #endif
        if ( self->image_cache )
                mlt_cache_close( self->image_cache );
-       pthread_mutex_destroy( &self->audio_mutex );
-       pthread_mutex_destroy( &self->video_mutex );
+
+       // Cleanup the mutexes
+       if ( self->is_mutex_init )
+       {
+               pthread_mutex_destroy( &self->audio_mutex );
+               pthread_mutex_destroy( &self->video_mutex );
+               pthread_mutex_destroy( &self->packets_mutex );
+               pthread_mutex_destroy( &self->open_mutex );
+       }
+
+       // Cleanup the packet queues
+       AVPacket *pkt;
+       if ( self->apackets )
+       {
+               while ( ( pkt = mlt_deque_pop_back( self->apackets ) ) )
+               {
+                       av_free_packet( pkt );
+                       free( pkt );
+               }
+               mlt_deque_close( self->apackets );
+               self->apackets = NULL;
+       }
+       if ( self->vpackets )
+       {
+               while ( ( pkt = mlt_deque_pop_back( self->vpackets ) ) )
+               {
+                       av_free_packet( pkt );
+                       free( pkt );
+               }
+               mlt_deque_close( self->vpackets );
+               self->vpackets = NULL;
+       }
+
        free( self );
 }