Fix crash on audio-only clip using uninitialized mutex.
diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c
index ccbdb0146a60815877de70ab4c24f77c4e8aae26..cbdd38fd670485c9a6c8987899c6bd2d9b872d19 100644
--- a/src/modules/avformat/producer_avformat.c
+++ b/src/modules/avformat/producer_avformat.c
@@ -102,7 +102,7 @@ struct producer_avformat_s
        unsigned int invalid_pts_counter;
        unsigned int invalid_dts_counter;
        mlt_cache image_cache;
-       int colorspace;
+       int yuv_colorspace, color_primaries;
        int full_luma;
        pthread_mutex_t video_mutex;
        pthread_mutex_t audio_mutex;
@@ -338,8 +338,8 @@ static mlt_properties find_default_streams( producer_avformat self )
                                snprintf( key, sizeof(key), "meta.media.%d.codec.height", i );
                                mlt_properties_set_int( meta_media, key, codec_context->height );
                                snprintf( key, sizeof(key), "meta.media.%d.codec.frame_rate", i );
-                               mlt_properties_set_double( meta_media, key, (double) codec_context->time_base.den /
-                                                                                  ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num ) );
+                               AVRational frame_rate = { codec_context->time_base.den, codec_context->time_base.num * codec_context->ticks_per_frame };
+                               mlt_properties_set_double( meta_media, key, av_q2d( frame_rate ) );
                                snprintf( key, sizeof(key), "meta.media.%d.codec.pix_fmt", i );
 #if LIBAVUTIL_VERSION_INT >= ((51<<16)+(3<<8)+0)
                                mlt_properties_set( meta_media, key, av_get_pix_fmt_name( codec_context->pix_fmt ) );
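
A quick worked example of the tick-based frame rate added above (illustrative only; relies on the libavcodec headers this file already includes): an H.264 stream commonly stores time_base = 1/50 with ticks_per_frame = 2, so

    AVRational frame_rate = { 50, 1 * 2 };  /* time_base.den, time_base.num * ticks_per_frame */
    double fps = av_q2d( frame_rate );      /* 25.0 - the old den/num division would have reported 50 */
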
@@ -430,121 +430,16 @@ static mlt_properties find_default_streams( producer_avformat self )
        return meta_media;
 }
 
-static inline int dv_is_pal( AVPacket *pkt )
+static void get_aspect_ratio( mlt_properties properties, AVStream *stream, AVCodecContext *codec_context )
 {
-       return pkt->data[3] & 0x80;
-}
-
-static int dv_is_wide( AVPacket *pkt )
-{
-       int i = 80 /* block size */ *3 /* VAUX starts at block 3 */ +3 /* skip block header */;
-
-       for ( ; i < pkt->size; i += 5 /* packet size */ )
-       {
-               if ( pkt->data[ i ] == 0x61 )
-               {
-                       uint8_t x = pkt->data[ i + 2 ] & 0x7;
-                       return ( x == 2 ) || ( x == 7 );
-               }
-       }
-       return 0;
-}
-
-static double get_aspect_ratio( mlt_properties properties, AVStream *stream, AVCodecContext *codec_context, AVPacket *pkt )
-{
-       double aspect_ratio = 1.0;
-
-       if ( codec_context->codec_id == AV_CODEC_ID_DVVIDEO )
-       {
-               if ( pkt )
-               {
-                       if ( dv_is_pal( pkt ) )
-                       {
-                               if ( dv_is_wide( pkt ) )
-                               {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 64 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 45 );
-                               }
-                               else
-                               {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 16 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 15 );
-                               }
-                       }
-                       else
-                       {
-                               if ( dv_is_wide( pkt ) )
-                               {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 32 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 27 );
-                               }
-                               else
-                               {
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 8 );
-                                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 9 );
-                               }
-                       }
-               }
-               else
-               {
-                       AVRational ar = stream->sample_aspect_ratio;
-                       // Override FFmpeg's notion of DV aspect ratios, which are
-                       // based upon a width of 704. Since we do not have a normaliser
-                       // that crops (nor is cropping 720 wide ITU-R 601 video always desirable)
-                       // we just coerce the values to facilitate a passive behaviour through
-                       // the rescale normaliser when using equivalent producers and consumers.
-                       // = display_aspect / (width * height)
-                       if ( ar.num == 10 && ar.den == 11 )
-                       {
-                               // 4:3 NTSC
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 8 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 9 );
-                       }
-                       else if ( ar.num == 59 && ar.den == 54 )
-                       {
-                               // 4:3 PAL
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 16 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 15 );
-                       }
-                       else if ( ar.num == 40 && ar.den == 33 )
-                       {
-                               // 16:9 NTSC
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 32 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 27 );
-                       }
-                       else if ( ar.num == 118 && ar.den == 81 )
-                       {
-                               // 16:9 PAL
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 64 );
-                               mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 45 );
-                       }
-               }
-       }
-       else
-       {
-               AVRational codec_sar = codec_context->sample_aspect_ratio;
-               AVRational stream_sar = stream->sample_aspect_ratio;
-               if ( codec_sar.num > 0 )
-               {
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", codec_sar.num );
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", codec_sar.den );
-               }
-               else if ( stream_sar.num > 0 )
-               {
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", stream_sar.num );
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", stream_sar.den );
-               }
-               else
-               {
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", 1 );
-                       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", 1 );
-               }
-       }
-       AVRational ar = { mlt_properties_get_double( properties, "meta.media.sample_aspect_num" ), mlt_properties_get_double( properties, "meta.media.sample_aspect_den" ) };
-       aspect_ratio = av_q2d( ar );
-       mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio );
-
-       return aspect_ratio;
+       AVRational sar = stream->sample_aspect_ratio;
+       if ( sar.num <= 0 || sar.den <= 0 )
+               sar = codec_context->sample_aspect_ratio;
+       if ( sar.num <= 0 || sar.den <= 0 )
+               sar.num = sar.den = 1;
+       mlt_properties_set_int( properties, "meta.media.sample_aspect_num", sar.num );
+       mlt_properties_set_int( properties, "meta.media.sample_aspect_den", sar.den );
+       mlt_properties_set_double( properties, "aspect_ratio", av_q2d( sar ) );
 }
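
For reference, the sample aspect ratio this function reports relates to the display aspect ratio as DAR = SAR * width / height; a worked example with the classic PAL values the removed DV table used to hard-code:

    double dar_4_3  = 720.0 * 16 / 15 / 576;   /* 1.333... -> 4:3  (SAR 16:15) */
    double dar_16_9 = 720.0 * 64 / 45 / 576;   /* 1.777... -> 16:9 (SAR 64:45) */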
 
 #if LIBAVFORMAT_VERSION_INT > ((53<<16)+(6<<8)+0)
@@ -680,10 +575,6 @@ static int get_basic_info( producer_avformat self, mlt_profile profile, const ch
 
        AVFormatContext *format = self->video_format;
 
-       // We will treat everything with the producer fps.
-       // TODO: make this more flexible.
-       double fps = mlt_profile_fps( profile );
-
        // Get the duration
        if ( !mlt_properties_get_int( properties, "_length_computed" ) )
        {
@@ -692,7 +583,9 @@ static int get_basic_info( producer_avformat self, mlt_profile profile, const ch
                if ( format->duration != AV_NOPTS_VALUE )
                {
                        // This isn't going to be accurate for all formats
-                       mlt_position frames = ( mlt_position )( ( ( double )format->duration / ( double )AV_TIME_BASE ) * fps );
+                       // We will treat everything with the producer fps.
+                       mlt_position frames = ( mlt_position )( int )( format->duration *
+                               profile->frame_rate_num / profile->frame_rate_den / AV_TIME_BASE);
                        mlt_properties_set_position( properties, "out", frames - 1 );
                        mlt_properties_set_position( properties, "length", frames );
                        mlt_properties_set_int( properties, "_length_computed", 1 );
@@ -744,29 +637,7 @@ static int get_basic_info( producer_avformat self, mlt_profile profile, const ch
                AVCodecContext *codec_context = format->streams[ self->video_index ]->codec;
                mlt_properties_set_int( properties, "width", codec_context->width );
                mlt_properties_set_int( properties, "height", codec_context->height );
-
-               if ( codec_context->codec_id == AV_CODEC_ID_DVVIDEO )
-               {
-                       // Fetch the first frame of DV so we can read it directly
-                       AVPacket pkt;
-                       int ret = 0;
-                       while ( ret >= 0 )
-                       {
-                               ret = av_read_frame( format, &pkt );
-                               if ( ret >= 0 && pkt.stream_index == self->video_index && pkt.size > 0 )
-                               {
-                                       get_aspect_ratio( properties, format->streams[ self->video_index ], codec_context, &pkt );
-                                       av_free_packet(&pkt);
-                                       break;
-                               }
-                               if ( ret >= 0 )
-                                       av_free_packet(&pkt);
-                       }
-               }
-               else
-               {
-                       get_aspect_ratio( properties, format->streams[ self->video_index ], codec_context, NULL );
-               }
+               get_aspect_ratio( properties, format->streams[ self->video_index ], codec_context );
 
                // Verify that we can convert this to YUV 4:2:2
                // TODO: we can now also return RGB and RGBA and quite possibly more in the future.
@@ -789,17 +660,18 @@ static int producer_open( producer_avformat self, mlt_profile profile, const cha
        int error = 0;
        mlt_properties properties = MLT_PRODUCER_PROPERTIES( self->parent );
 
+       if ( !self->is_mutex_init )
+       {
+               pthread_mutex_init( &self->audio_mutex, NULL );
+               pthread_mutex_init( &self->video_mutex, NULL );
+               pthread_mutex_init( &self->packets_mutex, NULL );
+               pthread_mutex_init( &self->open_mutex, NULL );
+               self->is_mutex_init = 1;
+       }
+
        // Lock the service
        if ( take_lock )
        {
-               if ( !self->is_mutex_init )
-               {
-                       pthread_mutex_init( &self->audio_mutex, NULL );
-                       pthread_mutex_init( &self->video_mutex, NULL );
-                       pthread_mutex_init( &self->packets_mutex, NULL );
-                       pthread_mutex_init( &self->open_mutex, NULL );
-                       self->is_mutex_init = 1;
-               }
                pthread_mutex_lock( &self->audio_mutex );
                pthread_mutex_lock( &self->video_mutex );
        }
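
The hunk above is the heart of the fix named in the commit message: the mutexes are now initialized on the first call to producer_open() whether or not take_lock is set, so a later pthread_mutex_lock() can no longer hit an uninitialized mutex, which is what an audio-only clip could trigger. A minimal sketch of the guard pattern, with a hypothetical struct and assuming the first open is serialized by the caller as it is here:

    #include <pthread.h>

    struct demo_s { pthread_mutex_t audio_mutex; int is_mutex_init; };

    static void demo_open( struct demo_s *self, int take_lock )
    {
        if ( !self->is_mutex_init )      /* runs on the first open, locked or not */
        {
            pthread_mutex_init( &self->audio_mutex, NULL );
            self->is_mutex_init = 1;
        }
        if ( take_lock )
            pthread_mutex_lock( &self->audio_mutex );  /* always initialized by now */
    }
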
@@ -943,46 +815,66 @@ static int producer_open( producer_avformat self, mlt_profile profile, const cha
        return error;
 }
 
-static void reopen_video( producer_avformat self, mlt_producer producer )
+static void prepare_reopen( producer_avformat self )
 {
-       mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
-       mlt_service_lock( MLT_PRODUCER_SERVICE( producer ) );
+       mlt_service_lock( MLT_PRODUCER_SERVICE( self->parent ) );
        pthread_mutex_lock( &self->audio_mutex );
        pthread_mutex_lock( &self->open_mutex );
 
+       int i;
+       for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )
+       {
+               mlt_pool_release( self->audio_buffer[i] );
+               self->audio_buffer[i] = NULL;
+               av_free( self->decode_buffer[i] );
+               self->decode_buffer[i] = NULL;
+               if ( self->audio_codec[i] )
+                       avcodec_close( self->audio_codec[i] );
+               self->audio_codec[i] = NULL;
+       }
        if ( self->video_codec )
                avcodec_close( self->video_codec );
        self->video_codec = NULL;
+
 #if LIBAVFORMAT_VERSION_INT >= ((53<<16)+(17<<8)+0)
-       if ( self->dummy_context )
-               avformat_close_input( &self->dummy_context );
+       if ( self->seekable && self->audio_format )
+               avformat_close_input( &self->audio_format );
        if ( self->video_format )
                avformat_close_input( &self->video_format );
 #else
-       if ( self->dummy_context )
-               av_close_input_file( self->dummy_context );
+       if ( self->seekable && self->audio_format )
+               av_close_input_file( self->audio_format );
        if ( self->video_format )
                av_close_input_file( self->video_format );
 #endif
-       self->dummy_context = NULL;
+       self->audio_format = NULL;
        self->video_format = NULL;
        pthread_mutex_unlock( &self->open_mutex );
 
-       int audio_index = self->audio_index;
-       int video_index = self->video_index;
-
-       producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
-               mlt_properties_get( properties, "resource" ), 0 );
-
-       self->audio_index = audio_index;
-       if ( self->video_format && video_index > -1 )
+       // Cleanup the packet queues
+       AVPacket *pkt;
+       if ( self->apackets )
+       {
+               while ( ( pkt = mlt_deque_pop_back( self->apackets ) ) )
+               {
+                       av_free_packet( pkt );
+                       free( pkt );
+               }
+               mlt_deque_close( self->apackets );
+               self->apackets = NULL;
+       }
+       if ( self->vpackets )
        {
-               self->video_index = video_index;
-               video_codec_init( self, video_index, properties );
+               while ( ( pkt = mlt_deque_pop_back( self->vpackets ) ) )
+               {
+                       av_free_packet( pkt );
+                       free( pkt );
+               }
+               mlt_deque_close( self->vpackets );
+               self->vpackets = NULL;
        }
-
        pthread_mutex_unlock( &self->audio_mutex );
-       mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
+       mlt_service_unlock( MLT_PRODUCER_SERVICE( self->parent ) );
 }
 
 static int64_t best_pts( producer_avformat self, int64_t pts, int64_t dts )
@@ -1066,26 +958,15 @@ static int seek_video( producer_avformat self, mlt_position position,
                                timestamp -= 2 / av_q2d( self->video_time_base );
                        if ( timestamp < 0 )
                                timestamp = 0;
-                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %"PRId64" position %d expected %d last_pos %"PRId64"\n",
+                       mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %"PRId64" position " MLT_POSITION_FMT " expected "MLT_POSITION_FMT" last_pos %"PRId64"\n",
                                timestamp, position, self->video_expected, self->last_position );
 
                        // Seek to the timestamp
-                       // NOTE: reopen_video is disabled at this time because it is causing trouble with A/V sync.
-                       if ( 1 || req_position > 0 || self->last_position <= 0 )
-                       {
-                               codec_context->skip_loop_filter = AVDISCARD_NONREF;
-                               av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
+                       codec_context->skip_loop_filter = AVDISCARD_NONREF;
+                       av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
 
-                               // flush any pictures still in decode buffer
-                               avcodec_flush_buffers( codec_context );
-                       }
-                       else
-                       {
-                               // Re-open video stream when rewinding to beginning from somewhere else.
-                               // This is rather ugly, and I prefer not to do it this way, but ffmpeg is
-                               // not reliably seeking to the first frame across formats.
-                               reopen_video( self, producer );
-                       }
+                       // flush any pictures still in decode buffer
+                       avcodec_flush_buffers( codec_context );
 
                        // Remove the cached info relating to the previous position
                        self->current_position = POSITION_INVALID;
@@ -1145,40 +1026,50 @@ static void get_audio_streams_info( producer_avformat self )
                self->audio_streams, self->audio_max_stream, self->total_channels, self->max_channel );
 }
 
-static void set_luma_transfer( struct SwsContext *context, int colorspace, int use_full_range )
+static int set_luma_transfer( struct SwsContext *context, int src_colorspace, int dst_colorspace, int full_range )
 {
-       int *coefficients;
-       const int *new_coefficients;
-       int full_range;
-       int brightness, contrast, saturation;
-
-       if ( sws_getColorspaceDetails( context, &coefficients, &full_range, &coefficients, &full_range,
-                       &brightness, &contrast, &saturation ) != -1 )
-       {
-               // Don't change these from defaults unless explicitly told to.
-               if ( use_full_range >= 0 )
-                       full_range = use_full_range;
-               switch ( colorspace )
-               {
-               case 170:
-               case 470:
-               case 601:
-               case 624:
-                       new_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
-                       break;
-               case 240:
-                       new_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
-                       break;
-               case 709:
-                       new_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
-                       break;
-               default:
-                       new_coefficients = coefficients;
-                       break;
-               }
-               sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range,
-                       brightness, contrast, saturation );
+       const int *src_coefficients = sws_getCoefficients( SWS_CS_DEFAULT );
+       const int *dst_coefficients = sws_getCoefficients( SWS_CS_DEFAULT );
+       int brightness = 0;
+       int contrast = 1 << 16;
+       int saturation = 1  << 16;
+
+       switch ( src_colorspace )
+       {
+       case 170:
+       case 470:
+       case 601:
+       case 624:
+               src_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
+               break;
+       case 240:
+               src_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
+               break;
+       case 709:
+               src_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
+               break;
+       default:
+               break;
+       }
+       switch ( dst_colorspace )
+       {
+       case 170:
+       case 470:
+       case 601:
+       case 624:
+		dst_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
+		break;
+	case 240:
+		dst_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
+		break;
+	case 709:
+		dst_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
+               break;
+       default:
+               break;
        }
+       return sws_setColorspaceDetails( context, src_coefficients, full_range, dst_coefficients, full_range,
+               brightness, contrast, saturation );
 }
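
A minimal usage sketch for the rewritten set_luma_transfer() (assumed call site; width and height are hypothetical), e.g. converting BT.601-coded YUV to RGB while targeting a Rec. 709 profile:

    struct SwsContext *ctx = sws_getContext( width, height, PIX_FMT_YUV420P,
        width, height, PIX_FMT_RGB24, SWS_BICUBIC | SWS_ACCURATE_RND, NULL, NULL, NULL );
    int err = set_luma_transfer( ctx, 601, 709, 0 );  /* negative when sws cannot apply colorspace details to this conversion */
    /* ... sws_scale() exactly as in convert_image() below ... */
    sws_freeContext( ctx );
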
 
 static mlt_image_format pick_pix_format( enum PixelFormat pix_fmt )
@@ -1236,10 +1127,13 @@ static mlt_audio_format pick_audio_format( int sample_fmt )
        }
 }
 
-static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buffer, int pix_fmt,
+// returns resulting YUV colorspace
+static int convert_image( producer_avformat self, AVFrame *frame, uint8_t *buffer, int pix_fmt,
        mlt_image_format *format, int width, int height, uint8_t **alpha )
 {
        int flags = SWS_BICUBIC | SWS_ACCURATE_RND;
+       mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self->parent ) );
+       int result = self->yuv_colorspace;
 
 #ifdef USE_MMX
        flags |= SWS_CPU_CAPS_MMX;
@@ -1248,6 +1142,10 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
        flags |= SWS_CPU_CAPS_MMX2;
 #endif
 
+       mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "%s @ %dx%d space %d->%d\n",
+               mlt_image_format_name( *format ),
+               width, height, self->yuv_colorspace, profile->colorspace );
+
        // extract alpha from planar formats
        if ( ( pix_fmt == PIX_FMT_YUVA420P
 #if defined(FFUDIV) && LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
@@ -1270,7 +1168,8 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
        if ( *format == mlt_image_yuv420p )
        {
                struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-                       width, height, PIX_FMT_YUV420P, flags, NULL, NULL, NULL);
+                       width, height, self->full_luma ? PIX_FMT_YUVJ420P : PIX_FMT_YUV420P,
+                       flags, NULL, NULL, NULL);
                AVPicture output;
                output.data[0] = buffer;
                output.data[1] = buffer + width * height;
@@ -1278,7 +1177,8 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
                output.linesize[0] = width;
                output.linesize[1] = width >> 1;
                output.linesize[2] = width >> 1;
-               set_luma_transfer( context, self->colorspace, -1 );
+               if ( !set_luma_transfer( context, self->yuv_colorspace, profile->colorspace, self->full_luma ) )
+                       result = profile->colorspace;
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -1289,7 +1189,7 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
                        width, height, PIX_FMT_RGB24, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
-               set_luma_transfer( context, self->colorspace, self->full_luma );
+               set_luma_transfer( context, self->yuv_colorspace, profile->colorspace, self->full_luma );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -1300,7 +1200,7 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
                        width, height, PIX_FMT_RGBA, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
-               set_luma_transfer( context, self->colorspace, self->full_luma );
+               set_luma_transfer( context, self->yuv_colorspace, profile->colorspace, self->full_luma );
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
@@ -1311,11 +1211,13 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
                        width, height, PIX_FMT_YUYV422, flags | SWS_FULL_CHR_H_INP, NULL, NULL, NULL);
                AVPicture output;
                avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
-               set_luma_transfer( context, self->colorspace, -1 );
+               if ( !set_luma_transfer( context, self->yuv_colorspace, profile->colorspace, self->full_luma ) )
+                       result = profile->colorspace;
                sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
                        output.data, output.linesize);
                sws_freeContext( context );
        }
+       return result;
 }
 
 /** Allocate the image buffer and set it on the frame.
@@ -1363,8 +1265,14 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 
        pthread_mutex_lock( &self->video_mutex );
 
+       uint8_t *alpha = NULL;
+       int got_picture = 0;
+       int image_size = 0;
+
        // Fetch the video format context
        AVFormatContext *context = self->video_format;
+       if ( !context )
+               goto exit_get_image;
 
        // Get the video stream
        AVStream *stream = context->streams[ self->video_index ];
@@ -1372,10 +1280,6 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
        // Get codec context
        AVCodecContext *codec_context = stream->codec;
 
-       uint8_t *alpha = NULL;
-       int got_picture = 0;
-       int image_size = 0;
-
        // Get the image cache
        if ( ! self->image_cache )
        {
@@ -1486,13 +1390,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[0] = codec_context->width;
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
-                               convert_image( self, (AVFrame*) &picture, *buffer,
+                               int yuv_colorspace = convert_image( self, (AVFrame*) &picture, *buffer,
                                        PIX_FMT_YUV420P, format, *width, *height, &alpha );
+                               mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
                        }
 			else
 #endif
-			convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+			{
+				int yuv_colorspace = convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
 				format, *width, *height, &alpha );
+				mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
+			}
                        got_picture = 1;
                }
        }
@@ -1531,6 +1437,23 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                                mlt_deque_push_back( self->apackets, tmp );
                                        }
                                }
+                               else if ( ret < 0 )
+                               {
+                                       mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "av_read_frame returned error %d inside get_image\n", ret );
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "reconnect" ) )
+                                       {
+                                               // Try to reconnect to live sources by closing context and codecs,
+                                               // and letting next call to get_frame() reopen.
+                                               prepare_reopen( self );
+                                               pthread_mutex_unlock( &self->packets_mutex );
+                                               goto exit_get_image;
+                                       }
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "exit_on_disconnect" ) )
+                                       {
+                                               mlt_log_fatal( MLT_PRODUCER_SERVICE(producer), "Exiting with error due to disconnected source.\n" );
+                                               exit( EXIT_FAILURE );
+                                       }
+                               }
                        }
                        pthread_mutex_unlock( &self->packets_mutex );
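
Assumed usage of the two producer properties consulted above (names taken from this hunk): an application reading an unreliable live stream could request automatic reopen, or a hard exit, like this:

    mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( producer ), "reconnect", 1 );
    /* or, to abort the process instead of retrying: */
    mlt_properties_set_int( MLT_PRODUCER_PROPERTIES( producer ), "exit_on_disconnect", 1 );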
 
@@ -1588,6 +1511,8 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        {
                                                if ( ++decode_errors <= 10 )
                                                        ret = 0;
+                                               else
+                                                       mlt_log_warning( MLT_PRODUCER_SERVICE(producer), "video decoding error %d\n", ret );
                                        }
                                        else
                                        {
@@ -1649,8 +1574,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                                        VdpStatus status = vdp_surface_get_bits( render->surface, dest_format, planes, pitches );
                                                        if ( status == VDP_STATUS_OK )
                                                        {
-                                                               convert_image( self, self->video_frame, *buffer, PIX_FMT_YUV420P,
+                                                               int yuv_colorspace = convert_image( self, self->video_frame, *buffer, PIX_FMT_YUV420P,
                                                                        format, *width, *height, &alpha );
+                                                               mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
                                                        }
                                                        else
                                                        {
@@ -1666,8 +1592,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                        }
 					else
 #endif
-					convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+					{
+						int yuv_colorspace = convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
 						format, *width, *height, &alpha );
+						mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
+					}
                                        self->top_field_first |= self->video_frame->top_field_first;
                                        self->current_position = int_position;
                                }
@@ -1715,13 +1642,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
                                picture.linesize[0] = codec_context->width;
                                picture.linesize[1] = codec_context->width / 2;
                                picture.linesize[2] = codec_context->width / 2;
-                               convert_image( self, (AVFrame*) &picture, *buffer,
+                               int yuv_colorspace = convert_image( self, (AVFrame*) &picture, *buffer,
                                        PIX_FMT_YUV420P, format, *width, *height, &alpha );
+                               mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
                        }
 			else
 #endif
-			convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+			{
+				int yuv_colorspace = convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
 				format, *width, *height, &alpha );
+				mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
+			}
                        got_picture = 1;
                }
        }
@@ -1862,95 +1791,107 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
                {
                        mlt_properties_set_int( properties, "width", self->video_codec->width );
                        mlt_properties_set_int( properties, "height", self->video_codec->height );
+                       get_aspect_ratio( properties, stream, self->video_codec );
                }
-               // For DV, we'll just use the saved aspect ratio
-               if ( codec_context->codec_id != AV_CODEC_ID_DVVIDEO )
-                       get_aspect_ratio( properties, stream, self->video_codec, NULL );
 
-               // Determine the fps first from the codec
-               double codec_fps = (double) self->video_codec->time_base.den /
-                                                                  ( self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
-               
-               {
-                       // If the muxer reports a frame rate different than the codec
+               // Start with the muxer frame rate.
 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
-                       double muxer_fps = av_q2d( stream->avg_frame_rate );
-#if LIBAVFORMAT_VERSION_MAJOR < 55
-                       if ( isnan( muxer_fps ) || muxer_fps == 0 )
-                               muxer_fps = av_q2d( stream->r_frame_rate );
-#endif
+               AVRational frame_rate = stream->avg_frame_rate;
 #else
-                       double muxer_fps = av_q2d( stream->r_frame_rate );
+               AVRational frame_rate = stream->r_frame_rate;
 #endif
-                       if ( !isnan( muxer_fps ) && muxer_fps > 0 )
-                       {
-#if LIBAVFORMAT_VERSION_MAJOR >= 55
-                               AVRational frame_rate = stream->avg_frame_rate;
-#else
-                               AVRational frame_rate = stream->r_frame_rate;
+               double fps = av_q2d( frame_rate );
+
+#if LIBAVFORMAT_VERSION_MAJOR < 55
+               // Verify and sanitize the muxer frame rate.
+               if ( isnan( fps ) || isinf( fps ) || fps == 0 )
+               {
+                       frame_rate = stream->r_frame_rate;
+                       fps = av_q2d( frame_rate );
+               }
 #endif
-                               // With my samples when r_frame_rate != 1000 but avg_frame_rate is valid,
-                               // avg_frame_rate gives some approximate value that does not well match the media.
-                               // Also, on my sample where r_frame_rate = 1000, using avg_frame_rate directly
-                               // results in some very choppy output, but some value slightly different works
-                               // great.
 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0) && LIBAVFORMAT_VERSION_MAJOR < 55
-                               if ( av_q2d( stream->r_frame_rate ) >= 1000 && av_q2d( stream->avg_frame_rate ) > 0 )
-                                       frame_rate = av_d2q( av_q2d( stream->avg_frame_rate ), 1024 );
+		// With my samples, when r_frame_rate != 1000 but avg_frame_rate is valid,
+		// avg_frame_rate gives an approximate value that does not match the media well.
+		// Also, on my sample where r_frame_rate = 1000, using avg_frame_rate directly
+		// results in very choppy output, but a slightly different value works great.
+               if ( av_q2d( stream->r_frame_rate ) >= 1000 && av_q2d( stream->avg_frame_rate ) > 0 )
+               {
+                       frame_rate = av_d2q( av_q2d( stream->avg_frame_rate ), 1024 );
+                       fps = av_q2d( frame_rate );
+               }
 #endif
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
-                       }
-                       else if ( codec_fps >= 1.0 )
-                       {
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", self->video_codec->time_base.den );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
-                       }
-                       else
-                       {
-                               AVRational frame_rate = av_d2q( mlt_producer_get_fps( self->parent ), 255 );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
-                               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
-                       }
+               // XXX frame rates less than 1 fps are not considered sane
+               if ( isnan( fps ) || isinf( fps ) || fps < 1.0 )
+               {
+                       // Get the frame rate from the codec.
+                       frame_rate.num = self->video_codec->time_base.den;
+                       frame_rate.den = self->video_codec->time_base.num * self->video_codec->ticks_per_frame;
+                       fps = av_q2d( frame_rate );
+               }
+               if ( isnan( fps ) || isinf( fps ) || fps < 1.0 )
+               {
+                       // Use the profile frame rate if all else fails.
+                       mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self->parent ) );
+                       frame_rate.num = profile->frame_rate_num;
+                       frame_rate.den = profile->frame_rate_den;
                }
+
                self->video_time_base = stream->time_base;
                if ( mlt_properties_get( properties, "force_fps" ) )
                {
-                       AVRational fps = av_d2q( mlt_properties_get_double( properties, "force_fps" ), 1024 );
-                       self->video_time_base.num *= mlt_properties_get_int( properties, "meta.media.frame_rate_num" ) * fps.den;
-                       self->video_time_base.den *= mlt_properties_get_int( properties, "meta.media.frame_rate_den" ) * fps.num;
-                       mlt_properties_set_int( properties, "meta.media.frame_rate_num", fps.num );
-                       mlt_properties_set_int( properties, "meta.media.frame_rate_den", fps.den );
+                       AVRational force_fps = av_d2q( mlt_properties_get_double( properties, "force_fps" ), 1024 );
+                       self->video_time_base = av_mul_q( stream->time_base, av_div_q( frame_rate, force_fps ) );
+                       frame_rate = force_fps;
                }
+               mlt_properties_set_int( properties, "meta.media.frame_rate_num", frame_rate.num );
+               mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
 
                // Set the YUV colorspace from override or detect
-               self->colorspace = mlt_properties_get_int( properties, "force_colorspace" );
-#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)              
-               if ( ! self->colorspace )
+               self->yuv_colorspace = mlt_properties_get_int( properties, "force_colorspace" );
+#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)
+               if ( ! self->yuv_colorspace )
                {
                        switch ( self->video_codec->colorspace )
                        {
                        case AVCOL_SPC_SMPTE240M:
-                               self->colorspace = 240;
+                               self->yuv_colorspace = 240;
                                break;
                        case AVCOL_SPC_BT470BG:
                        case AVCOL_SPC_SMPTE170M:
-                               self->colorspace = 601;
+                               self->yuv_colorspace = 601;
                                break;
                        case AVCOL_SPC_BT709:
-                               self->colorspace = 709;
+                               self->yuv_colorspace = 709;
                                break;
                        default:
                                // This is a heuristic Charles Poynton suggests in "Digital Video and HDTV"
-                               self->colorspace = self->video_codec->width * self->video_codec->height > 750000 ? 709 : 601;
+                               self->yuv_colorspace = self->video_codec->width * self->video_codec->height > 750000 ? 709 : 601;
                                break;
                        }
                }
 #endif
                // Let apps get chosen colorspace
-               mlt_properties_set_int( properties, "meta.media.colorspace", self->colorspace );
+               mlt_properties_set_int( properties, "meta.media.colorspace", self->yuv_colorspace );
 
-               self->full_luma = -1;
+               switch ( self->video_codec->color_primaries )
+               {
+               case AVCOL_PRI_BT470BG:
+                       self->color_primaries = 601625;
+                       break;
+               case AVCOL_PRI_SMPTE170M:
+               case AVCOL_PRI_SMPTE240M:
+                       self->color_primaries = 601525;
+                       break;
+               case AVCOL_PRI_BT709:
+               case AVCOL_PRI_UNSPECIFIED:
+               default:
+                       self->color_primaries = 709;
+                       break;
+               }
+
+               self->full_luma = 0;
 #if LIBAVCODEC_VERSION_INT >= ((52<<16)+(72<<8)+2)
                mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "color_range %d\n", codec_context->color_range );
                if ( codec_context->color_range == AVCOL_RANGE_JPEG )
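
A worked example (made-up numbers) of the force_fps rescale a few lines above: with stream->time_base = 1/25000, a detected frame_rate of 25/1, and force_fps = 50/1,

    AVRational tb = av_mul_q( (AVRational){ 1, 25000 },
                              av_div_q( (AVRational){ 25, 1 }, (AVRational){ 50, 1 } ) );
    /* av_div_q -> 1/2, av_mul_q -> 1/50000, the new video_time_base */
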
@@ -1979,11 +1920,15 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
        // Get the video_index
        int index = mlt_properties_get_int( properties, "video_index" );
 
+       int unlock_needed = 0;
+
        // Reopen the file if necessary
        if ( !context && index > -1 )
        {
+               unlock_needed = 1;
+               pthread_mutex_lock( &self->video_mutex );
                producer_open( self, mlt_service_profile( MLT_PRODUCER_SERVICE(producer) ),
-                       mlt_properties_get( properties, "resource" ), 1 );
+                       mlt_properties_get( properties, "resource" ), 0 );
                context = self->video_format;
        }
 
@@ -2004,7 +1949,7 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
        }
 
        // Update the video properties if the index changed
-       if ( index != self->video_index )
+       if ( context && index > -1 && index != self->video_index )
        {
                // Reset the video properties if the index changed
                self->video_index = index;
@@ -2032,7 +1977,9 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
                mlt_properties_set_int( properties, "meta.media.width", self->video_codec->width );
                mlt_properties_set_int( properties, "meta.media.height", self->video_codec->height );
                mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
-               mlt_properties_set_int( frame_properties, "colorspace", self->colorspace );
+               mlt_properties_set_int( frame_properties, "colorspace", self->yuv_colorspace );
+               mlt_properties_set_int( frame_properties, "color_primaries", self->color_primaries );
+               mlt_properties_set_int( frame_properties, "full_luma", self->full_luma );
 
                // Workaround 1088 encodings missing cropping info.
                if ( self->video_codec->height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
@@ -2049,6 +1996,8 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
                // If something failed, use test card image
                mlt_properties_set_int( frame_properties, "test_image", 1 );
        }
+       if ( unlock_needed )
+               pthread_mutex_unlock( &self->video_mutex );
 }
 
 static int seek_audio( producer_avformat self, mlt_position position, double timecode )
@@ -2105,31 +2054,33 @@ static int sample_bytes( AVCodecContext *context )
 #endif
 }
 
-static void planar_to_interleaved( uint8_t *dest, uint8_t *src, int samples, int channels, int bytes_per_sample )
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+static void planar_to_interleaved( uint8_t *dest, AVFrame *src, int samples, int channels, int bytes_per_sample )
 {
        int s, c;
        for ( s = 0; s < samples; s++ )
        {
                for ( c = 0; c < channels; c++ )
                {
-                       memcpy( dest, src + ( c * samples + s ) * bytes_per_sample, bytes_per_sample );
+                       memcpy( dest, &src->data[c][s * bytes_per_sample], bytes_per_sample );
                        dest += bytes_per_sample;
                }
        }
 }
-
-static void planar_to_interleaved2( uint8_t *dest, AVFrame *src, int samples, int channels, int bytes_per_sample )
+#else
+static void planar_to_interleaved( uint8_t *dest, uint8_t *src, int samples, int channels, int bytes_per_sample )
 {
        int s, c;
        for ( s = 0; s < samples; s++ )
        {
                for ( c = 0; c < channels; c++ )
                {
-                       memcpy( dest, &src->data[c][s * bytes_per_sample], bytes_per_sample );
+                       memcpy( dest, src + ( c * samples + s ) * bytes_per_sample, bytes_per_sample );
                        dest += bytes_per_sample;
                }
        }
 }
+#endif
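
An illustration of the reordering planar_to_interleaved() performs (not part of the patch; two channels, three 16-bit samples each):

    int16_t left[3]  = { 10, 11, 12 };            /* plane 0 */
    int16_t right[3] = { 20, 21, 22 };            /* plane 1 */
    int16_t *planes[2] = { left, right };
    int16_t out[6];                               /* ends up 10,20,11,21,12,22 */
    int s, c;
    for ( s = 0; s < 3; s++ )
        for ( c = 0; c < 2; c++ )
            out[s * 2 + c] = planes[c][s];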
 
 static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int channels, int samples, double timecode, double fps )
 {
@@ -2201,7 +2152,7 @@ static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int
                        case AV_SAMPLE_FMT_S32P:
                        case AV_SAMPLE_FMT_FLTP:
 #if LIBAVCODEC_VERSION_MAJOR >= 55
-                               planar_to_interleaved2( dest, self->audio_frame, convert_samples, codec_context->channels, sizeof_sample );
+                               planar_to_interleaved( dest, self->audio_frame, convert_samples, codec_context->channels, sizeof_sample );
 #else
                                planar_to_interleaved( dest, decode_buffer, convert_samples, codec_context->channels, sizeof_sample );
 #endif
@@ -2291,6 +2242,8 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
 
        // Fetch the audio_format
        AVFormatContext *context = self->audio_format;
+       if ( !context )
+               goto exit_get_audio;
 
        int sizeof_sample = sizeof( int16_t );
        
@@ -2302,8 +2255,8 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
                index = 0;
                index_max = FFMIN( MAX_AUDIO_STREAMS, context->nb_streams );
                *channels = self->total_channels;
-               *samples = *samples * FFMAX( self->max_frequency, *frequency ) / *frequency;
-               *frequency = FFMAX( self->max_frequency, *frequency );
+               *samples = mlt_sample_calculator( fps, self->max_frequency, position );
+               *frequency = self->max_frequency;
        }
 
        // Initialize the buffers
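
The mlt_sample_calculator() calls above and below ask MLT for the exact per-frame sample count instead of scaling the caller's request; a quick example assuming a 48 kHz stream:

    int n = mlt_sample_calculator( 25.0, 48000, position );   /* 48000 / 25 = 1920 samples */
    /* at 30000/1001 fps the result varies between 1601 and 1602, so the counts
       still sum to exactly 48000 samples per second of video */
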
@@ -2335,10 +2288,9 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
 
                av_init_packet( &pkt );
                
-               // It requested number samples based on requested frame rate.
-               // Do not clean this up with a samples *= ...!
+		// The caller requested a number of samples based on the requested sample rate.
                if ( self->audio_index != INT_MAX )
-                       *samples = *samples * self->audio_codec[ self->audio_index ]->sample_rate / *frequency;
+                       *samples = mlt_sample_calculator( fps, self->audio_codec[ self->audio_index ]->sample_rate, position );
 
                while ( ret >= 0 && !got_audio )
                {
@@ -2381,6 +2333,25 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
                                                mlt_deque_push_back( self->vpackets, tmp );
                                        }
                                }
+                               else if ( ret < 0 )
+                               {
+                                       mlt_producer producer = self->parent;
+                                       mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
+                                       mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "av_read_frame returned error %d inside get_audio\n", ret );
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "reconnect" ) )
+                                       {
+                                               // Try to reconnect to live sources by closing context and codecs,
+                                               // and letting next call to get_frame() reopen.
+                                               prepare_reopen( self );
+                                               pthread_mutex_unlock( &self->packets_mutex );
+                                               goto exit_get_audio;
+                                       }
+                                       if ( !self->seekable && mlt_properties_get_int( properties, "exit_on_disconnect" ) )
+                                       {
+                                               mlt_log_fatal( MLT_PRODUCER_SERVICE(producer), "Exiting with error due to disconnected source.\n" );
+                                               exit( EXIT_FAILURE );
+                                       }
+                               }
                        }
                        pthread_mutex_unlock( &self->packets_mutex );
 
@@ -2478,6 +2449,7 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
        }
        else
        {
+exit_get_audio:
                // Get silence and don't touch the context
                mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples );
        }
@@ -2611,14 +2583,11 @@ static void producer_set_up_audio( producer_avformat self, mlt_frame frame )
                                audio_codec_init( self, index, properties );
                }
        }
-       else if ( context && index > -1 && audio_codec_init( self, index, properties ) )
+       else if ( context && index > -1 && index < MAX_AUDIO_STREAMS &&
+               audio_codec_init( self, index, properties ) )
        {
-               // Set the frame properties
-               if ( index < MAX_AUDIO_STREAMS )
-               {
-                       mlt_properties_set_int( frame_properties, "audio_frequency", self->audio_codec[ index ]->sample_rate );
-                       mlt_properties_set_int( frame_properties, "audio_channels", self->audio_codec[ index ]->channels );
-               }
+               mlt_properties_set_int( frame_properties, "audio_frequency", self->audio_codec[ index ]->sample_rate );
+               mlt_properties_set_int( frame_properties, "audio_channels", self->audio_codec[ index ]->channels );
        }
        if ( context && index > -1 )
        {
@@ -2688,7 +2657,8 @@ static void producer_avformat_close( producer_avformat self )
        av_free_packet( &self->pkt );
        av_free( self->video_frame );
        av_free( self->audio_frame );
-       pthread_mutex_lock( &self->open_mutex );
+       if ( self->is_mutex_init )
+               pthread_mutex_lock( &self->open_mutex );
        int i;
        for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )
        {
@@ -2717,7 +2687,8 @@ static void producer_avformat_close( producer_avformat self )
        if ( self->video_format )
                av_close_input_file( self->video_format );
 #endif
-       pthread_mutex_unlock( &self->open_mutex );
+       if ( self->is_mutex_init )
+               pthread_mutex_unlock( &self->open_mutex );
 #ifdef VDPAU
        vdpau_producer_close( self );
 #endif