X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=be1cc06e07e47b23c7fe4c0de32a437e04e89da8;hb=4fd6c14958ac5bd739c8d2b66b86cbf99ae68c23;hp=89d59b7ea39434b72f30eed0301d461a13db2034;hpb=7601aa4468f96fb8e3395266b194e7e7bd095453;p=mlt diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c index 89d59b7e..be1cc06e 100644 --- a/src/modules/avformat/producer_avformat.c +++ b/src/modules/avformat/producer_avformat.c @@ -1,6 +1,6 @@ /* * producer_avformat.c -- avformat producer - * Copyright (C) 2003-2012 Ushodaya Enterprises Limited + * Copyright (C) 2003-2014 Ushodaya Enterprises Limited * Author: Charles Yates * Author: Dan Dennedy * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard @@ -102,7 +102,7 @@ struct producer_avformat_s unsigned int invalid_pts_counter; unsigned int invalid_dts_counter; mlt_cache image_cache; - int colorspace; + int yuv_colorspace, color_primaries; int full_luma; pthread_mutex_t video_mutex; pthread_mutex_t audio_mutex; @@ -575,10 +575,6 @@ static int get_basic_info( producer_avformat self, mlt_profile profile, const ch AVFormatContext *format = self->video_format; - // We will treat everything with the producer fps. - // TODO: make this more flexible. - double fps = mlt_profile_fps( profile ); - // Get the duration if ( !mlt_properties_get_int( properties, "_length_computed" ) ) { @@ -587,7 +583,9 @@ static int get_basic_info( producer_avformat self, mlt_profile profile, const ch if ( format->duration != AV_NOPTS_VALUE ) { // This isn't going to be accurate for all formats - mlt_position frames = ( mlt_position )( ( ( double )format->duration / ( double )AV_TIME_BASE ) * fps ); + // We will treat everything with the producer fps. 
+ mlt_position frames = ( mlt_position )( int )( format->duration * + profile->frame_rate_num / profile->frame_rate_den / AV_TIME_BASE); mlt_properties_set_position( properties, "out", frames - 1 ); mlt_properties_set_position( properties, "length", frames ); mlt_properties_set_int( properties, "_length_computed", 1 ); @@ -662,17 +660,18 @@ static int producer_open( producer_avformat self, mlt_profile profile, const cha int error = 0; mlt_properties properties = MLT_PRODUCER_PROPERTIES( self->parent ); + if ( !self->is_mutex_init ) + { + pthread_mutex_init( &self->audio_mutex, NULL ); + pthread_mutex_init( &self->video_mutex, NULL ); + pthread_mutex_init( &self->packets_mutex, NULL ); + pthread_mutex_init( &self->open_mutex, NULL ); + self->is_mutex_init = 1; + } + // Lock the service if ( take_lock ) { - if ( !self->is_mutex_init ) - { - pthread_mutex_init( &self->audio_mutex, NULL ); - pthread_mutex_init( &self->video_mutex, NULL ); - pthread_mutex_init( &self->packets_mutex, NULL ); - pthread_mutex_init( &self->open_mutex, NULL ); - self->is_mutex_init = 1; - } pthread_mutex_lock( &self->audio_mutex ); pthread_mutex_lock( &self->video_mutex ); } @@ -1027,40 +1026,50 @@ static void get_audio_streams_info( producer_avformat self ) self->audio_streams, self->audio_max_stream, self->total_channels, self->max_channel ); } -static void set_luma_transfer( struct SwsContext *context, int colorspace, int use_full_range ) +static int set_luma_transfer( struct SwsContext *context, int src_colorspace, int dst_colorspace, int full_range ) { - int *coefficients; - const int *new_coefficients; - int full_range; - int brightness, contrast, saturation; - - if ( sws_getColorspaceDetails( context, &coefficients, &full_range, &coefficients, &full_range, - &brightness, &contrast, &saturation ) != -1 ) - { - // Don't change these from defaults unless explicitly told to. 
-		if ( use_full_range >= 0 )
-			full_range = use_full_range;
-		switch ( colorspace )
-		{
-		case 170:
-		case 470:
-		case 601:
-		case 624:
-			new_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
-			break;
-		case 240:
-			new_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
-			break;
-		case 709:
-			new_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
-			break;
-		default:
-			new_coefficients = coefficients;
-			break;
-		}
-		sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range,
-			brightness, contrast, saturation );
+	const int *src_coefficients = sws_getCoefficients( SWS_CS_DEFAULT );
+	const int *dst_coefficients = sws_getCoefficients( SWS_CS_DEFAULT );
+	int brightness = 0;
+	int contrast = 1 << 16;
+	int saturation = 1 << 16;
+
+	switch ( src_colorspace )
+	{
+	case 170:
+	case 470:
+	case 601:
+	case 624:
+		src_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
+		break;
+	case 240:
+		src_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
+		break;
+	case 709:
+		src_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
+		break;
+	default:
+		break;
+	}
+	switch ( dst_colorspace )
+	{
+	case 170:
+	case 470:
+	case 601:
+	case 624:
+		dst_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
+		break;
+	case 240:
+		dst_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
+		break;
+	case 709:
+		dst_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
+		break;
+	default:
+		break;
 	}
+	return sws_setColorspaceDetails( context, src_coefficients, full_range, dst_coefficients, full_range,
+		brightness, contrast, saturation );
 }
 
 static mlt_image_format pick_pix_format( enum PixelFormat pix_fmt )
@@ -1118,10 +1127,13 @@ static mlt_audio_format pick_audio_format( int sample_fmt )
 	}
 }
 
-static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buffer, int pix_fmt,
+// returns resulting YUV colorspace
+static int convert_image( producer_avformat self, AVFrame *frame, uint8_t *buffer, int pix_fmt,
 	mlt_image_format *format, int width, int height, uint8_t **alpha )
 {
 	int flags = SWS_BICUBIC | SWS_ACCURATE_RND;
+	mlt_profile profile = mlt_service_profile( MLT_PRODUCER_SERVICE( self->parent ) );
+	int result = self->yuv_colorspace;
 #ifdef USE_MMX
 	flags |= SWS_CPU_CAPS_MMX;
 #endif
@@ -1130,6 +1142,10 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
 	flags |= SWS_CPU_CAPS_MMX2;
 #endif
 
+	mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "%s @ %dx%d space %d->%d\n",
+		mlt_image_format_name( *format ),
+		width, height, self->yuv_colorspace, profile->colorspace );
+
 	// extract alpha from planar formats
 	if ( ( pix_fmt == PIX_FMT_YUVA420P
 #if defined(FFUDIV) && LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
@@ -1152,7 +1168,8 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
 	if ( *format == mlt_image_yuv420p )
 	{
 		struct SwsContext *context = sws_getContext( width, height, pix_fmt,
-			width, height, PIX_FMT_YUV420P, flags, NULL, NULL, NULL);
+			width, height, self->full_luma ? PIX_FMT_YUVJ420P : PIX_FMT_YUV420P,
+			flags, NULL, NULL, NULL);
 		AVPicture output;
 		output.data[0] = buffer;
 		output.data[1] = buffer + width * height;
@@ -1160,7 +1177,8 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
 		output.linesize[0] = width;
 		output.linesize[1] = width >> 1;
 		output.linesize[2] = width >> 1;
-		set_luma_transfer( context, self->colorspace, -1 );
+		if ( !set_luma_transfer( context, self->yuv_colorspace, profile->colorspace, self->full_luma ) )
+			result = profile->colorspace;
 		sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
 			output.data, output.linesize);
 		sws_freeContext( context );
@@ -1171,7 +1189,8 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
 			width, height, PIX_FMT_RGB24, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
 		AVPicture output;
 		avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
-		set_luma_transfer( context, self->colorspace, self->full_luma );
+		// libswscale wants the RGB colorspace to be SWS_CS_DEFAULT, which is = SWS_CS_ITU601.
+		set_luma_transfer( context, self->yuv_colorspace, 601, self->full_luma );
 		sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
 			output.data, output.linesize);
 		sws_freeContext( context );
@@ -1182,7 +1201,8 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
 			width, height, PIX_FMT_RGBA, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
 		AVPicture output;
 		avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
-		set_luma_transfer( context, self->colorspace, self->full_luma );
+		// libswscale wants the RGB colorspace to be SWS_CS_DEFAULT, which is = SWS_CS_ITU601.
+		set_luma_transfer( context, self->yuv_colorspace, 601, self->full_luma );
 		sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
 			output.data, output.linesize);
 		sws_freeContext( context );
@@ -1193,11 +1213,13 @@ static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buff
 			width, height, PIX_FMT_YUYV422, flags | SWS_FULL_CHR_H_INP, NULL, NULL, NULL);
 		AVPicture output;
 		avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
-		set_luma_transfer( context, self->colorspace, -1 );
+		if ( !set_luma_transfer( context, self->yuv_colorspace, profile->colorspace, self->full_luma ) )
+			result = profile->colorspace;
 		sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
 			output.data, output.linesize);
 		sws_freeContext( context );
 	}
+	return result;
 }
 
 /** Allocate the image buffer and set it on the frame.
@@ -1370,13 +1392,17 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 				picture.linesize[0] = codec_context->width;
 				picture.linesize[1] = codec_context->width / 2;
 				picture.linesize[2] = codec_context->width / 2;
-				convert_image( self, (AVFrame*) &picture, *buffer,
+				int yuv_colorspace = convert_image( self, (AVFrame*) &picture, *buffer,
 					PIX_FMT_YUV420P, format, *width, *height, &alpha );
+				mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
 			}
 			else
 #endif
-				convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+			{
+				int yuv_colorspace = convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
 					format, *width, *height, &alpha );
+				mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
+			}
 			got_picture = 1;
 		}
 	}
@@ -1552,8 +1576,9 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 				VdpStatus status = vdp_surface_get_bits( render->surface, dest_format, planes, pitches );
 				if ( status == VDP_STATUS_OK )
 				{
-					convert_image( self, self->video_frame, *buffer, PIX_FMT_YUV420P,
+					int yuv_colorspace = convert_image( self, self->video_frame, *buffer, PIX_FMT_YUV420P,
 						format, *width, *height, &alpha );
+					mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
 				}
 				else
 				{
@@ -1569,8 +1594,11 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 			}
 			else
 #endif
-				convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+			{
+				int yuv_colorspace = convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
 					format, *width, *height, &alpha );
+				mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
+			}
 			self->top_field_first |= self->video_frame->top_field_first;
 			self->current_position = int_position;
 		}
@@ -1618,13 +1644,17 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form
 					picture.linesize[0] = codec_context->width;
 					picture.linesize[1] = codec_context->width / 2;
 					picture.linesize[2] = codec_context->width / 2;
-					convert_image( self, (AVFrame*) &picture, *buffer,
+					int yuv_colorspace = convert_image( self, (AVFrame*) &picture, *buffer,
 						PIX_FMT_YUV420P, format, *width, *height, &alpha );
+					mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
 				}
 				else
 #endif
-					convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
+				{
+					int yuv_colorspace = convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
 						format, *width, *height, &alpha );
+					mlt_properties_set_int( frame_properties, "colorspace", yuv_colorspace );
+				}
 				got_picture = 1;
 			}
 		}
@@ -1823,33 +1851,49 @@ static int video_codec_init( producer_avformat self, int index, mlt_properties p
 	mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
 
 	// Set the YUV colorspace from override or detect
-	self->colorspace = mlt_properties_get_int( properties, "force_colorspace" );
+	self->yuv_colorspace = mlt_properties_get_int( properties, "force_colorspace" );
 #if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0)
-	if ( ! self->colorspace )
+	if ( ! self->yuv_colorspace )
 	{
 		switch ( self->video_codec->colorspace )
 		{
 		case AVCOL_SPC_SMPTE240M:
-			self->colorspace = 240;
+			self->yuv_colorspace = 240;
 			break;
 		case AVCOL_SPC_BT470BG:
 		case AVCOL_SPC_SMPTE170M:
-			self->colorspace = 601;
+			self->yuv_colorspace = 601;
 			break;
 		case AVCOL_SPC_BT709:
-			self->colorspace = 709;
+			self->yuv_colorspace = 709;
 			break;
 		default:
 			// This is a heuristic Charles Poynton suggests in "Digital Video and HDTV"
-			self->colorspace = self->video_codec->width * self->video_codec->height > 750000 ? 709 : 601;
+			self->yuv_colorspace = self->video_codec->width * self->video_codec->height > 750000 ? 709 : 601;
 			break;
 		}
 	}
 #endif
 	// Let apps get chosen colorspace
-	mlt_properties_set_int( properties, "meta.media.colorspace", self->colorspace );
+	mlt_properties_set_int( properties, "meta.media.colorspace", self->yuv_colorspace );
 
-	self->full_luma = -1;
+	switch ( self->video_codec->color_primaries )
+	{
+	case AVCOL_PRI_BT470BG:
+		self->color_primaries = 601625;
+		break;
+	case AVCOL_PRI_SMPTE170M:
+	case AVCOL_PRI_SMPTE240M:
+		self->color_primaries = 601525;
+		break;
+	case AVCOL_PRI_BT709:
+	case AVCOL_PRI_UNSPECIFIED:
+	default:
+		self->color_primaries = 709;
+		break;
+	}
+
+	self->full_luma = 0;
 #if LIBAVCODEC_VERSION_INT >= ((52<<16)+(72<<8)+2)
 	mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "color_range %d\n", codec_context->color_range );
 	if ( codec_context->color_range == AVCOL_RANGE_JPEG )
@@ -1907,7 +1951,7 @@ static void producer_set_up_video( producer_avformat self, mlt_frame frame )
 	}
 
 	// Update the video properties if the index changed
-	if ( index != self->video_index )
+	if ( context && index > -1 && index != self->video_index )
 	{
 		// Reset the video properties if the index changed
 		self->video_index = index;
@@ -1935,7 +1979,9 @@
 	mlt_properties_set_int( properties, "meta.media.width", self->video_codec->width );
 	mlt_properties_set_int( properties, "meta.media.height", self->video_codec->height );
 	mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio );
-	mlt_properties_set_int( frame_properties, "colorspace", self->colorspace );
+	mlt_properties_set_int( frame_properties, "colorspace", self->yuv_colorspace );
+	mlt_properties_set_int( frame_properties, "color_primaries", self->color_primaries );
+	mlt_properties_set_int( frame_properties, "full_luma", self->full_luma );
 
 	// Workaround 1088 encodings missing cropping info.
 	if ( self->video_codec->height == 1088 && mlt_profile_dar( mlt_service_profile( MLT_PRODUCER_SERVICE( producer ) ) ) == 16.0/9.0 )
@@ -2010,31 +2056,33 @@ static int sample_bytes( AVCodecContext *context )
 #endif
 }
 
-static void planar_to_interleaved( uint8_t *dest, uint8_t *src, int samples, int channels, int bytes_per_sample )
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+static void planar_to_interleaved( uint8_t *dest, AVFrame *src, int samples, int channels, int bytes_per_sample )
 {
 	int s, c;
 	for ( s = 0; s < samples; s++ )
 	{
 		for ( c = 0; c < channels; c++ )
 		{
-			memcpy( dest, src + ( c * samples + s ) * bytes_per_sample, bytes_per_sample );
+			memcpy( dest, &src->data[c][s * bytes_per_sample], bytes_per_sample );
 			dest += bytes_per_sample;
 		}
 	}
 }
-
-static void planar_to_interleaved2( uint8_t *dest, AVFrame *src, int samples, int channels, int bytes_per_sample )
+#else
+static void planar_to_interleaved( uint8_t *dest, uint8_t *src, int samples, int channels, int bytes_per_sample )
 {
 	int s, c;
 	for ( s = 0; s < samples; s++ )
 	{
 		for ( c = 0; c < channels; c++ )
 		{
-			memcpy( dest, &src->data[c][s * bytes_per_sample], bytes_per_sample );
+			memcpy( dest, src + ( c * samples + s ) * bytes_per_sample, bytes_per_sample );
 			dest += bytes_per_sample;
 		}
 	}
 }
+#endif
 
 static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int channels, int samples, double timecode, double fps )
 {
@@ -2106,7 +2154,7 @@
 		case AV_SAMPLE_FMT_S32P:
 		case AV_SAMPLE_FMT_FLTP:
 #if LIBAVCODEC_VERSION_MAJOR >= 55
-			planar_to_interleaved2( dest, self->audio_frame, convert_samples, codec_context->channels, sizeof_sample );
+			planar_to_interleaved( dest, self->audio_frame, convert_samples, codec_context->channels, sizeof_sample );
#else
 			planar_to_interleaved( dest, decode_buffer, convert_samples, codec_context->channels, sizeof_sample );
 #endif
@@ -2209,8 +2257,8 @@ static int producer_get_audio( mlt_frame frame, void **buffer, mlt_audio_format
 		index = 0;
 		index_max = FFMIN( MAX_AUDIO_STREAMS, context->nb_streams );
 		*channels = self->total_channels;
-		*samples = mlt_sample_calculator( fps, FFMAX( self->max_frequency, *frequency ), position );
-		*frequency = FFMAX( self->max_frequency, *frequency );
+		*samples = mlt_sample_calculator( fps, self->max_frequency, position );
+		*frequency = self->max_frequency;
 	}
 
 	// Initialize the buffers
@@ -2537,14 +2585,11 @@ static void producer_set_up_audio( producer_avformat self, mlt_frame frame )
 			audio_codec_init( self, index, properties );
 		}
 	}
-	else if ( context && index > -1 && audio_codec_init( self, index, properties ) )
+	else if ( context && index > -1 && index < MAX_AUDIO_STREAMS &&
+		audio_codec_init( self, index, properties ) )
 	{
-		// Set the frame properties
-		if ( index < MAX_AUDIO_STREAMS )
-		{
-			mlt_properties_set_int( frame_properties, "audio_frequency", self->audio_codec[ index ]->sample_rate );
-			mlt_properties_set_int( frame_properties, "audio_channels", self->audio_codec[ index ]->channels );
-		}
+		mlt_properties_set_int( frame_properties, "audio_frequency", self->audio_codec[ index ]->sample_rate );
+		mlt_properties_set_int( frame_properties, "audio_channels", self->audio_codec[ index ]->channels );
 	}
 	if ( context && index > -1 )
 	{
@@ -2614,7 +2659,8 @@ static void producer_avformat_close( producer_avformat self )
 	av_free_packet( &self->pkt );
 	av_free( self->video_frame );
 	av_free( self->audio_frame );
-	pthread_mutex_lock( &self->open_mutex );
+	if ( self->is_mutex_init )
+		pthread_mutex_lock( &self->open_mutex );
 	int i;
 	for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )
 	{
@@ -2643,7 +2689,8 @@
 	if ( self->video_format )
 		av_close_input_file( self->video_format );
 #endif
-	pthread_mutex_unlock( &self->open_mutex );
+	if ( self->is_mutex_init )
+		pthread_mutex_unlock( &self->open_mutex );
 #ifdef VDPAU
 	vdpau_producer_close( self );
 #endif