X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fproducer_avformat.c;h=d42a307ee0f088c87386936bb7dda699703cae79;hb=2745c963dac1dfb56ea2eae8da5a3326d50875c3;hp=f72d9fa7f4bbe99b7b01321ed38b8bf8d82047fa;hpb=21209debb562b399069257566d765c45ea7180b0;p=mlt

diff --git a/src/modules/avformat/producer_avformat.c b/src/modules/avformat/producer_avformat.c
index f72d9fa7..d42a307e 100644
--- a/src/modules/avformat/producer_avformat.c
+++ b/src/modules/avformat/producer_avformat.c
@@ -2,30 +2,32 @@
  * producer_avformat.c -- avformat producer
  * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
  * Author: Charles Yates
+ * Much code borrowed from ffmpeg.c: Copyright (c) 2000-2003 Fabrice Bellard
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
  *
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-// Local header files
-#include "producer_avformat.h"
-
 // MLT Header files
+#include <framework/mlt_producer.h>
 #include <framework/mlt_frame.h>
 
 // ffmpeg Header files
 #include <avformat.h>
+#ifdef SWSCALE
+#include <swscale.h>
+#endif
 
 // System header files
 #include <stdlib.h>
@@ -37,13 +39,13 @@ void avformat_lock( );
 void avformat_unlock( );
 
 // Forward references.
-static int producer_open( mlt_producer this, char *file );
+static int producer_open( mlt_producer this, mlt_profile profile, char *file );
 static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index );
 
 /** Constructor for libavformat.
*/ -mlt_producer producer_avformat_init( char *file ) +mlt_producer producer_avformat_init( mlt_profile profile, char *file ) { mlt_producer this = NULL; @@ -57,7 +59,7 @@ mlt_producer producer_avformat_init( char *file ) if ( mlt_producer_init( this, NULL ) == 0 ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Set the resource property (required for all producers) mlt_properties_set( properties, "resource", file ); @@ -66,7 +68,7 @@ mlt_producer producer_avformat_init( char *file ) this->get_frame = producer_get_frame; // Open the file - if ( producer_open( this, file ) != 0 ) + if ( producer_open( this, profile, file ) != 0 ) { // Clean up mlt_producer_close( this ); @@ -89,10 +91,13 @@ static void find_default_streams( AVFormatContext *context, int *audio_index, in for( i = 0; i < context->nb_streams; i++ ) { // Get the codec context - AVCodecContext *codec_context = &context->streams[ i ]->codec; + AVCodecContext *codec_context = context->streams[ i ]->codec; + + if ( avcodec_find_decoder( codec_context->codec_id ) == NULL ) + continue; // Determine the type and obtain the first index of each type - switch( codec_context->codec_type ) + switch( codec_context->codec_type ) { case CODEC_TYPE_VIDEO: if ( *video_index < 0 ) @@ -101,9 +106,9 @@ static void find_default_streams( AVFormatContext *context, int *audio_index, in case CODEC_TYPE_AUDIO: if ( *audio_index < 0 ) *audio_index = i; - break; + break; default: - break; + break; } } } @@ -147,7 +152,7 @@ static void producer_codec_close( void *codec ) /** Open the file. */ -static int producer_open( mlt_producer this, char *file ) +static int producer_open( mlt_producer this, mlt_profile profile, char *file ) { // Return an error code (0 == no error) int error = 0; @@ -156,10 +161,10 @@ static int producer_open( mlt_producer this, char *file ) AVFormatContext *context = NULL; // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // We will treat everything with the producer fps - double fps = mlt_properties_get_double( properties, "fps" ); + double fps = mlt_profile_fps( profile ); // Lock the mutex now avformat_lock( ); @@ -173,6 +178,9 @@ static int producer_open( mlt_producer this, char *file ) // AV option (0 = both, 1 = video, 2 = audio) int av = 0; + // Setting lowest log level + av_log_set_level( -1 ); + // Only if there is not a protocol specification that avformat can handle if ( mrl && !url_exist( file ) ) { @@ -193,13 +201,14 @@ static int producer_open( mlt_producer this, char *file ) // These are required by video4linux (defaults) params->width = 640; params->height = 480; - params->frame_rate = 25; - params->frame_rate_base = 1; - params->device = file; + params->time_base= (AVRational){1,25}; + // params->device = file; params->channels = 2; params->sample_rate = 48000; } + // XXX: this does not work anymore since avdevice + // TODO: make producer_avddevice? // Parse out params mrl = strchr( file, '?' 
); while ( mrl ) @@ -215,9 +224,9 @@ static int producer_open( mlt_producer this, char *file ) if ( t ) t[0] = 0; if ( !strcmp( name, "frame_rate" ) ) - params->frame_rate = atoi( value ); + params->time_base.den = atoi( value ); else if ( !strcmp( name, "frame_rate_base" ) ) - params->frame_rate_base = atoi( value ); + params->time_base.num = atoi( value ); else if ( !strcmp( name, "sample_rate" ) ) params->sample_rate = atoi( value ); else if ( !strcmp( name, "channels" ) ) @@ -240,8 +249,7 @@ static int producer_open( mlt_producer this, char *file ) } // Now attempt to open the file - error = av_open_input_file( &context, file, format, 0, params ); - error = error < 0; + error = av_open_input_file( &context, file, format, 0, params ) < 0; // Cleanup AVFormatParameters free( standard ); @@ -259,32 +267,66 @@ static int producer_open( mlt_producer this, char *file ) // We will default to the first audio and video streams found int audio_index = -1; int video_index = -1; + int av_bypass = 0; // Now set properties where we can (use default unknowns if required) if ( context->duration != AV_NOPTS_VALUE ) { // This isn't going to be accurate for all formats - mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps ); - mlt_properties_set_position( properties, "out", frames - 2 ); - mlt_properties_set_position( properties, "length", frames - 1 ); + mlt_position frames = ( mlt_position )( ( ( double )context->duration / ( double )AV_TIME_BASE ) * fps + 0.5 ); + mlt_properties_set_position( properties, "out", frames - 1 ); + mlt_properties_set_position( properties, "length", frames ); } // Find default audio and video streams find_default_streams( context, &audio_index, &video_index ); - if ( context->start_time != AV_NOPTS_VALUE ) - mlt_properties_set_double( properties, "start_time", context->start_time ); + if ( context->start_time != AV_NOPTS_VALUE ) + mlt_properties_set_double( properties, "_start_time", context->start_time ); // Check if we're seekable (something funny about mpeg here :-/) - if ( strcmp( file, "pipe:" ) ) - mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) ) >= 0 ); + if ( strcmp( file, "pipe:" ) && strncmp( file, "http://", 6 ) ) + { + mlt_properties_set_int( properties, "seekable", av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ), AVSEEK_FLAG_BACKWARD ) >= 0 ); + mlt_properties_set_data( properties, "dummy_context", context, 0, producer_file_close, NULL ); + av_open_input_file( &context, file, NULL, 0, NULL ); + av_find_stream_info( context ); + } + else + av_bypass = 1; // Store selected audio and video indexes on properties mlt_properties_set_int( properties, "audio_index", audio_index ); mlt_properties_set_int( properties, "video_index", video_index ); + mlt_properties_set_int( properties, "_last_position", -1 ); + + // Fetch the width, height and aspect ratio + if ( video_index != -1 ) + { + AVCodecContext *codec_context = context->streams[ video_index ]->codec; + mlt_properties_set_int( properties, "width", codec_context->width ); + mlt_properties_set_int( properties, "height", codec_context->height ); + mlt_properties_set_double( properties, "aspect_ratio", av_q2d( codec_context->sample_aspect_ratio ) ); + } + + // Read Metadata + if (context->title != NULL) + mlt_properties_set(properties, "meta.attr.title.markup", context->title ); + if (context->author != NULL) + mlt_properties_set(properties, 
"meta.attr.author.markup", context->author ); + if (context->copyright != NULL) + mlt_properties_set(properties, "meta.attr.copyright.markup", context->copyright ); + if (context->comment != NULL) + mlt_properties_set(properties, "meta.attr.comment.markup", context->comment ); + if (context->album != NULL) + mlt_properties_set(properties, "meta.attr.album.markup", context->album ); + if (context->year != 0) + mlt_properties_set_int(properties, "meta.attr.year.markup", context->year ); + if (context->track != 0) + mlt_properties_set_int(properties, "meta.attr.track.markup", context->track ); // We're going to cheat here - for a/v files, we will have two contexts (reasoning will be clear later) - if ( av == 0 && strcmp( file, "pipe:" ) && audio_index != -1 && video_index != -1 ) + if ( av == 0 && !av_bypass && audio_index != -1 && video_index != -1 ) { // We'll use the open one as our video_context mlt_properties_set_data( properties, "video_context", context, 0, producer_file_close, NULL ); @@ -311,6 +353,8 @@ static int producer_open( mlt_producer this, char *file ) // Something has gone wrong error = -1; } + + mlt_properties_set_int( properties, "av_bypass", av_bypass ); } } @@ -325,63 +369,49 @@ static int producer_open( mlt_producer this, char *file ) static double producer_time_of_frame( mlt_producer this, mlt_position position ) { - // Get the properties - mlt_properties properties = mlt_producer_properties( this ); - - // Obtain the fps - double fps = mlt_properties_get_double( properties, "fps" ); - - // Do the calc - return ( double )position / fps; + return ( double )position / mlt_producer_get_fps( this ); } static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, mlt_image_format format, int width, int height ) { - // EXPERIMENTAL IMAGE NORMALISATIONS - if ( pix_fmt == PIX_FMT_YUV420P && format == mlt_image_yuv422 ) +#ifdef SWSCALE + if ( format == mlt_image_yuv420p ) { - register int i, j; - register int half = width >> 1; - register uint8_t *Y = ( ( AVPicture * )frame )->data[ 0 ]; - register uint8_t *U = ( ( AVPicture * )frame )->data[ 1 ]; - register uint8_t *V = ( ( AVPicture * )frame )->data[ 2 ]; - register uint8_t *d = buffer; - register uint8_t *y, *u, *v; - - i = height >> 1; - while ( i -- ) - { - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )frame )->linesize[ 0 ]; - y = Y; - u = U; - v = V; - j = half; - while ( j -- ) - { - *d ++ = *y ++; - *d ++ = *u ++; - *d ++ = *y ++; - *d ++ = *v ++; - } - - Y += ( ( AVPicture * )frame )->linesize[ 0 ]; - U += ( ( AVPicture * )frame )->linesize[ 1 ]; - V += ( ( AVPicture * )frame )->linesize[ 2 ]; - } + struct SwsContext *context = sws_getContext( width, height, pix_fmt, + width, height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL); + AVPicture output; + output.data[0] = buffer; + output.data[1] = buffer + width * height; + output.data[2] = buffer + ( 3 * width * height ) / 2; + output.linesize[0] = width; + output.linesize[1] = width >> 1; + output.linesize[2] = width >> 1; + sws_scale( context, frame->data, frame->linesize, 0, height, + output.data, output.linesize); + sws_freeContext( context ); } - else if ( format == mlt_image_yuv420p ) + else if ( format == mlt_image_rgb24 ) + { + struct SwsContext *context = sws_getContext( width, height, pix_fmt, + width, height, PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL); + AVPicture output; + avpicture_fill( &output, buffer, 
PIX_FMT_RGB24, width, height ); + sws_scale( context, frame->data, frame->linesize, 0, height, + output.data, output.linesize); + sws_freeContext( context ); + } + else + { + struct SwsContext *context = sws_getContext( width, height, pix_fmt, + width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL); + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height ); + sws_scale( context, frame->data, frame->linesize, 0, height, + output.data, output.linesize); + sws_freeContext( context ); + } +#else + if ( format == mlt_image_yuv420p ) { AVPicture pict; pict.data[0] = buffer; @@ -392,12 +422,19 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, pict.linesize[2] = width >> 1; img_convert( &pict, PIX_FMT_YUV420P, (AVPicture *)frame, pix_fmt, width, height ); } + else if ( format == mlt_image_rgb24 ) + { + AVPicture output; + avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height ); + img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height ); + } else { AVPicture output; avpicture_fill( &output, buffer, PIX_FMT_YUV422, width, height ); img_convert( &output, PIX_FMT_YUV422, (AVPicture *)frame, pix_fmt, width, height ); } +#endif } /** Get an image from a frame. @@ -406,7 +443,7 @@ static inline void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt, static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_format *format, int *width, int *height, int writable ) { // Get the properties from the frame - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Obtain the frame number of this frame mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" ); @@ -415,7 +452,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL ); // Get the producer properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the video_context AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL ); @@ -424,16 +461,13 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form int index = mlt_properties_get_int( properties, "video_index" ); // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "video_expected" ); - - // Calculate the real time code - double real_timecode = producer_time_of_frame( this, position ); + mlt_position expected = mlt_properties_get_position( properties, "_video_expected" ); // Get the video stream AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; @@ -447,11 +481,12 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Special case ffwd handling int ignore = 0; - // Current time calcs - double current_time = mlt_properties_get_double( properties, "current_time" ); - // We may want to use the source fps if available double source_fps = mlt_properties_get_double( properties, "source_fps" ); + double fps = mlt_producer_get_fps( this ); + + // This is the physical frame position in the source + int req_position = ( int )( position / fps * source_fps + 0.5 ); // Get the seekable status 
int seekable = mlt_properties_get_int( properties, "seekable" ); @@ -459,13 +494,29 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Generate the size in bytes int size = 0; + // Hopefully provide better support for streams... + int av_bypass = mlt_properties_get_int( properties, "av_bypass" ); + + // Determines if we have to decode all frames in a sequence + int must_decode = 1; + // Set the result arguments that we know here (only *buffer is now required) - if ( *format != mlt_image_yuv422 && *format != mlt_image_yuv420p ) - *format = mlt_image_yuv422; *width = codec_context->width; *height = codec_context->height; - size = *width * ( *height + 1 ) * 2; + switch ( *format ) + { + case mlt_image_yuv420p: + size = *width * 3 * ( *height + 1 ) / 2; + break; + case mlt_image_rgb24: + size = *width * ( *height + 1 ) * 3; + break; + default: + *format = mlt_image_yuv422; + size = *width * ( *height + 1 ) * 2; + break; + } // Set this on the frame properties mlt_properties_set_int( frame_properties, "width", *width ); @@ -474,10 +525,15 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // Construct the output image *buffer = mlt_pool_alloc( size ); + // Temporary hack to improve intra frame only + must_decode = strcmp( codec_context->codec->name, "mjpeg" ) && + strcmp( codec_context->codec->name, "rawvideo" ) && + strcmp( codec_context->codec->name, "dvvideo" ); + // Seek if necessary if ( position != expected ) { - if ( position + 1 == expected ) + if ( av_frame != NULL && position + 1 == expected ) { // We're paused - use last image paused = 1; @@ -485,24 +541,34 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form else if ( !seekable && position > expected && ( position - expected ) < 250 ) { // Fast forward - seeking is inefficient for small distances - just ignore following frames - ignore = position - expected; + ignore = ( int )( ( position - expected ) / fps * source_fps ); } - else if ( codec_context->gop_size == 0 || ( position < expected || position - expected >= 12 ) ) + else if ( seekable && ( position < expected || position - expected >= 12 ) ) { - // Set to the real timecode - av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ); + // Calculate the timestamp for the requested frame + int64_t timestamp = ( int64_t )( ( double )req_position / source_fps * AV_TIME_BASE + 0.5 ); + if ( ( uint64_t )context->start_time != AV_NOPTS_VALUE ) + timestamp += context->start_time; + if ( must_decode ) + timestamp -= AV_TIME_BASE; + if ( timestamp < 0 ) + timestamp = 0; + + // Set to the timestamp + av_seek_frame( context, -1, timestamp, AVSEEK_FLAG_BACKWARD ); // Remove the cached info relating to the previous position - mlt_properties_set_double( properties, "current_time", real_timecode ); - + mlt_properties_set_int( properties, "_current_position", -1 ); + mlt_properties_set_int( properties, "_last_position", -1 ); mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); av_frame = NULL; } } - - // Duplicate the last image if necessary - if ( av_frame != NULL && ( paused || mlt_properties_get_double( properties, "current_time" ) >= real_timecode ) && - strcmp( mlt_properties_get( properties, "resource" ), "pipe:" ) ) + + // Duplicate the last image if necessary (see comment on rawvideo below) + int current_position = mlt_properties_get_int( properties, "_current_position" ); + int got_picture = 
mlt_properties_get_int( properties, "_got_picture" ); + if ( av_frame != NULL && got_picture && ( paused || current_position >= req_position ) && av_bypass == 0 ) { // Duplicate it convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); @@ -513,16 +579,16 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form else { int ret = 0; - int got_picture = 0; + int int_position = 0; + got_picture = 0; - memset( &pkt, 0, sizeof( pkt ) ); + av_init_packet( &pkt ); // Construct an AVFrame for YUV422 conversion if ( av_frame == NULL ) { - av_frame = calloc( 1, sizeof( AVFrame ) ); - mlt_properties_set_data( properties, "av_frame", av_frame, 0, free, NULL ); - paused = 0; + av_frame = avcodec_alloc_frame( ); + mlt_properties_set_data( properties, "av_frame", av_frame, 0, av_free, NULL ); } while( ret >= 0 && !got_picture ) @@ -533,65 +599,67 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form // We only deal with video from the selected video_index if ( ret >= 0 && pkt.stream_index == index && pkt.size > 0 ) { + // Determine time code of the packet + int_position = ( int )( av_q2d( stream->time_base ) * pkt.dts * source_fps + 0.5 ); + if ( context->start_time != AV_NOPTS_VALUE ) + int_position -= ( int )( context->start_time * source_fps / AV_TIME_BASE + 0.5 ); + int last_position = mlt_properties_get_int( properties, "_last_position" ); + if ( int_position == last_position ) + int_position = last_position + 1; + mlt_properties_set_int( properties, "_last_position", int_position ); + // Decode the image - ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size ); + if ( must_decode || int_position >= req_position ) + ret = avcodec_decode_video( codec_context, av_frame, &got_picture, pkt.data, pkt.size ); if ( got_picture ) { - if ( pkt.pts != AV_NOPTS_VALUE ) - current_time = ( double )pkt.pts / 1000000.0; - else - current_time = real_timecode; - // Handle ignore - if ( ( int )( current_time * 100 ) < ( int )( real_timecode * 100 ) - 7 ) + if ( int_position < req_position ) { ignore = 0; got_picture = 0; } - else if ( current_time >= real_timecode ) + else if ( int_position >= req_position ) { - //current_time = real_timecode; ignore = 0; } else if ( ignore -- ) { got_picture = 0; } - mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first ); } + av_free_packet( &pkt ); } - - // We're finished with this packet regardless - av_free_packet( &pkt ); - } - - // Now handle the picture if we have one - if ( got_picture ) - { - convert_image( av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); - - mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); - - if ( current_time == 0 && source_fps != 0 ) + else if ( ret >= 0 ) { - double fps = mlt_properties_get_double( properties, "fps" ); - current_time = ceil( source_fps * ( double )position / fps ) * ( 1 / source_fps ); - mlt_properties_set_double( properties, "current_time", current_time ); + av_free_packet( &pkt ); } - else + + // Now handle the picture if we have one + if ( got_picture ) { - mlt_properties_set_double( properties, "current_time", current_time ); + mlt_properties_set_int( frame_properties, "progressive", !av_frame->interlaced_frame ); + mlt_properties_set_int( frame_properties, "top_field_first", av_frame->top_field_first ); + mlt_properties_set_int( properties, "top_field_first", av_frame->top_field_first ); + convert_image( 
av_frame, *buffer, codec_context->pix_fmt, *format, *width, *height ); + mlt_properties_set_data( frame_properties, "image", *buffer, size, (mlt_destructor)mlt_pool_release, NULL ); + mlt_properties_set_int( properties, "_current_position", int_position ); + mlt_properties_set_int( properties, "_got_picture", 1 ); } } } - + + // Very untidy - for rawvideo, the packet contains the frame, hence the free packet + // above will break the pause behaviour - so we wipe the frame now + if ( !strcmp( codec_context->codec->name, "rawvideo" ) ) + mlt_properties_set_data( properties, "av_frame", NULL, 0, NULL, NULL ); + // Set the field order property for this frame - mlt_properties_set_int( frame_properties, "top_field_first", - mlt_properties_get_int( properties, "top_field_first" ) ); + mlt_properties_set_int( frame_properties, "top_field_first", mlt_properties_get_int( properties, "top_field_first" ) ); // Regardless of speed, we expect to get the next frame (cos we ain't too bright) - mlt_properties_set_position( properties, "video_expected", position + 1 ); + mlt_properties_set_position( properties, "_video_expected", position + 1 ); return 0; } @@ -602,7 +670,7 @@ static int producer_get_image( mlt_frame frame, uint8_t **buffer, mlt_image_form static void producer_set_up_video( mlt_producer this, mlt_frame frame ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the video_context AVFormatContext *context = mlt_properties_get_data( properties, "video_context", NULL ); @@ -611,7 +679,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) int index = mlt_properties_get_int( properties, "video_index" ); // Get the frame properties - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); if ( context != NULL && index != -1 ) { @@ -619,7 +687,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "video_codec", NULL ); @@ -627,6 +695,16 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) // Initialise the codec if necessary if ( codec == NULL ) { + // Initialise multi-threading + int thread_count = mlt_properties_get_int( properties, "threads" ); + if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) ) + thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) ); + if ( thread_count > 1 ) + { + avcodec_thread_init( codec_context, thread_count ); + codec_context->thread_count = thread_count; + } + // Find the codec codec = avcodec_find_decoder( codec_context->codec_id ); @@ -647,32 +725,41 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) if ( codec != NULL ) { double source_fps = 0; + int norm_aspect_ratio = mlt_properties_get_int( properties, "norm_aspect_ratio" ); + double force_aspect_ratio = mlt_properties_get_double( properties, "force_aspect_ratio" ); + double aspect_ratio; // XXX: We won't know the real aspect ratio until an image is decoded // but we do need it now (to satisfy filter_resize) - take a guess based // on pal/ntsc - if ( codec_context->sample_aspect_ratio.num > 0 ) + if ( force_aspect_ratio > 0.0 ) { - mlt_properties_set_double( properties, "aspect_ratio", av_q2d( 
codec_context->sample_aspect_ratio ) ); + aspect_ratio = force_aspect_ratio; + } + else if ( !norm_aspect_ratio && codec_context->sample_aspect_ratio.num > 0 ) + { + aspect_ratio = av_q2d( codec_context->sample_aspect_ratio ); } else { - int is_pal = mlt_properties_get_double( properties, "fps" ) == 25.0; - mlt_properties_set_double( properties, "aspect_ratio", is_pal ? 128.0/117.0 : 72.0/79.0 ); + int is_pal = mlt_producer_get_fps( this ) == 25.0; + aspect_ratio = is_pal ? 59.0/54.0 : 10.0/11.0; } - //fprintf( stderr, "AVFORMAT: sample aspect %f %dx%d\n", av_q2d( codec_context->sample_aspect_ratio ), codec_context->width, codec_context->height ); - // Determine the fps - source_fps = ( double )codec_context->frame_rate / ( codec_context->frame_rate_base == 0 ? 1 : codec_context->frame_rate_base ); + source_fps = ( double )codec_context->time_base.den / ( codec_context->time_base.num == 0 ? 1 : codec_context->time_base.num ); // We'll use fps if it's available - if ( source_fps > 0 && source_fps < 30 ) + if ( source_fps > 0 ) mlt_properties_set_double( properties, "source_fps", source_fps ); + else + mlt_properties_set_double( properties, "source_fps", mlt_producer_get_fps( this ) ); + mlt_properties_set_double( properties, "aspect_ratio", aspect_ratio ); // Set the width and height mlt_properties_set_int( frame_properties, "width", codec_context->width ); mlt_properties_set_int( frame_properties, "height", codec_context->height ); + mlt_properties_set_double( frame_properties, "aspect_ratio", aspect_ratio ); mlt_frame_push_get_image( frame, producer_get_image ); mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); @@ -694,7 +781,7 @@ static void producer_set_up_video( mlt_producer this, mlt_frame frame ) static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_format *format, int *frequency, int *channels, int *samples ) { // Get the properties from the frame - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Obtain the frame number of this frame mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" ); @@ -703,7 +790,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form mlt_producer this = mlt_properties_get_data( frame_properties, "avformat_producer", NULL ); // Get the producer properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the audio_context AVFormatContext *context = mlt_properties_get_data( properties, "audio_context", NULL ); @@ -715,7 +802,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int seekable = mlt_properties_get_int( properties, "seekable" ); // Obtain the expected frame numer - mlt_position expected = mlt_properties_get_position( properties, "audio_expected" ); + mlt_position expected = mlt_properties_get_position( properties, "_audio_expected" ); // Obtain the resample context if it exists (not always needed) ReSampleContext *resample = mlt_properties_get_data( properties, "audio_resample", NULL ); @@ -724,7 +811,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form int16_t *audio_buffer = mlt_properties_get_data( properties, "audio_buffer", NULL ); // Get amount of audio used - int audio_used = mlt_properties_get_int( properties, "audio_used" ); + int audio_used = mlt_properties_get_int( properties, 
"_audio_used" ); // Calculate the real time code double real_timecode = producer_time_of_frame( this, position ); @@ -733,7 +820,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Packet AVPacket pkt; @@ -785,7 +872,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form else if ( position < expected || position - expected >= 12 ) { // Set to the real timecode - if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "start_time" ) + real_timecode * 1000000.0 ) != 0 ) + if ( av_seek_frame( context, -1, mlt_properties_get_double( properties, "_start_time" ) + real_timecode * 1000000.0, AVSEEK_FLAG_BACKWARD ) != 0 ) paused = 1; // Clear the usage in the audio buffer @@ -798,9 +885,9 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form { int ret = 0; int got_audio = 0; - int16_t *temp = mlt_pool_alloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE ); + int16_t *temp = av_malloc( sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE ); - memset( &pkt, 0, sizeof( pkt ) ); + av_init_packet( &pkt ); while( ret >= 0 && !got_audio ) { @@ -814,16 +901,20 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form // Read a packet ret = av_read_frame( context, &pkt ); - int len = pkt.size; - uint8_t *ptr = pkt.data; - int data_size; + int len = pkt.size; + uint8_t *ptr = pkt.data; // We only deal with audio from the selected audio_index while ( ptr != NULL && ret >= 0 && pkt.stream_index == index && len > 0 ) { + int data_size = sizeof( int16_t ) * AVCODEC_MAX_AUDIO_FRAME_SIZE; + // Decode the audio +#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0)) + ret = avcodec_decode_audio2( codec_context, temp, &data_size, ptr, len ); +#else ret = avcodec_decode_audio( codec_context, temp, &data_size, ptr, len ); - +#endif if ( ret < 0 ) { ret = 0; @@ -855,7 +946,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form } // If we're behind, ignore this packet - float current_pts = (float)pkt.pts / 1000000.0; + float current_pts = av_q2d( stream->time_base ) * pkt.pts; if ( seekable && ( !ignore && current_pts <= ( real_timecode - 0.02 ) ) ) ignore = 1; } @@ -880,21 +971,20 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form } // Store the number of audio samples still available - mlt_properties_set_int( properties, "audio_used", audio_used ); + mlt_properties_set_int( properties, "_audio_used", audio_used ); // Release the temporary audio - mlt_pool_release( temp ); + av_free( temp ); } else { // Get silence and don't touch the context - frame->get_audio = NULL; mlt_frame_get_audio( frame, buffer, format, frequency, channels, samples ); } // Regardless of speed (other than paused), we expect to get the next frame if ( !paused ) - mlt_properties_set_position( properties, "audio_expected", position + 1 ); + mlt_properties_set_position( properties, "_audio_expected", position + 1 ); return 0; } @@ -905,7 +995,7 @@ static int producer_get_audio( mlt_frame frame, int16_t **buffer, mlt_audio_form static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) { // Get the properties - mlt_properties properties = mlt_producer_properties( this ); + mlt_properties properties = MLT_PRODUCER_PROPERTIES( this ); // Fetch the audio_context AVFormatContext 
*context = mlt_properties_get_data( properties, "audio_context", NULL ); @@ -917,13 +1007,13 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) if ( context != NULL && index != -1 ) { // Get the frame properties - mlt_properties frame_properties = mlt_frame_properties( frame ); + mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame ); // Get the audio stream AVStream *stream = context->streams[ index ]; // Get codec context - AVCodecContext *codec_context = &stream->codec; + AVCodecContext *codec_context = stream->codec; // Get the codec AVCodec *codec = mlt_properties_get_data( properties, "audio_codec", NULL ); @@ -931,6 +1021,16 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) // Initialise the codec if necessary if ( codec == NULL ) { + // Initialise multi-threading + int thread_count = mlt_properties_get_int( properties, "threads" ); + if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) ) + thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) ); + if ( thread_count > 1 ) + { + avcodec_thread_init( codec_context, thread_count ); + codec_context->thread_count = thread_count; + } + // Find the codec codec = avcodec_find_decoder( codec_context->codec_id ); @@ -951,8 +1051,10 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) // No codec, no show... if ( codec != NULL ) { - frame->get_audio = producer_get_audio; + mlt_frame_push_audio( frame, producer_get_audio ); mlt_properties_set_data( frame_properties, "avformat_producer", this, 0, NULL, NULL ); + mlt_properties_set_int( frame_properties, "frequency", codec_context->sample_rate ); + mlt_properties_set_int( frame_properties, "channels", codec_context->channels ); } } } @@ -963,13 +1065,13 @@ static void producer_set_up_audio( mlt_producer this, mlt_frame frame ) static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index ) { // Create an empty frame - *frame = mlt_frame_init( ); + *frame = mlt_frame_init( MLT_PRODUCER_SERVICE( this ) ); // Update timecode on the frame we're creating mlt_frame_set_position( *frame, mlt_producer_position( this ) ); // Set the position of this producer - mlt_properties_set_position( mlt_frame_properties( *frame ), "avformat_position", mlt_producer_position( this ) ); + mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", mlt_producer_frame( this ) ); // Set up the video producer_set_up_video( this, *frame ); @@ -978,7 +1080,7 @@ static int producer_get_frame( mlt_producer this, mlt_frame_ptr frame, int index producer_set_up_audio( this, *frame ); // Set the aspect_ratio - mlt_properties_set_double( mlt_frame_properties( *frame ), "aspect_ratio", mlt_properties_get_double( mlt_producer_properties( this ), "aspect_ratio" ) ); + mlt_properties_set_double( MLT_FRAME_PROPERTIES( *frame ), "aspect_ratio", mlt_properties_get_double( MLT_PRODUCER_PROPERTIES( this ), "aspect_ratio" ) ); // Calculate the next timecode mlt_producer_prepare_next( this );
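
Note on the SWSCALE path added to convert_image() above: every branch of the new code follows the same three-step pattern, namely build a scaler context for the source and destination pixel formats, wrap the caller's buffer in an AVPicture, and run sws_scale() over the full picture height. Below is a rough standalone sketch of the default packed YUV 4:2:2 branch, assuming the libswscale/libavcodec API of this era as used in the hunks above (sws_getContext, avpicture_fill, PIX_FMT_YUYV422); the helper name frame_to_yuyv422 is illustrative only, not part of the patch.

/* Sketch: convert a decoded AVFrame to packed YUYV 4:2:2 in `buffer`,
   mirroring the SWSCALE branch of convert_image() introduced above. */
static void frame_to_yuyv422( AVFrame *frame, uint8_t *buffer, int pix_fmt, int width, int height )
{
	// One scaler context per conversion, exactly as the patch does;
	// a caller converting many frames would normally cache this.
	struct SwsContext *context = sws_getContext( width, height, pix_fmt,
		width, height, PIX_FMT_YUYV422, SWS_FAST_BILINEAR, NULL, NULL, NULL );

	// Lay the single packed output plane over the caller-supplied buffer.
	AVPicture output;
	avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );

	// Convert/scale the whole picture height into the output planes.
	sws_scale( context, frame->data, frame->linesize, 0, height,
		output.data, output.linesize );

	sws_freeContext( context );
}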