X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fconsumer_avformat.c;h=3d1bc18546b2dfa80e7a7ca8967cdcdebe0cc8db;hb=6a094bbd3e26be58507cd52c157d77bac53e670f;hp=7f38665b19575c71ff7ac7cc6070178b8d9d56f5;hpb=83ea9457463989807aa751eb84aa0034e48599cd;p=mlt diff --git a/src/modules/avformat/consumer_avformat.c b/src/modules/avformat/consumer_avformat.c index 7f38665b..3d1bc185 100644 --- a/src/modules/avformat/consumer_avformat.c +++ b/src/modules/avformat/consumer_avformat.c @@ -24,6 +24,7 @@ #include #include #include +#include // System header files #include @@ -35,17 +36,28 @@ #include // avformat header files -#include +#include #ifdef SWSCALE -#include +#include +#endif +#include +#if LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0) +#include #endif -#include #if LIBAVUTIL_VERSION_INT < (50<<16) #define PIX_FMT_RGB32 PIX_FMT_RGBA32 #define PIX_FMT_YUYV422 PIX_FMT_YUV422 #endif +#define MAX_AUDIO_STREAMS (8) +#define AUDIO_ENCODE_BUFFER_SIZE (48000 * 2 * MAX_AUDIO_STREAMS) +#define AUDIO_BUFFER_SIZE (1024 * 42) +#define VIDEO_BUFFER_SIZE (2048 * 1024) + +void avformat_lock( ); +void avformat_unlock( ); + // // This structure should be extended and made globally available in mlt // @@ -140,7 +152,7 @@ static int consumer_is_stopped( mlt_consumer this ); static void *consumer_thread( void *arg ); static void consumer_close( mlt_consumer this ); -/** Initialise the dv consumer. +/** Initialise the consumer. */ mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg ) @@ -186,6 +198,8 @@ mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg ) this->start = consumer_start; this->stop = consumer_stop; this->is_stopped = consumer_is_stopped; + + mlt_events_register( properties, "consumer-fatal-error", NULL ); } // Return this @@ -205,33 +219,60 @@ static int consumer_start( mlt_consumer this ) char *s = mlt_properties_get( properties, "f" ); if ( s && strcmp( s, "list" ) == 0 ) { - fprintf( stderr, "---\nformats:\n" ); + mlt_properties doc = mlt_properties_new(); + mlt_properties formats = mlt_properties_new(); + char key[20]; AVOutputFormat *format = NULL; + + mlt_properties_set_data( properties, "f", formats, 0, (mlt_destructor) mlt_properties_close, NULL ); + mlt_properties_set_data( doc, "formats", formats, 0, NULL, NULL ); while ( ( format = av_oformat_next( format ) ) ) - fprintf( stderr, " - %s\n", format->name ); - fprintf( stderr, "...\n" ); + { + snprintf( key, sizeof(key), "%d", mlt_properties_count( formats ) ); + mlt_properties_set( formats, key, format->name ); + } + fprintf( stderr, "%s", mlt_properties_serialise_yaml( doc ) ); + mlt_properties_close( doc ); error = 1; } s = mlt_properties_get( properties, "acodec" ); if ( s && strcmp( s, "list" ) == 0 ) { - fprintf( stderr, "---\naudio_codecs:\n" ); + mlt_properties doc = mlt_properties_new(); + mlt_properties codecs = mlt_properties_new(); + char key[20]; AVCodec *codec = NULL; + + mlt_properties_set_data( properties, "acodec", codecs, 0, (mlt_destructor) mlt_properties_close, NULL ); + mlt_properties_set_data( doc, "audio_codecs", codecs, 0, NULL, NULL ); while ( ( codec = av_codec_next( codec ) ) ) if ( codec->encode && codec->type == CODEC_TYPE_AUDIO ) - fprintf( stderr, " - %s\n", codec->name ); - fprintf( stderr, "...\n" ); + { + snprintf( key, sizeof(key), "%d", mlt_properties_count( codecs ) ); + mlt_properties_set( codecs, key, codec->name ); + } + fprintf( stderr, "%s", mlt_properties_serialise_yaml( doc ) ); + mlt_properties_close( doc ); error = 1; } s = 
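
The preprocessor checks sprinkled through this patch, such as LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0), rely on FFmpeg packing each library version into a single integer (FFmpeg's own AV_VERSION_INT macro does the same packing). As a standalone illustration, with a made-up VERSION_INT macro, the scheme makes an ordinary integer comparison equivalent to comparing version triples; 50.8.0, for example, is the libavutil cut-off this patch uses before calling av_get_pix_fmt().

#include <stdio.h>

/* Pack major/minor/micro the same way the LIBAV*_VERSION_INT constants are
 * built: 8 bits for micro, 8 for minor, the rest for major.  Comparing the
 * packed values then orders releases correctly. */
#define VERSION_INT( major, minor, micro ) ( ( (major) << 16 ) + ( (minor) << 8 ) + (micro) )

int main( void )
{
    int have = VERSION_INT( 50, 15, 1 ); /* e.g. the libavutil found at build time */
    int need = VERSION_INT( 50,  8, 0 ); /* the cut-off used for av_get_pix_fmt() in this patch */

    printf( "have=0x%06x need=0x%06x -> %s\n", have, need,
            have >= need ? "new API" : "old API" );
    return 0;
}
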
mlt_properties_get( properties, "vcodec" ); if ( s && strcmp( s, "list" ) == 0 ) { - fprintf( stderr, "---\nvideo_codecs:\n" ); + mlt_properties doc = mlt_properties_new(); + mlt_properties codecs = mlt_properties_new(); + char key[20]; AVCodec *codec = NULL; + + mlt_properties_set_data( properties, "vcodec", codecs, 0, (mlt_destructor) mlt_properties_close, NULL ); + mlt_properties_set_data( doc, "video_codecs", codecs, 0, NULL, NULL ); while ( ( codec = av_codec_next( codec ) ) ) if ( codec->encode && codec->type == CODEC_TYPE_VIDEO ) - fprintf( stderr, " - %s\n", codec->name ); - fprintf( stderr, "...\n" ); + { + snprintf( key, sizeof(key), "%d", mlt_properties_count( codecs ) ); + mlt_properties_set( codecs, key, codec->name ); + } + fprintf( stderr, "%s", mlt_properties_serialise_yaml( doc ) ); + mlt_properties_close( doc ); error = 1; } @@ -348,7 +389,7 @@ static int consumer_is_stopped( mlt_consumer this ) /** Process properties as AVOptions and apply to AV context obj */ -static void apply_properties( void *obj, mlt_properties properties, int flags ) +static void apply_properties( void *obj, mlt_properties properties, int flags, int alloc ) { int i; int count = mlt_properties_count( properties ); @@ -358,9 +399,9 @@ static void apply_properties( void *obj, mlt_properties properties, int flags ) const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags ); if ( opt != NULL ) #if LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0) - av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), 0, NULL ); + av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), alloc, NULL ); #elif LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0) - av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 ); + av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), alloc ); #else av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) ); #endif @@ -370,13 +411,13 @@ static void apply_properties( void *obj, mlt_properties properties, int flags ) /** Add an audio output stream */ -static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id ) +static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id, int channels ) { // Get the properties mlt_properties properties = MLT_CONSUMER_PROPERTIES( this ); // Create a new stream - AVStream *st = av_new_stream( oc, 1 ); + AVStream *st = av_new_stream( oc, oc->nb_streams ); // If created, then initialise from properties if ( st != NULL ) @@ -388,13 +429,16 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c c->codec_id = codec_id; c->codec_type = CODEC_TYPE_AUDIO; + c->sample_fmt = SAMPLE_FMT_S16; +#if 0 // disabled until some audio codecs are multi-threaded // Setup multi-threading int thread_count = mlt_properties_get_int( properties, "threads" ); if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) ) thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) ); if ( thread_count > 1 ) avcodec_thread_init( c, thread_count ); +#endif if (oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; @@ -411,7 +455,14 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c } // Process properties as AVOptions - apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM ); + char *apre = mlt_properties_get( properties, "apre" ); + if ( apre ) + { + mlt_properties p = mlt_properties_load( apre ); + apply_properties( c, p, 
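
The alloc argument added to apply_properties() above is forwarded to av_set_string3()/av_set_string2(). Preset properties loaded for "apre" (and, further down, "vpre" and "fpre") are closed again immediately after being applied, so they are passed with alloc = 1, asking the AVOption layer to keep its own copy of each string, whereas the consumer's long-lived properties keep using 0. The preset files themselves are plain name=value text that mlt_properties_load() reads; purely as an illustration of that format (the function name is invented), a minimal reader looks like this:

#include <stdio.h>
#include <string.h>

/* Read an ffpreset-style file: one "name=value" pair per line, '#' starting
 * a comment line.  In the consumer, mlt_properties_load() builds a property
 * set from such a file and apply_properties() hands each pair to the codec
 * context as an AVOption. */
static void read_preset( const char *filename )
{
    char line[ 1024 ];
    FILE *f = fopen( filename, "r" );

    if ( !f )
        return;
    while ( fgets( line, sizeof( line ), f ) )
    {
        char *eq;

        line[ strcspn( line, "\r\n" ) ] = '\0';
        if ( line[ 0 ] == '#' || line[ 0 ] == '\0' )
            continue;
        eq = strchr( line, '=' );
        if ( eq )
        {
            *eq = '\0';
            printf( "option \"%s\" -> \"%s\"\n", line, eq + 1 );
        }
    }
    fclose( f );
}

int main( int argc, char **argv )
{
    if ( argc > 1 )
        read_preset( argv[ 1 ] );
    return 0;
}
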
AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 ); int audio_qscale = mlt_properties_get_int( properties, "aq" ); if ( audio_qscale > QSCALE_NONE ) @@ -423,7 +474,7 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c // Set parameters controlled by MLT c->sample_rate = mlt_properties_get_int( properties, "frequency" ); c->time_base = ( AVRational ){ 1, c->sample_rate }; - c->channels = mlt_properties_get_int( properties, "channels" ); + c->channels = channels; if ( mlt_properties_get( properties, "alang" ) != NULL ) strncpy( st->language, mlt_properties_get( properties, "alang" ), sizeof( st->language ) ); @@ -447,6 +498,8 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size // Find the encoder AVCodec *codec = avcodec_find_encoder( c->codec_id ); + avformat_lock(); + // Continue if codec found and we can open it if ( codec != NULL && avcodec_open( c, codec ) >= 0 ) { @@ -482,6 +535,8 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size { mlt_log_warning( NULL, "%s: Unable to encode audio - disabling audio output.\n", __FILE__ ); } + + avformat_unlock(); return audio_input_frame_size; } @@ -489,7 +544,11 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size static void close_audio( AVFormatContext *oc, AVStream *st ) { if ( st && st->codec ) + { + avformat_lock(); avcodec_close( st->codec ); + avformat_unlock(); + } } /** Add a video output stream @@ -501,7 +560,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c mlt_properties properties = MLT_CONSUMER_PROPERTIES( this ); // Create a new stream - AVStream *st = av_new_stream( oc, 0 ); + AVStream *st = av_new_stream( oc, oc->nb_streams ); if ( st != NULL ) { @@ -522,7 +581,41 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c avcodec_thread_init( c, thread_count ); // Process properties as AVOptions - apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM ); + char *vpre = mlt_properties_get( properties, "vpre" ); + if ( vpre ) + { + mlt_properties p = mlt_properties_load( vpre ); +#ifdef AVDATADIR + if ( mlt_properties_count( p ) < 1 ) + { + AVCodec *codec = avcodec_find_encoder( c->codec_id ); + if ( codec ) + { + char *path = malloc( strlen(AVDATADIR) + strlen(codec->name) + strlen(vpre) + strlen(".ffpreset") + 2 ); + strcpy( path, AVDATADIR ); + strcat( path, codec->name ); + strcat( path, "-" ); + strcat( path, vpre ); + strcat( path, ".ffpreset" ); + + mlt_properties_close( p ); + p = mlt_properties_load( path ); + mlt_properties_debug( p, path, stderr ); + free( path ); + } + } + else + { + mlt_properties_debug( p, vpre, stderr ); + } +#endif + apply_properties( c, p, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + int colorspace = mlt_properties_get_int( properties, "colorspace" ); + mlt_properties_set( properties, "colorspace", NULL ); + apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 ); + mlt_properties_set_int( properties, "colorspace", colorspace ); // Set options controlled by MLT c->width = mlt_properties_get_int( properties, "width" ); @@ -531,7 +624,32 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c c->time_base.den = mlt_properties_get_int( properties, 
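
When the "vpre" value is not itself a readable properties file, the fallback above builds the path <AVDATADIR><codec name>-<preset>.ffpreset, and the malloc() size is the sum of the component lengths plus 2: one byte for the '-' separator and one for the terminating NUL. The same construction written with snprintf(), which makes that accounting explicit (the helper name and the example values are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "<datadir><codec>-<preset>.ffpreset"; the caller frees the result,
 * just as the patch frees its hand-built path. */
static char *build_preset_path( const char *datadir, const char *codec, const char *preset )
{
    size_t n = strlen( datadir ) + strlen( codec ) + strlen( preset ) + strlen( "-.ffpreset" ) + 1;
    char *path = malloc( n );

    if ( path )
        snprintf( path, n, "%s%s-%s.ffpreset", datadir, codec, preset );
    return path;
}

int main( void )
{
    /* example values only; the real codec name and preset come from the
       encoder in use and the "vpre" property */
    char *path = build_preset_path( "/usr/share/ffmpeg/", "libx264", "hq" );

    if ( path )
    {
        printf( "%s\n", path );
        free( path );
    }
    return 0;
}
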
"frame_rate_num" ); if ( st->time_base.den == 0 ) st->time_base = c->time_base; +#if LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0) + c->pix_fmt = pix_fmt ? av_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P; +#else c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P; +#endif + +#if LIBAVCODEC_VERSION_INT > ((52<<16)+(28<<8)+0) + switch ( colorspace ) + { + case 170: + c->colorspace = AVCOL_SPC_SMPTE170M; + break; + case 240: + c->colorspace = AVCOL_SPC_SMPTE240M; + break; + case 470: + c->colorspace = AVCOL_SPC_BT470BG; + break; + case 601: + c->colorspace = ( 576 % c->height ) ? AVCOL_SPC_SMPTE170M : AVCOL_SPC_BT470BG; + break; + case 709: + c->colorspace = AVCOL_SPC_BT709; + break; + } +#endif if ( mlt_properties_get( properties, "aspect" ) ) { @@ -745,13 +863,21 @@ static int open_video(AVFormatContext *oc, AVStream *st) } // Open the codec safely - return codec != NULL && avcodec_open( video_enc, codec ) >= 0; + avformat_lock(); + int result = codec != NULL && avcodec_open( video_enc, codec ) >= 0; + avformat_unlock(); + + return result; } void close_video(AVFormatContext *oc, AVStream *st) { if ( st && st->codec ) + { + avformat_lock(); avcodec_close(st->codec); + avformat_unlock(); + } } static inline long time_difference( struct timeval *time1 ) @@ -794,18 +920,19 @@ static void *consumer_thread( void *arg ) // Get default audio properties mlt_audio_format aud_fmt = mlt_audio_s16; int channels = mlt_properties_get_int( properties, "channels" ); + int total_channels = channels; int frequency = mlt_properties_get_int( properties, "frequency" ); int16_t *pcm = NULL; int samples = 0; // AVFormat audio buffer and frame size - int audio_outbuf_size = ( 1024 * 42 ); + int audio_outbuf_size = AUDIO_BUFFER_SIZE; uint8_t *audio_outbuf = av_malloc( audio_outbuf_size ); int audio_input_frame_size = 0; // AVFormat video buffer and frame count int frame_count = 0; - int video_outbuf_size = ( 1024 * 1024 ); + int video_outbuf_size = VIDEO_BUFFER_SIZE; uint8_t *video_outbuf = av_malloc( video_outbuf_size ); // Used for the frame properties @@ -825,7 +952,8 @@ static void *consumer_thread( void *arg ) mlt_image_format img_fmt = mlt_image_yuv422; // For receiving audio samples back from the fifo - int16_t *buffer = av_malloc( 48000 * 2 ); + int16_t *audio_buf_1 = av_malloc( AUDIO_ENCODE_BUFFER_SIZE ); + int16_t *audio_buf_2 = NULL; int count = 0; // Allocate the context @@ -836,17 +964,14 @@ static void *consumer_thread( void *arg ) #endif // Streams - AVStream *audio_st = NULL; AVStream *video_st = NULL; + AVStream *audio_st[ MAX_AUDIO_STREAMS ]; // Time stamps double audio_pts = 0; double video_pts = 0; - // Loop variable - int i; - - // Frames despatched + // Frames dispatched long int frames = 0; long int total_time = 0; @@ -861,17 +986,38 @@ static void *consumer_thread( void *arg ) int audio_codec_id; int video_codec_id; + // Misc + char key[27]; + mlt_properties frame_meta_properties = mlt_properties_new(); + + // Initialize audio_st + int i = MAX_AUDIO_STREAMS; + while ( i-- ) + audio_st[i] = NULL; + // Check for user selected format first if ( format != NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( format, NULL, NULL ); +#else + fmt = av_guess_format( format, NULL, NULL ); +#endif // Otherwise check on the filename if ( fmt == NULL && filename != NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( NULL, filename, NULL ); +#else + fmt = av_guess_format( NULL, filename, NULL ); +#endif // Otherwise default to mpeg if ( fmt 
== NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( "mpeg", NULL, NULL ); +#else + fmt = av_guess_format( "mpeg", NULL, NULL ); +#endif // We need a filename - default to stdout? if ( filename == NULL || !strcmp( filename, "" ) ) @@ -906,6 +1052,28 @@ static void *consumer_thread( void *arg ) } // Write metadata +#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(31<<8)+0) + for ( i = 0; i < mlt_properties_count( properties ); i++ ) + { + char *name = mlt_properties_get_name( properties, i ); + if ( name && !strncmp( name, "meta.attr.", 10 ) ) + { + char *key = strdup( name + 10 ); + char *markup = strrchr( key, '.' ); + if ( markup && !strcmp( markup, ".markup") ) + { + markup[0] = '\0'; + if ( !strstr( key, ".stream." ) ) +#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(43<<8)+0) + av_metadata_set2( &oc->metadata, key, mlt_properties_get_value( properties, i ), 0 ); +#else + av_metadata_set( &oc->metadata, key, mlt_properties_get_value( properties, i ) ); +#endif + } + free( key ); + } + } +#else char *tmp = NULL; int metavalue; @@ -929,15 +1097,38 @@ static void *consumer_thread( void *arg ) metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup"); if (metavalue != 0) oc->track = metavalue; +#endif oc->oformat = fmt; snprintf( oc->filename, sizeof(oc->filename), "%s", filename ); - // Add audio and video streams + // Add audio and video streams if ( video_codec_id != CODEC_ID_NONE ) video_st = add_video_stream( this, oc, video_codec_id ); if ( audio_codec_id != CODEC_ID_NONE ) - audio_st = add_audio_stream( this, oc, audio_codec_id ); + { + int is_multi = 0; + + total_channels = 0; + // multitrack audio + for ( i = 0; i < MAX_AUDIO_STREAMS; i++ ) + { + sprintf( key, "channels.%d", i ); + int j = mlt_properties_get_int( properties, key ); + if ( j ) + { + is_multi = 1; + total_channels += j; + audio_st[i] = add_audio_stream( this, oc, audio_codec_id, j ); + } + } + // single track + if ( !is_multi ) + { + audio_st[0] = add_audio_stream( this, oc, audio_codec_id, channels ); + total_channels = channels; + } + } // Set the parameters (even though we have none...) if ( av_set_parameters(oc, NULL) >= 0 ) @@ -946,15 +1137,22 @@ static void *consumer_thread( void *arg ) oc->max_delay= ( int )( mlt_properties_get_double( properties, "muxdelay" ) * AV_TIME_BASE ); // Process properties as AVOptions - apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM ); + char *fpre = mlt_properties_get( properties, "fpre" ); + if ( fpre ) + { + mlt_properties p = mlt_properties_load( fpre ); + apply_properties( oc, p, AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM, 0 ); if ( video_st && !open_video( oc, video_st ) ) video_st = NULL; - if ( audio_st ) + for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ ) { - audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size ); + audio_input_frame_size = open_audio( oc, audio_st[i], audio_outbuf_size ); if ( !audio_input_frame_size ) - audio_st = NULL; + audio_st[i] = NULL; } // Open the output file, if needed @@ -967,7 +1165,7 @@ static void *consumer_thread( void *arg ) } } - // Write the stream header, if any + // Write the stream header. 
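
The metadata loop above rewrites MLT property names into container-level metadata keys: "meta.attr.<key>.markup" becomes "<key>" (for example "meta.attr.track.markup" becomes "track"), and names containing ".stream." are skipped here because they are meant for per-stream handling. The string surgery on its own, with an invented helper name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a malloc'd metadata key derived from an MLT property name, or NULL
 * if the name is not a container-level "meta.attr.*.markup" property. */
static char *metadata_key( const char *name )
{
    char *key, *markup;

    if ( !name || strncmp( name, "meta.attr.", 10 ) )
        return NULL;
    key = strdup( name + 10 );            /* drop the "meta.attr." prefix */
    if ( !key )
        return NULL;
    markup = strrchr( key, '.' );
    if ( markup && !strcmp( markup, ".markup" ) && !strstr( key, ".stream." ) )
    {
        markup[ 0 ] = '\0';               /* drop the ".markup" suffix */
        return key;
    }
    free( key );
    return NULL;
}

int main( void )
{
    char *key = metadata_key( "meta.attr.track.markup" );

    if ( key )
    {
        printf( "%s\n", key );            /* prints "track" */
        free( key );
    }
    return 0;
}
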
if ( mlt_properties_get_int( properties, "running" ) ) av_write_header( oc ); } @@ -982,22 +1180,22 @@ static void *consumer_thread( void *arg ) output = alloc_picture( video_st->codec->pix_fmt, width, height ); // Last check - need at least one stream - if ( audio_st == NULL && video_st == NULL ) + if ( !audio_st[0] && !video_st ) mlt_properties_set_int( properties, "running", 0 ); // Get the starting time (can ignore the times above) gettimeofday( &ante, NULL ); // Loop while running - while( mlt_properties_get_int( properties, "running" ) && !terminated ) + while( mlt_properties_get_int( properties, "running" ) && + ( !terminated || ( video_st && mlt_deque_count( queue ) ) ) ) { - // Get the frame frame = mlt_consumer_rt_frame( this ); // Check that we have a frame to work with if ( frame != NULL ) { - // Increment frames despatched + // Increment frames dispatched frames ++; // Default audio args @@ -1007,11 +1205,14 @@ static void *consumer_thread( void *arg ) terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0; // Get audio and append to the fifo - if ( !terminated && audio_st ) + if ( !terminated && audio_st[0] ) { samples = mlt_sample_calculator( fps, frequency, count ++ ); mlt_frame_get_audio( frame, (void**) &pcm, &aud_fmt, &frequency, &channels, &samples ); + // Save the audio channel remap properties for later + mlt_properties_pass( frame_meta_properties, frame_properties, "meta.map.audio." ); + // Create the fifo if we don't have one if ( fifo == NULL ) { @@ -1019,6 +1220,7 @@ static void *consumer_thread( void *arg ) mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL ); } + // Silence if not normal forward speed if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 ) memset( pcm, 0, samples * channels * 2 ); @@ -1038,38 +1240,141 @@ static void *consumer_thread( void *arg ) while ( 1 ) { // Write interleaved audio and video frames - if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) ) + if ( !video_st || ( video_st && audio_st[0] && audio_pts < video_pts ) ) { - if ( ( channels * audio_input_frame_size ) < sample_fifo_used( fifo ) ) + // Write audio + if ( ( video_st && terminated ) || ( channels * audio_input_frame_size ) < sample_fifo_used( fifo ) ) { - AVCodecContext *c; - AVPacket pkt; - av_init_packet( &pkt ); - - c = audio_st->codec; - - sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size ); + int j = 0; // channel offset into interleaved source buffer + int n = FFMIN( FFMIN( channels * audio_input_frame_size, sample_fifo_used( fifo ) ), AUDIO_ENCODE_BUFFER_SIZE ); - pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer ); - // Write the compressed frame in the media file - if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE ) + // Get the audio samples + if ( n > 0 ) + { + sample_fifo_fetch( fifo, audio_buf_1, n ); + } + else if ( audio_codec_id == CODEC_ID_VORBIS && terminated ) + { + // This prevents an infinite loop when some versions of vorbis do not + // increment pts when encoding silence. 
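
In the encoding loop above, mlt_sample_calculator( fps, frequency, count ++ ) asks MLT how many audio samples belong to the current frame; at fractional frame rates the count has to vary from frame to frame or audio and video slowly drift apart. The sketch below is not MLT's implementation, only an illustration of the idea: compute cumulative sample boundaries in integer arithmetic and take the difference, which yields the familiar alternating 1601/1602 counts for 48 kHz audio at 30000/1001 fps.

#include <stdint.h>
#include <stdio.h>

/* Samples belonging to frame "position": the difference between this
 * frame's and the next frame's cumulative sample boundary, so rounding
 * error never accumulates. */
static int samples_for_frame( int frequency, int fps_num, int fps_den, int64_t position )
{
    int64_t prev = position * frequency * fps_den / fps_num;
    int64_t next = ( position + 1 ) * frequency * fps_den / fps_num;

    return (int) ( next - prev );
}

int main( void )
{
    int64_t i;

    for ( i = 0; i < 5; i++ )
        printf( "frame %d: %d samples\n", (int) i, samples_for_frame( 48000, 30000, 1001, i ) );
    return 0;
}
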
+ audio_pts = video_pts; + break; + } + else { - pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base ); - mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pkt pts %lld frame pts %lld", pkt.pts, c->coded_frame->pts ); + memset( audio_buf_1, 0, AUDIO_ENCODE_BUFFER_SIZE ); } - pkt.flags |= PKT_FLAG_KEY; - pkt.stream_index= audio_st->index; - pkt.data= audio_outbuf; + samples = n / channels; - if ( pkt.size > 0 ) - if ( av_interleaved_write_frame( oc, &pkt ) != 0 ) - mlt_log_error( MLT_CONSUMER_SERVICE( this ), "error writing audio frame\n" ); + // For each output stream + for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i] && j < total_channels; i++ ) + { + AVStream *stream = audio_st[i]; + AVCodecContext *codec = stream->codec; + AVPacket pkt; - mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", c->frame_size ); - if ( audio_codec_id == CODEC_ID_VORBIS ) - audio_pts = (double)c->coded_frame->pts * av_q2d( audio_st->time_base ); - else - audio_pts = (double)audio_st->pts.val * av_q2d( audio_st->time_base ); + av_init_packet( &pkt ); + + // Optimized for single track and no channel remap + if ( !audio_st[1] && !mlt_properties_count( frame_meta_properties ) ) + { + pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, audio_buf_1 ); + } + else + { + // Extract the audio channels according to channel mapping + int dest_offset = 0; // channel offset into interleaved dest buffer + + // Get the number of channels for this stream + sprintf( key, "channels.%d", i ); + int current_channels = mlt_properties_get_int( properties, key ); + + // Clear the destination audio buffer. + if ( !audio_buf_2 ) + audio_buf_2 = av_mallocz( AUDIO_ENCODE_BUFFER_SIZE ); + else + memset( audio_buf_2, 0, AUDIO_ENCODE_BUFFER_SIZE ); + + // For each output channel + while ( dest_offset < current_channels && j < total_channels ) + { + int map_start = -1, map_channels = 0; + int source_offset = 0; + int k; + + // Look for a mapping that starts at j + for ( k = 0; k < (MAX_AUDIO_STREAMS * 2) && map_start != j; k++ ) + { + sprintf( key, "%d.channels", k ); + map_channels = mlt_properties_get_int( frame_meta_properties, key ); + sprintf( key, "%d.start", k ); + if ( mlt_properties_get( frame_meta_properties, key ) ) + map_start = mlt_properties_get_int( frame_meta_properties, key ); + if ( map_start != j ) + source_offset += map_channels; + } + + // If no mapping + if ( map_start != j ) + { + map_channels = current_channels; + source_offset = j; + } + + // Copy samples if source offset valid + if ( source_offset < channels ) + { + // Interleave the audio buffer with the # channels for this stream/mapping. 
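
The scan above resolves the frame's channel-mapping hints: after mlt_properties_pass() with the "meta.map.audio." prefix, properties named "meta.map.audio.<N>.start" and "meta.map.audio.<N>.channels" appear as "<N>.start" and "<N>.channels", and for an output channel index j the code looks for the entry whose start equals j while summing the channel counts of the entries it passes over, which gives that group's offset in the interleaved source. The same lookup as a standalone sketch (the struct and function names are invented):

#include <stdio.h>

struct channel_map { int start, channels; };

/* Find the mapping entry whose destination start channel is j.  Returns the
 * source channel offset of that group and stores its channel count, or
 * returns -1 when no entry matches (the consumer then falls back to a
 * one-to-one layout). */
static int find_source_offset( const struct channel_map *map, int entries, int j, int *map_channels )
{
    int k, source_offset = 0;

    for ( k = 0; k < entries; k++ )
    {
        if ( map[ k ].start == j )
        {
            *map_channels = map[ k ].channels;
            return source_offset;
        }
        source_offset += map[ k ].channels;
    }
    *map_channels = 0;
    return -1;
}

int main( void )
{
    /* e.g. a 6-channel group delivered first in the source, followed by a
       stereo pair that should land on output channels 0 and 1 */
    struct channel_map map[ ] = { { 2, 6 }, { 0, 2 } };
    int n = 0;
    int offset = find_source_offset( map, 2, 0, &n );

    printf( "output channel 0 reads %d channels from source offset %d\n", n, offset );
    return 0;
}
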
+ for ( k = 0; k < map_channels; k++, j++, source_offset++, dest_offset++ ) + { + int16_t *src = audio_buf_1 + source_offset; + int16_t *dest = audio_buf_2 + dest_offset; + int s = samples + 1; + + while ( --s ) { + *dest = *src; + dest += current_channels; + src += channels; + } + } + } + // Otherwise silence + else + { + j += current_channels; + dest_offset += current_channels; + } + } + pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, audio_buf_2 ); + } + + // Write the compressed frame in the media file + if ( codec->coded_frame && codec->coded_frame->pts != AV_NOPTS_VALUE ) + { + pkt.pts = av_rescale_q( codec->coded_frame->pts, codec->time_base, stream->time_base ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio stream %d pkt pts %lld frame pts %lld", + stream->index, pkt.pts, codec->coded_frame->pts ); + } + pkt.flags |= PKT_FLAG_KEY; + pkt.stream_index = stream->index; + pkt.data = audio_outbuf; + + if ( pkt.size > 0 ) + { + if ( av_interleaved_write_frame( oc, &pkt ) ) + { + mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "error writing audio frame\n" ); + mlt_events_fire( properties, "consumer-fatal-error", NULL ); + goto on_fatal_error; + } + } + + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", codec->frame_size ); + if ( i == 0 ) + { + audio_pts = (double)stream->pts.val * av_q2d( stream->time_base ); + } + } } else { @@ -1078,9 +1383,10 @@ static void *consumer_thread( void *arg ) } else if ( video_st ) { + // Write video if ( mlt_deque_count( queue ) ) { - int out_size, ret; + int out_size, ret = 0; AVCodecContext *c; frame = mlt_deque_pop_front( queue ); @@ -1091,12 +1397,9 @@ static void *consumer_thread( void *arg ) if ( mlt_properties_get_int( frame_properties, "rendered" ) ) { int i = 0; - int j = 0; uint8_t *p; uint8_t *q; - mlt_events_fire( properties, "consumer-frame-show", frame, NULL ); - mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 ); q = image; @@ -1105,18 +1408,21 @@ static void *consumer_thread( void *arg ) for ( i = 0; i < height; i ++ ) { p = input->data[ 0 ] + i * input->linesize[ 0 ]; - j = width; - while( j -- ) - { - *p ++ = *q ++; - *p ++ = *q ++; - } + memcpy( p, q, width * 2 ); + q += width * 2; } // Do the colour space conversion #ifdef SWSCALE + int flags = SWS_BILINEAR; +#ifdef USE_MMX + flags |= SWS_CPU_CAPS_MMX; +#endif +#ifdef USE_SSE + flags |= SWS_CPU_CAPS_MMX2; +#endif struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUYV422, - width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL); + width, height, video_st->codec->pix_fmt, flags, NULL, NULL, NULL); sws_scale( context, input->data, input->linesize, 0, height, output->data, output->linesize); sws_freeContext( context ); @@ -1124,6 +1430,8 @@ static void *consumer_thread( void *arg ) img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUYV422, width, height ); #endif + mlt_events_fire( properties, "consumer-frame-show", frame, NULL ); + // Apply the alpha if applicable if ( video_st->codec->pix_fmt == PIX_FMT_RGB32 ) { @@ -1133,11 +1441,7 @@ static void *consumer_thread( void *arg ) for ( i = 0; i < height; i ++ ) { n = ( width + 7 ) / 8; - p = output->data[ 0 ] + i * output->linesize[ 0 ]; - - #ifndef __DARWIN__ - p += 3; - #endif + p = output->data[ 0 ] + i * output->linesize[ 0 ] + 3; switch( width % 8 ) { @@ -1178,6 +1482,7 @@ static void *consumer_thread( void *arg ) // Set frame interlace hints output->interlaced_frame = 
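
The while ( --s ) copy above walks two interleaved buffers with different strides: the source pointer advances by the frame's total channel count and the destination pointer by the stream's channel count, so one source channel is lifted into one destination channel sample by sample. The same arithmetic as a standalone helper (names invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Copy map_channels consecutive channels, starting at src_offset, out of an
 * interleaved buffer with src_channels channels per sample into an
 * interleaved buffer with dst_channels channels per sample, starting at
 * dst_offset. */
static void copy_channels( const int16_t *src, int src_channels, int src_offset,
                           int16_t *dst, int dst_channels, int dst_offset,
                           int map_channels, int samples )
{
    int k, s;

    for ( k = 0; k < map_channels; k++ )
    {
        const int16_t *p = src + src_offset + k;
        int16_t *q = dst + dst_offset + k;

        for ( s = 0; s < samples; s++ )
        {
            *q = *p;
            p += src_channels;   /* next sample, same source channel */
            q += dst_channels;   /* next sample, same destination channel */
        }
    }
}

int main( void )
{
    int16_t src[ 8 ] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* 4 samples x 2 channels */
    int16_t dst[ 4 ] = { 0 };                      /* 4 samples x 1 channel  */

    /* pull the second channel (2, 4, 6, 8) out of the stereo buffer */
    copy_channels( src, 2, 1, dst, 1, 0, 1, 4 );
    printf( "%d %d %d %d\n", dst[ 0 ], dst[ 1 ], dst[ 2 ], dst[ 3 ] );
    return 0;
}
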
!mlt_properties_get_int( frame_properties, "progressive" ); output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" ); + output->pts = frame_count; // Encode the image out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output ); @@ -1206,12 +1511,18 @@ static void *consumer_thread( void *arg ) if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out ) fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out ); } - else + else if ( out_size < 0 ) { - mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "error with video encode\n" ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "error with video encode %d\n", frame_count ); } } frame_count++; + if ( ret ) + { + mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "error writing video frame\n" ); + mlt_events_fire( properties, "consumer-fatal-error", NULL ); + goto on_fatal_error; + } mlt_frame_close( frame ); } else @@ -1219,14 +1530,14 @@ static void *consumer_thread( void *arg ) break; } } - if ( audio_st ) - mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st->pts.val, audio_pts ); + if ( audio_st[0] ) + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st[0]->pts.val, audio_pts ); if ( video_st ) mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "video pts %lld (%f) ", video_st->pts.val, video_pts ); mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "\n" ); } - if ( real_time_output == 1 && frames % 12 == 0 ) + if ( real_time_output == 1 && frames % 2 == 0 ) { long passed = time_difference( &ante ); if ( fifo != NULL ) @@ -1243,13 +1554,14 @@ static void *consumer_thread( void *arg ) } } -#ifdef FLUSH - if ( ! real_time_output ) + // Flush the encoder buffers + if ( real_time_output <= 0 ) { // Flush audio fifo - if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;) + // TODO: flush all audio streams + if ( audio_st[0] && audio_st[0]->codec->frame_size > 1 ) for (;;) { - AVCodecContext *c = audio_st->codec; + AVCodecContext *c = audio_st[0]->codec; AVPacket pkt; av_init_packet( &pkt ); pkt.size = 0; @@ -1257,24 +1569,26 @@ static void *consumer_thread( void *arg ) if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/ ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) ) { - sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size ); - pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer ); + sample_fifo_fetch( fifo, audio_buf_1, channels * audio_input_frame_size ); + pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, audio_buf_1 ); } if ( pkt.size <= 0 ) pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing audio size %d\n", pkt.size ); if ( pkt.size <= 0 ) break; // Write the compressed frame in the media file if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE ) - pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base ); + pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st[0]->time_base ); pkt.flags |= PKT_FLAG_KEY; - pkt.stream_index = audio_st->index; + pkt.stream_index = audio_st[0]->index; pkt.data = audio_outbuf; if ( av_interleaved_write_frame( oc, &pkt ) != 0 ) { - fprintf( stderr, "%s: Error while writing flushed audio frame\n", __FILE__ ); - break; + mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "error writing flushed audio frame\n" ); + mlt_events_fire( properties, "consumer-fatal-error", NULL ); + 
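
The packets queued by this consumer, in the audio write above and in the flush paths around this point, have their pts converted with av_rescale_q() from the codec's time base to the stream's time base before muxing. A simplified standalone version of that conversion (the real av_rescale_q() additionally guards against overflow and rounds rather than truncates):

#include <stdint.h>
#include <stdio.h>

struct rational { int num, den; };

/* A pts counts ticks of its time base, so moving it to another base is
 * pts * src / dst evaluated in integer arithmetic. */
static int64_t rescale( int64_t pts, struct rational src, struct rational dst )
{
    return pts * src.num * dst.den / ( (int64_t) src.den * dst.num );
}

int main( void )
{
    struct rational codec_tb  = { 1, 25 };    /* e.g. a 25 fps video encoder */
    struct rational stream_tb = { 1, 90000 }; /* e.g. an MPEG muxer tick */

    /* frame 50 at 25 fps is 2 seconds, i.e. 180000 ticks of 1/90000 */
    printf( "frame 50 -> pts %lld\n", (long long) rescale( 50, codec_tb, stream_tb ) );
    return 0;
}
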
goto on_fatal_error; } } @@ -1287,6 +1601,7 @@ static void *consumer_thread( void *arg ) // Encode the image pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing video size %d\n", pkt.size ); if ( pkt.size <= 0 ) break; @@ -1300,32 +1615,37 @@ static void *consumer_thread( void *arg ) // write the compressed frame in the media file if ( av_interleaved_write_frame( oc, &pkt ) != 0 ) { - fprintf( stderr, "%s: Error while writing flushed video frame\n". __FILE__ ); - break; + mlt_log_fatal( MLT_CONSUMER_SERVICE(this), "error writing flushed video frame\n" ); + mlt_events_fire( properties, "consumer-fatal-error", NULL ); + goto on_fatal_error; } + // Dual pass logging + if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out ) + fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out ); } } -#endif + +on_fatal_error: + + // Write the trailer, if any + av_write_trailer( oc ); // close each codec - if (video_st) + if ( video_st ) close_video(oc, video_st); - if (audio_st) - close_audio(oc, audio_st); - - // Write the trailer, if any - av_write_trailer(oc); + for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ ) + close_audio( oc, audio_st[i] ); // Free the streams - for(i = 0; i < oc->nb_streams; i++) - av_freep(&oc->streams[i]); + for ( i = 0; i < oc->nb_streams; i++ ) + av_freep( &oc->streams[i] ); // Close the output file - if (!(fmt->flags & AVFMT_NOFILE)) + if ( !( fmt->flags & AVFMT_NOFILE ) ) #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0) - url_fclose(oc->pb); + url_fclose( oc->pb ); #else - url_fclose(&oc->pb); + url_fclose( &oc->pb ); #endif // Clean up input and output frames @@ -1335,17 +1655,19 @@ static void *consumer_thread( void *arg ) av_free( input->data[0] ); av_free( input ); av_free( video_outbuf ); - av_free( buffer ); + av_free( audio_buf_1 ); + av_free( audio_buf_2 ); // Free the stream - av_free(oc); + av_free( oc ); // Just in case we terminated on pause mlt_properties_set_int( properties, "running", 0 ); mlt_consumer_stopped( this ); + mlt_properties_close( frame_meta_properties ); - if ( mlt_properties_get_int( properties, "pass" ) == 2 ) + if ( mlt_properties_get_int( properties, "pass" ) > 1 ) { // Remove the dual pass log file if ( mlt_properties_get( properties, "_logfilename" ) )
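
The dual-pass logging in this consumer appends the encoder's c->stats_out text to the file named by the "_logfilename" property while the first pass runs; a later pass consumes that statistics file (standard two-pass encoding), and the final hunk removes the log when the "pass" property is greater than 1, as its comment states. A minimal standalone sketch of that lifecycle (the file name below is invented; the real one is whatever "_logfilename" holds):

#include <stdio.h>

int main( void )
{
    const char *logfilename = "example_2pass.log"; /* hypothetical */
    FILE *log = fopen( logfilename, "a" );

    /* pass 1: append the per-frame rate-control statistics */
    if ( log )
    {
        fprintf( log, "%s", "one frame's statistics\n" ); /* stands in for c->stats_out */
        fclose( log );
    }

    /* a later pass reads the accumulated statistics back; once it is done
       the consumer deletes the log, as the comment in the final hunk says */
    remove( logfilename );
    return 0;
}
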