X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fconsumer_avformat.c;h=f26c3740da3e98436e7bb6547952e95dea13f6fb;hb=7a1871285ec0ec538baba550d8fefe6e3d08e5d0;hp=2229ff17827c1f492fe087bacfca2e6f279fc758;hpb=b21286a45c165e5e4561d0700f1ed3beb58e6048;p=mlt diff --git a/src/modules/avformat/consumer_avformat.c b/src/modules/avformat/consumer_avformat.c index 2229ff17..f26c3740 100644 --- a/src/modules/avformat/consumer_avformat.c +++ b/src/modules/avformat/consumer_avformat.c @@ -40,12 +40,18 @@ #include #endif #include +#if LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0) +#include +#endif #if LIBAVUTIL_VERSION_INT < (50<<16) #define PIX_FMT_RGB32 PIX_FMT_RGBA32 #define PIX_FMT_YUYV422 PIX_FMT_YUV422 #endif +#define MAX_AUDIO_STREAMS (8) +#define AUDIO_ENCODE_BUFFER_SIZE (48000 * 2 * MAX_AUDIO_STREAMS) + // // This structure should be extended and made globally available in mlt // @@ -140,7 +146,7 @@ static int consumer_is_stopped( mlt_consumer this ); static void *consumer_thread( void *arg ); static void consumer_close( mlt_consumer this ); -/** Initialise the dv consumer. +/** Initialise the consumer. */ mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg ) @@ -259,7 +265,7 @@ static int consumer_start( mlt_consumer this ) } else { - fprintf( stderr, "%s: Invalid size property %s - ignoring.\n", __FILE__, size ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "Invalid size property %s - ignoring.\n", size ); } } @@ -348,7 +354,7 @@ static int consumer_is_stopped( mlt_consumer this ) /** Process properties as AVOptions and apply to AV context obj */ -static void apply_properties( void *obj, mlt_properties properties, int flags ) +static void apply_properties( void *obj, mlt_properties properties, int flags, int alloc ) { int i; int count = mlt_properties_count( properties ); @@ -358,9 +364,9 @@ static void apply_properties( void *obj, mlt_properties properties, int flags ) const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags ); if ( opt != NULL ) #if LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0) - av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), 0, NULL ); + av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), alloc, NULL ); #elif LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0) - av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 ); + av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), alloc ); #else av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) ); #endif @@ -370,13 +376,13 @@ static void apply_properties( void *obj, mlt_properties properties, int flags ) /** Add an audio output stream */ -static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id ) +static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id, int channels ) { // Get the properties mlt_properties properties = MLT_CONSUMER_PROPERTIES( this ); // Create a new stream - AVStream *st = av_new_stream( oc, 1 ); + AVStream *st = av_new_stream( oc, oc->nb_streams ); // If created, then initialise from properties if ( st != NULL ) @@ -388,13 +394,16 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c c->codec_id = codec_id; c->codec_type = CODEC_TYPE_AUDIO; + c->sample_fmt = SAMPLE_FMT_S16; +#if 0 // disabled until some audio codecs are multi-threaded // Setup multi-threading int thread_count = mlt_properties_get_int( properties, "threads" ); if ( thread_count == 0 && 
getenv( "MLT_AVFORMAT_THREADS" ) ) thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) ); if ( thread_count > 1 ) avcodec_thread_init( c, thread_count ); +#endif if (oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; @@ -411,7 +420,14 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c } // Process properties as AVOptions - apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM ); + char *apre = mlt_properties_get( properties, "apre" ); + if ( apre ) + { + mlt_properties p = mlt_properties_load( apre ); + apply_properties( c, p, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 ); int audio_qscale = mlt_properties_get_int( properties, "aq" ); if ( audio_qscale > QSCALE_NONE ) @@ -423,14 +439,14 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c // Set parameters controlled by MLT c->sample_rate = mlt_properties_get_int( properties, "frequency" ); c->time_base = ( AVRational ){ 1, c->sample_rate }; - c->channels = mlt_properties_get_int( properties, "channels" ); + c->channels = channels; if ( mlt_properties_get( properties, "alang" ) != NULL ) strncpy( st->language, mlt_properties_get( properties, "alang" ), sizeof( st->language ) ); } else { - fprintf( stderr, "%s: Could not allocate a stream for audio\n", __FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not allocate a stream for audio\n" ); } return st; @@ -480,7 +496,7 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size } else { - fprintf( stderr, "%s: Unable to encode audio - disabling audio output.\n", __FILE__ ); + mlt_log_warning( NULL, "%s: Unable to encode audio - disabling audio output.\n", __FILE__ ); } return audio_input_frame_size; @@ -488,7 +504,8 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size static void close_audio( AVFormatContext *oc, AVStream *st ) { - avcodec_close( st->codec ); + if ( st && st->codec ) + avcodec_close( st->codec ); } /** Add a video output stream @@ -500,7 +517,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c mlt_properties properties = MLT_CONSUMER_PROPERTIES( this ); // Create a new stream - AVStream *st = av_new_stream( oc, 0 ); + AVStream *st = av_new_stream( oc, oc->nb_streams ); if ( st != NULL ) { @@ -521,7 +538,17 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c avcodec_thread_init( c, thread_count ); // Process properties as AVOptions - apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM ); + char *vpre = mlt_properties_get( properties, "vpre" ); + if ( vpre ) + { + mlt_properties p = mlt_properties_load( vpre ); + apply_properties( c, p, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + int colorspace = mlt_properties_get_int( properties, "colorspace" ); + mlt_properties_set( properties, "colorspace", NULL ); + apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 ); + mlt_properties_set_int( properties, "colorspace", colorspace ); // Set options controlled by MLT c->width = mlt_properties_get_int( properties, "width" ); @@ -530,7 +557,30 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c c->time_base.den = mlt_properties_get_int( properties, 
"frame_rate_num" ); if ( st->time_base.den == 0 ) st->time_base = c->time_base; +#if LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0) + c->pix_fmt = pix_fmt ? av_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P; +#else c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P; +#endif + + switch ( colorspace ) + { + case 170: + c->colorspace = AVCOL_SPC_SMPTE170M; + break; + case 240: + c->colorspace = AVCOL_SPC_SMPTE240M; + break; + case 470: + c->colorspace = AVCOL_SPC_BT470BG; + break; + case 601: + c->colorspace = ( 576 % c->height ) ? AVCOL_SPC_SMPTE170M : AVCOL_SPC_BT470BG; + break; + case 709: + c->colorspace = AVCOL_SPC_BT709; + break; + } if ( mlt_properties_get( properties, "aspect" ) ) { @@ -612,7 +662,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c int start, end, q; int e = sscanf( rc_override, "%d,%d,%d", &start, &end, &q ); if ( e != 3 ) - fprintf( stderr, "%s: Error parsing rc_override\n", __FILE__ ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "Error parsing rc_override\n" ); c->rc_override = av_realloc( c->rc_override, sizeof( RcOverride ) * ( i + 1 ) ); c->rc_override[i].start_frame = start; c->rc_override[i].end_frame = end; @@ -674,7 +724,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c fseek( f, 0, SEEK_SET ); logbuffer = av_malloc( size + 1 ); if ( !logbuffer ) - fprintf( stderr, "%s: Could not allocate log buffer\n", __FILE__ ); + mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "Could not allocate log buffer\n" ); else { size = fread( logbuffer, 1, size, f ); @@ -689,7 +739,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c } else { - fprintf( stderr, "%s: Could not allocate a stream for video\n", __FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not allocate a stream for video\n" ); } return st; @@ -749,7 +799,8 @@ static int open_video(AVFormatContext *oc, AVStream *st) void close_video(AVFormatContext *oc, AVStream *st) { - avcodec_close(st->codec); + if ( st && st->codec ) + avcodec_close(st->codec); } static inline long time_difference( struct timeval *time1 ) @@ -790,8 +841,9 @@ static void *consumer_thread( void *arg ) int img_height = height; // Get default audio properties - mlt_audio_format aud_fmt = mlt_audio_pcm; + mlt_audio_format aud_fmt = mlt_audio_s16; int channels = mlt_properties_get_int( properties, "channels" ); + int total_channels = channels; int frequency = mlt_properties_get_int( properties, "frequency" ); int16_t *pcm = NULL; int samples = 0; @@ -823,7 +875,8 @@ static void *consumer_thread( void *arg ) mlt_image_format img_fmt = mlt_image_yuv422; // For receiving audio samples back from the fifo - int16_t *buffer = av_malloc( 48000 * 2 ); + int16_t *audio_buf_1 = av_malloc( AUDIO_ENCODE_BUFFER_SIZE ); + int16_t *audio_buf_2 = NULL; int count = 0; // Allocate the context @@ -834,17 +887,14 @@ static void *consumer_thread( void *arg ) #endif // Streams - AVStream *audio_st = NULL; AVStream *video_st = NULL; + AVStream *audio_st[ MAX_AUDIO_STREAMS ]; // Time stamps double audio_pts = 0; double video_pts = 0; - // Loop variable - int i; - - // Frames despatched + // Frames dispatched long int frames = 0; long int total_time = 0; @@ -859,17 +909,38 @@ static void *consumer_thread( void *arg ) int audio_codec_id; int video_codec_id; + // Misc + char key[27]; + mlt_properties frame_meta_properties = mlt_properties_new(); + + // Initialize audio_st + int i = MAX_AUDIO_STREAMS; + while ( i-- ) + audio_st[i] = NULL; + // 
Check for user selected format first if ( format != NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( format, NULL, NULL ); +#else + fmt = av_guess_format( format, NULL, NULL ); +#endif // Otherwise check on the filename if ( fmt == NULL && filename != NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( NULL, filename, NULL ); +#else + fmt = av_guess_format( NULL, filename, NULL ); +#endif // Otherwise default to mpeg if ( fmt == NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( "mpeg", NULL, NULL ); +#else + fmt = av_guess_format( "mpeg", NULL, NULL ); +#endif // We need a filename - default to stdout? if ( filename == NULL || !strcmp( filename, "" ) ) @@ -888,7 +959,7 @@ static void *consumer_thread( void *arg ) if ( p != NULL ) audio_codec_id = p->id; else - fprintf( stderr, "%s: audio codec %s unrecognised - ignoring\n", __FILE__, acodec ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "audio codec %s unrecognised - ignoring\n", acodec ); } // Check for video codec overides @@ -900,7 +971,7 @@ static void *consumer_thread( void *arg ) if ( p != NULL ) video_codec_id = p->id; else - fprintf( stderr, "%s: video codec %s unrecognised - ignoring\n", __FILE__, vcodec ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "video codec %s unrecognised - ignoring\n", vcodec ); } // Write metadata @@ -931,11 +1002,33 @@ static void *consumer_thread( void *arg ) oc->oformat = fmt; snprintf( oc->filename, sizeof(oc->filename), "%s", filename ); - // Add audio and video streams + // Add audio and video streams if ( video_codec_id != CODEC_ID_NONE ) video_st = add_video_stream( this, oc, video_codec_id ); if ( audio_codec_id != CODEC_ID_NONE ) - audio_st = add_audio_stream( this, oc, audio_codec_id ); + { + int is_multi = 0; + + total_channels = 0; + // multitrack audio + for ( i = 0; i < MAX_AUDIO_STREAMS; i++ ) + { + sprintf( key, "channels.%d", i ); + int j = mlt_properties_get_int( properties, key ); + if ( j ) + { + is_multi = 1; + total_channels += j; + audio_st[i] = add_audio_stream( this, oc, audio_codec_id, j ); + } + } + // single track + if ( !is_multi ) + { + audio_st[0] = add_audio_stream( this, oc, audio_codec_id, channels ); + total_channels = channels; + } + } // Set the parameters (even though we have none...) 
if ( av_set_parameters(oc, NULL) >= 0 ) @@ -944,30 +1037,41 @@ static void *consumer_thread( void *arg ) oc->max_delay= ( int )( mlt_properties_get_double( properties, "muxdelay" ) * AV_TIME_BASE ); // Process properties as AVOptions - apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM ); + char *fpre = mlt_properties_get( properties, "fpre" ); + if ( fpre ) + { + mlt_properties p = mlt_properties_load( fpre ); + apply_properties( oc, p, AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM, 0 ); if ( video_st && !open_video( oc, video_st ) ) video_st = NULL; - if ( audio_st ) - audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size ); + for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ ) + { + audio_input_frame_size = open_audio( oc, audio_st[i], audio_outbuf_size ); + if ( !audio_input_frame_size ) + audio_st[i] = NULL; + } // Open the output file, if needed if ( !( fmt->flags & AVFMT_NOFILE ) ) { if ( url_fopen( &oc->pb, filename, URL_WRONLY ) < 0 ) { - fprintf( stderr, "%s: Could not open '%s'\n", __FILE__, filename ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not open '%s'\n", filename ); mlt_properties_set_int( properties, "running", 0 ); } } - // Write the stream header, if any + // Write the stream header. if ( mlt_properties_get_int( properties, "running" ) ) av_write_header( oc ); } else { - fprintf( stderr, "%s: Invalid output format parameters\n", __FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Invalid output format parameters\n" ); mlt_properties_set_int( properties, "running", 0 ); } @@ -976,22 +1080,22 @@ static void *consumer_thread( void *arg ) output = alloc_picture( video_st->codec->pix_fmt, width, height ); // Last check - need at least one stream - if ( audio_st == NULL && video_st == NULL ) + if ( !audio_st[0] && !video_st ) mlt_properties_set_int( properties, "running", 0 ); // Get the starting time (can ignore the times above) gettimeofday( &ante, NULL ); // Loop while running - while( mlt_properties_get_int( properties, "running" ) && !terminated ) + while( mlt_properties_get_int( properties, "running" ) && + ( !terminated || ( video_st && mlt_deque_count( queue ) ) ) ) { - // Get the frame frame = mlt_consumer_rt_frame( this ); // Check that we have a frame to work with if ( frame != NULL ) { - // Increment frames despatched + // Increment frames dispatched frames ++; // Default audio args @@ -1001,10 +1105,13 @@ static void *consumer_thread( void *arg ) terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0; // Get audio and append to the fifo - if ( !terminated && audio_st ) + if ( !terminated && audio_st[0] ) { samples = mlt_sample_calculator( fps, frequency, count ++ ); - mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples ); + mlt_frame_get_audio( frame, (void**) &pcm, &aud_fmt, &frequency, &channels, &samples ); + + // Save the audio channel remap properties for later + mlt_properties_pass( frame_meta_properties, frame_properties, "meta.map.audio." 
); // Create the fifo if we don't have one if ( fifo == NULL ) @@ -1013,6 +1120,7 @@ static void *consumer_thread( void *arg ) mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL ); } + // Silence if not normal forward speed if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 ) memset( pcm, 0, samples * channels * 2 ); @@ -1032,36 +1140,126 @@ static void *consumer_thread( void *arg ) while ( 1 ) { // Write interleaved audio and video frames - if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) ) + if ( !video_st || ( video_st && audio_st[0] && audio_pts < video_pts ) ) { - if ( ( channels * audio_input_frame_size ) < sample_fifo_used( fifo ) ) + // Write audio + if ( ( video_st && terminated ) || ( channels * audio_input_frame_size ) < sample_fifo_used( fifo ) ) { - AVCodecContext *c; - AVPacket pkt; - av_init_packet( &pkt ); - - c = audio_st->codec; - - sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size ); - - pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer ); - // Write the compressed frame in the media file - if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE ) - pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base ); - mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pkt pts %lld frame pts %lld", pkt.pts, c->coded_frame->pts ); - pkt.flags |= PKT_FLAG_KEY; - pkt.stream_index= audio_st->index; - pkt.data= audio_outbuf; - - if ( pkt.size ) - if ( av_interleaved_write_frame( oc, &pkt ) != 0) - fprintf( stderr, "%s: Error while writing audio frame\n", __FILE__ ); - - mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", c->frame_size ); - if ( audio_codec_id == CODEC_ID_VORBIS ) - audio_pts = (double)c->coded_frame->pts * av_q2d( audio_st->time_base ); + int j = 0; // channel offset into interleaved source buffer + int n = FFMIN( FFMIN( channels * audio_input_frame_size, sample_fifo_used( fifo ) ), AUDIO_ENCODE_BUFFER_SIZE ); + + // Get the audio samples + if ( n > 0 ) + sample_fifo_fetch( fifo, audio_buf_1, n ); else - audio_pts = (double)audio_st->pts.val * av_q2d( audio_st->time_base ); + memset( audio_buf_1, 0, AUDIO_ENCODE_BUFFER_SIZE ); + samples = n / channels; + + // For each output stream + for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i] && j < total_channels; i++ ) + { + AVStream *stream = audio_st[i]; + AVCodecContext *codec = stream->codec; + AVPacket pkt; + + av_init_packet( &pkt ); + + // Optimized for single track and no channel remap + if ( !audio_st[1] && !mlt_properties_count( frame_meta_properties ) ) + { + pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, audio_buf_1 ); + } + else + { + // Extract the audio channels according to channel mapping + int dest_offset = 0; // channel offset into interleaved dest buffer + + // Get the number of channels for this stream + sprintf( key, "channels.%d", i ); + int current_channels = mlt_properties_get_int( properties, key ); + + // Clear the destination audio buffer. 
+ if ( !audio_buf_2 ) + audio_buf_2 = av_mallocz( AUDIO_ENCODE_BUFFER_SIZE ); + else + memset( audio_buf_2, 0, AUDIO_ENCODE_BUFFER_SIZE ); + + // For each output channel + while ( dest_offset < current_channels && j < total_channels ) + { + int map_start = -1, map_channels = 0; + int source_offset = 0; + int k; + + // Look for a mapping that starts at j + for ( k = 0; k < (MAX_AUDIO_STREAMS * 2) && map_start != j; k++ ) + { + sprintf( key, "%d.channels", k ); + map_channels = mlt_properties_get_int( frame_meta_properties, key ); + sprintf( key, "%d.start", k ); + if ( mlt_properties_get( frame_meta_properties, key ) ) + map_start = mlt_properties_get_int( frame_meta_properties, key ); + if ( map_start != j ) + source_offset += map_channels; + } + + // If no mapping + if ( map_start != j ) + { + map_channels = current_channels; + source_offset = j; + } + + // Copy samples if source offset valid + if ( source_offset < channels ) + { + // Interleave the audio buffer with the # channels for this stream/mapping. + for ( k = 0; k < map_channels; k++, j++, source_offset++, dest_offset++ ) + { + int16_t *src = audio_buf_1 + source_offset; + int16_t *dest = audio_buf_2 + dest_offset; + int s = samples + 1; + + while ( --s ) { + *dest = *src; + dest += current_channels; + src += channels; + } + } + } + // Otherwise silence + else + { + j += current_channels; + dest_offset += current_channels; + } + } + pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, audio_buf_2 ); + } + + // Write the compressed frame in the media file + if ( codec->coded_frame && codec->coded_frame->pts != AV_NOPTS_VALUE ) + { + pkt.pts = av_rescale_q( codec->coded_frame->pts, codec->time_base, stream->time_base ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio stream %d pkt pts %lld frame pts %lld", + stream->index, pkt.pts, codec->coded_frame->pts ); + } + pkt.flags |= PKT_FLAG_KEY; + pkt.stream_index = stream->index; + pkt.data = audio_outbuf; + + if ( pkt.size > 0 ) + { + if ( av_interleaved_write_frame( oc, &pkt ) ) + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "error writing audio frame %d\n", frames - 1 ); + } + + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", codec->frame_size ); + if ( i == 0 ) + { + audio_pts = (double)stream->pts.val * av_q2d( stream->time_base ); + } + } } else { @@ -1070,6 +1268,7 @@ static void *consumer_thread( void *arg ) } else if ( video_st ) { + // Write video if ( mlt_deque_count( queue ) ) { int out_size, ret; @@ -1087,8 +1286,6 @@ static void *consumer_thread( void *arg ) uint8_t *p; uint8_t *q; - mlt_events_fire( properties, "consumer-frame-show", frame, NULL ); - mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 ); q = image; @@ -1107,8 +1304,15 @@ static void *consumer_thread( void *arg ) // Do the colour space conversion #ifdef SWSCALE + int flags = SWS_BILINEAR; +#ifdef USE_MMX + flags |= SWS_CPU_CAPS_MMX; +#endif +#ifdef USE_SSE + flags |= SWS_CPU_CAPS_MMX2; +#endif struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUYV422, - width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL); + width, height, video_st->codec->pix_fmt, flags, NULL, NULL, NULL); sws_scale( context, input->data, input->linesize, 0, height, output->data, output->linesize); sws_freeContext( context ); @@ -1116,6 +1320,8 @@ static void *consumer_thread( void *arg ) img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUYV422, width, height ); #endif + mlt_events_fire( 
properties, "consumer-frame-show", frame, NULL ); + // Apply the alpha if applicable if ( video_st->codec->pix_fmt == PIX_FMT_RGB32 ) { @@ -1125,11 +1331,7 @@ static void *consumer_thread( void *arg ) for ( i = 0; i < height; i ++ ) { n = ( width + 7 ) / 8; - p = output->data[ 0 ] + i * output->linesize[ 0 ]; - - #ifndef __DARWIN__ - p += 3; - #endif + p = output->data[ 0 ] + i * output->linesize[ 0 ] + 3; switch( width % 8 ) { @@ -1170,12 +1372,13 @@ static void *consumer_thread( void *arg ) // Set frame interlace hints output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" ); output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" ); + output->pts = frame_count; // Encode the image out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output ); // If zero size, it means the image was buffered - if (out_size > 0) + if ( out_size > 0 ) { AVPacket pkt; av_init_packet( &pkt ); @@ -1195,12 +1398,12 @@ static void *consumer_thread( void *arg ) video_pts = (double)video_st->pts.val * av_q2d( video_st->time_base ); // Dual pass logging - if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out) + if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out ) fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out ); } - else + else if ( out_size < 0 ) { - fprintf( stderr, "%s: error with video encode\n", __FILE__ ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "error with video encode %d\n", frame_count ); } } frame_count++; @@ -1211,14 +1414,14 @@ static void *consumer_thread( void *arg ) break; } } - if ( audio_st ) - mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st->pts.val, audio_pts ); + if ( audio_st[0] ) + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st[0]->pts.val, audio_pts ); if ( video_st ) mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "video pts %lld (%f) ", video_st->pts.val, video_pts ); mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "\n" ); } - if ( real_time_output == 1 && frames % 12 == 0 ) + if ( real_time_output == 1 && frames % 2 == 0 ) { long passed = time_difference( &ante ); if ( fifo != NULL ) @@ -1235,13 +1438,14 @@ static void *consumer_thread( void *arg ) } } -#ifdef FLUSH - if ( ! 
real_time_output ) + // Flush the encoder buffers + if ( real_time_output <= 0 ) { // Flush audio fifo - if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;) + // TODO: flush all audio streams + if ( audio_st[0] && audio_st[0]->codec->frame_size > 1 ) for (;;) { - AVCodecContext *c = audio_st->codec; + AVCodecContext *c = audio_st[0]->codec; AVPacket pkt; av_init_packet( &pkt ); pkt.size = 0; @@ -1249,23 +1453,24 @@ static void *consumer_thread( void *arg ) if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/ ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) ) { - sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size ); - pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer ); + sample_fifo_fetch( fifo, audio_buf_1, channels * audio_input_frame_size ); + pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, audio_buf_1 ); } if ( pkt.size <= 0 ) pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing audio size %d\n", pkt.size ); if ( pkt.size <= 0 ) break; // Write the compressed frame in the media file if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE ) - pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base ); + pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st[0]->time_base ); pkt.flags |= PKT_FLAG_KEY; - pkt.stream_index = audio_st->index; + pkt.stream_index = audio_st[0]->index; pkt.data = audio_outbuf; if ( av_interleaved_write_frame( oc, &pkt ) != 0 ) { - fprintf( stderr, "%s: Error while writing flushed audio frame\n", __FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "%s: error writing flushed audio frame\n", __FILE__ ); break; } } @@ -1279,6 +1484,7 @@ static void *consumer_thread( void *arg ) // Encode the image pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing video size %d\n", pkt.size ); if ( pkt.size <= 0 ) break; @@ -1292,32 +1498,34 @@ static void *consumer_thread( void *arg ) // write the compressed frame in the media file if ( av_interleaved_write_frame( oc, &pkt ) != 0 ) { - fprintf( stderr, "%s: Error while writing flushed video frame\n". 
__FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE(this), "error writing flushed video frame\n" ); break; } + // Dual pass logging + if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out ) + fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out ); } } -#endif - - // close each codec - if (video_st) - close_video(oc, video_st); - if (audio_st) - close_audio(oc, audio_st); // Write the trailer, if any - av_write_trailer(oc); + av_write_trailer( oc ); + + // close each codec + if ( video_st ) + close_video(oc, video_st); + for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ ) + close_audio( oc, audio_st[i] ); // Free the streams - for(i = 0; i < oc->nb_streams; i++) - av_freep(&oc->streams[i]); + for ( i = 0; i < oc->nb_streams; i++ ) + av_freep( &oc->streams[i] ); // Close the output file - if (!(fmt->flags & AVFMT_NOFILE)) + if ( !( fmt->flags & AVFMT_NOFILE ) ) #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0) - url_fclose(oc->pb); + url_fclose( oc->pb ); #else - url_fclose(&oc->pb); + url_fclose( &oc->pb ); #endif // Clean up input and output frames @@ -1327,17 +1535,19 @@ static void *consumer_thread( void *arg ) av_free( input->data[0] ); av_free( input ); av_free( video_outbuf ); - av_free( buffer ); + av_free( audio_buf_1 ); + av_free( audio_buf_2 ); // Free the stream - av_free(oc); + av_free( oc ); // Just in case we terminated on pause mlt_properties_set_int( properties, "running", 0 ); mlt_consumer_stopped( this ); + mlt_properties_close( frame_meta_properties ); - if ( mlt_properties_get_int( properties, "pass" ) == 2 ) + if ( mlt_properties_get_int( properties, "pass" ) > 1 ) { // Remove the dual pass log file if ( mlt_properties_get( properties, "_logfilename" ) ) @@ -1355,6 +1565,11 @@ static void *consumer_thread( void *arg ) sprintf( full, "%s/%s", cwd, file ); remove( full ); free( full ); + file = "x264_2pass.log.mbtree"; + full = malloc( strlen( cwd ) + strlen( file ) + 2 ); + sprintf( full, "%s/%s", cwd, file ); + remove( full ); + free( full ); free( cwd ); remove( "x264_2pass.log.temp" ); }
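
Note on the multi-track audio encode loop introduced above: the interleaved source buffer (audio_buf_1, stride `channels`) is split per output stream by copying `map_channels` channels, starting at `source_offset`, into audio_buf_2 interleaved at that stream's `current_channels` stride. The standalone sketch below is not part of the patch; the function name and sample data are illustrative only, but it performs the same stride copy in isolation.

/* Standalone illustration of the interleaved stride copy used when splitting
 * the wide source buffer into per-stream buffers. Hypothetical example code,
 * not MLT source. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy map_channels channels, starting at source_offset in an interleaved
 * source with src_channels per frame, into an interleaved destination with
 * dest_channels per frame, starting at dest_offset. */
static void copy_channels( const int16_t *src, int src_channels, int source_offset,
                           int16_t *dest, int dest_channels, int dest_offset,
                           int map_channels, int samples )
{
	int k;
	for ( k = 0; k < map_channels; k++ )
	{
		const int16_t *s = src + source_offset + k;
		int16_t *d = dest + dest_offset + k;
		int n = samples;
		while ( n-- )
		{
			*d = *s;
			d += dest_channels;  /* next frame in the destination */
			s += src_channels;   /* next frame in the source */
		}
	}
}

int main( void )
{
	/* 4-channel interleaved source, 3 frames: c0 c1 c2 c3 | c0 c1 c2 c3 | ... */
	int16_t src[ 4 * 3 ] = { 0, 1, 2, 3,  10, 11, 12, 13,  20, 21, 22, 23 };
	int16_t dest[ 2 * 3 ];
	int i;

	memset( dest, 0, sizeof( dest ) );
	/* Route source channels 2..3 into a 2-channel output stream. */
	copy_channels( src, 4, 2, dest, 2, 0, 2, 3 );

	for ( i = 0; i < 6; i++ )
		printf( "%d ", dest[ i ] );
	printf( "\n" );  /* prints: 2 3 12 13 22 23 */
	return 0;
}

In the patch itself the same walk is driven by the `channels.N` consumer properties (one output stream is added per non-zero entry) and by the frame's channel map; when a mapping points past the available source channels, the zero-filled audio_buf_2 is left untouched, so those channels are written as silence.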
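The channel-map lookup in that loop relies on frame properties named `meta.map.audio.<k>.start` and `meta.map.audio.<k>.channels`; `mlt_properties_pass()` copies them into frame_meta_properties with the prefix stripped, which is why the loop queries plain `<k>.start` / `<k>.channels`. The following is a minimal sketch of that prefix handling, assuming an installed MLT framework; the property values are made up for illustration and would normally be set by an upstream producer or filter.

#include <stdio.h>
#include <framework/mlt.h>

int main( void )
{
	mlt_factory_init( NULL );

	/* Pretend an upstream producer/filter set these on the frame. */
	mlt_properties frame_props = mlt_properties_new();
	mlt_properties_set_int( frame_props, "meta.map.audio.0.start", 0 );
	mlt_properties_set_int( frame_props, "meta.map.audio.0.channels", 2 );
	mlt_properties_set_int( frame_props, "meta.map.audio.1.start", 2 );
	mlt_properties_set_int( frame_props, "meta.map.audio.1.channels", 6 );

	/* The consumer copies matching properties with the prefix removed. */
	mlt_properties map = mlt_properties_new();
	mlt_properties_pass( map, frame_props, "meta.map.audio." );

	/* The encode loop can now look up "<k>.start" / "<k>.channels" directly. */
	printf( "map 1: start %d, channels %d\n",
		mlt_properties_get_int( map, "1.start" ),
		mlt_properties_get_int( map, "1.channels" ) );

	mlt_properties_close( map );
	mlt_properties_close( frame_props );
	mlt_factory_close();
	return 0;
}

The new `apre`, `vpre`, and `fpre` properties follow a similar pattern at setup time: each names a file that `mlt_properties_load()` turns into a property list, which `apply_properties()` applies to the codec or format context as AVOptions before the properties set directly on the consumer are applied.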