X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=src%2Fmodules%2Favformat%2Fconsumer_avformat.c;h=9dacb50a52991f1a6ce3ca1af66b9fbc76c533d8;hb=e1729616b392a9ba47390264e4d25a414f79fd74;hp=87e3f5f8ddfbc097a3a2b77519a364e929613d17;hpb=b368e2047380b1de3fe454a011d7a84e4cf49cee;p=mlt diff --git a/src/modules/avformat/consumer_avformat.c b/src/modules/avformat/consumer_avformat.c index 87e3f5f8..9dacb50a 100644 --- a/src/modules/avformat/consumer_avformat.c +++ b/src/modules/avformat/consumer_avformat.c @@ -22,6 +22,8 @@ // mlt Header files #include #include +#include +#include // System header files #include @@ -30,7 +32,6 @@ #include #include #include -#include #include // avformat header files @@ -40,6 +41,13 @@ #endif #include +#if LIBAVUTIL_VERSION_INT < (50<<16) +#define PIX_FMT_RGB32 PIX_FMT_RGBA32 +#define PIX_FMT_YUYV422 PIX_FMT_YUV422 +#endif + +#define AUDIO_ENCODE_BUFFER_SIZE (48000 * 2) + // // This structure should be extended and made globally available in mlt // @@ -174,6 +182,7 @@ mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg ) // Default to separate processing threads for producer and consumer with no frame dropping! mlt_properties_set_int( properties, "real_time", -1 ); + mlt_properties_set_int( properties, "prefill", 1 ); // Set up start/stop/terminated callbacks this->start = consumer_start; @@ -192,9 +201,44 @@ static int consumer_start( mlt_consumer this ) { // Get the properties mlt_properties properties = MLT_CONSUMER_PROPERTIES( this ); + int error = 0; + + // Report information about available muxers and codecs as YAML Tiny + char *s = mlt_properties_get( properties, "f" ); + if ( s && strcmp( s, "list" ) == 0 ) + { + fprintf( stderr, "---\nformats:\n" ); + AVOutputFormat *format = NULL; + while ( ( format = av_oformat_next( format ) ) ) + fprintf( stderr, " - %s\n", format->name ); + fprintf( stderr, "...\n" ); + error = 1; + } + s = mlt_properties_get( properties, "acodec" ); + if ( s && strcmp( s, "list" ) == 0 ) + { + fprintf( stderr, "---\naudio_codecs:\n" ); + AVCodec *codec = NULL; + while ( ( codec = av_codec_next( codec ) ) ) + if ( codec->encode && codec->type == CODEC_TYPE_AUDIO ) + fprintf( stderr, " - %s\n", codec->name ); + fprintf( stderr, "...\n" ); + error = 1; + } + s = mlt_properties_get( properties, "vcodec" ); + if ( s && strcmp( s, "list" ) == 0 ) + { + fprintf( stderr, "---\nvideo_codecs:\n" ); + AVCodec *codec = NULL; + while ( ( codec = av_codec_next( codec ) ) ) + if ( codec->encode && codec->type == CODEC_TYPE_VIDEO ) + fprintf( stderr, " - %s\n", codec->name ); + fprintf( stderr, "...\n" ); + error = 1; + } // Check that we're not already running - if ( !mlt_properties_get_int( properties, "running" ) ) + if ( !error && !mlt_properties_get_int( properties, "running" ) ) { // Allocate a thread pthread_t *thread = calloc( 1, sizeof( pthread_t ) ); @@ -217,7 +261,7 @@ static int consumer_start( mlt_consumer this ) } else { - fprintf( stderr, "%s: Invalid size property %s - ignoring.\n", __FILE__, size ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "Invalid size property %s - ignoring.\n", size ); } } @@ -266,7 +310,7 @@ static int consumer_start( mlt_consumer this ) // Create the thread pthread_create( thread, NULL, consumer_thread, this ); } - return 0; + return error; } /** Stop the consumer. 
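The block added to consumer_start() above turns the consumer into a capability reporter: when the f, acodec, or vcodec property is set to "list", it walks the registered muxers and encoders, prints them as YAML Tiny, and sets error so the encoding thread is never launched. Below is a minimal standalone sketch of the same walk, using the av_oformat_next()/av_codec_next() iterators the patch itself calls; it assumes an FFmpeg contemporary with this patch (these iterators and the CODEC_TYPE_* names have since been removed), and the file name is hypothetical.

    /* list_formats.c - dump muxers and audio encoders as YAML Tiny,
     * mirroring the new "list" handling in consumer_start(). */
    #include <stdio.h>
    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    int main( void )
    {
        av_register_all( );   /* registration must happen before iterating */

        fprintf( stderr, "---\nformats:\n" );
        AVOutputFormat *format = NULL;
        while ( ( format = av_oformat_next( format ) ) )
            fprintf( stderr, "  - %s\n", format->name );
        fprintf( stderr, "...\n" );

        fprintf( stderr, "---\naudio_codecs:\n" );
        AVCodec *codec = NULL;
        while ( ( codec = av_codec_next( codec ) ) )
            if ( codec->encode && codec->type == CODEC_TYPE_AUDIO )
                fprintf( stderr, "  - %s\n", codec->name );
        fprintf( stderr, "...\n" );
        /* The video list is identical with CODEC_TYPE_VIDEO. */

        return 0;
    }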
@@ -306,7 +350,7 @@ static int consumer_is_stopped( mlt_consumer this ) /** Process properties as AVOptions and apply to AV context obj */ -static void apply_properties( void *obj, mlt_properties properties, int flags ) +static void apply_properties( void *obj, mlt_properties properties, int flags, int alloc ) { int i; int count = mlt_properties_count( properties ); @@ -316,9 +360,9 @@ static void apply_properties( void *obj, mlt_properties properties, int flags ) const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags ); if ( opt != NULL ) #if LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0) - av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), 0, NULL ); + av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), alloc, NULL ); #elif LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0) - av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), 0 ); + av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), alloc ); #else av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) ); #endif @@ -346,13 +390,16 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c c->codec_id = codec_id; c->codec_type = CODEC_TYPE_AUDIO; + c->sample_fmt = SAMPLE_FMT_S16; +#if 0 // disabled until some audio codecs are multi-threaded // Setup multi-threading int thread_count = mlt_properties_get_int( properties, "threads" ); if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) ) thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) ); if ( thread_count > 1 ) avcodec_thread_init( c, thread_count ); +#endif if (oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; @@ -369,7 +416,14 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c } // Process properties as AVOptions - apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM ); + char *apre = mlt_properties_get( properties, "apre" ); + if ( apre ) + { + mlt_properties p = mlt_properties_load( apre ); + apply_properties( c, p, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 ); int audio_qscale = mlt_properties_get_int( properties, "aq" ); if ( audio_qscale > QSCALE_NONE ) @@ -380,6 +434,7 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c // Set parameters controlled by MLT c->sample_rate = mlt_properties_get_int( properties, "frequency" ); + c->time_base = ( AVRational ){ 1, c->sample_rate }; c->channels = mlt_properties_get_int( properties, "channels" ); if ( mlt_properties_get( properties, "alang" ) != NULL ) @@ -387,7 +442,7 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c } else { - fprintf( stderr, "%s: Could not allocate a stream for audio\n", __FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not allocate a stream for audio\n" ); } return st; @@ -430,14 +485,14 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size } // Some formats want stream headers to be seperate (hmm) - if( !strcmp( oc->oformat->name, "mp4" ) || - !strcmp( oc->oformat->name, "mov" ) || - !strcmp( oc->oformat->name, "3gp" ) ) + if ( !strcmp( oc->oformat->name, "mp4" ) || + !strcmp( oc->oformat->name, "mov" ) || + !strcmp( oc->oformat->name, "3gp" ) ) c->flags |= CODEC_FLAG_GLOBAL_HEADER; } else { - fprintf( stderr, "%s: Unable to 
encode audio - disabling audio output.\n", __FILE__ ); + mlt_log_warning( NULL, "%s: Unable to encode audio - disabling audio output.\n", __FILE__ ); } return audio_input_frame_size; @@ -445,7 +500,8 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size static void close_audio( AVFormatContext *oc, AVStream *st ) { - avcodec_close( st->codec ); + if ( st && st->codec ) + avcodec_close( st->codec ); } /** Add a video output stream @@ -478,47 +534,25 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c avcodec_thread_init( c, thread_count ); // Process properties as AVOptions - apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM ); + char *vpre = mlt_properties_get( properties, "vpre" ); + if ( vpre ) + { + mlt_properties p = mlt_properties_load( vpre ); + apply_properties( c, p, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 ); // Set options controlled by MLT c->width = mlt_properties_get_int( properties, "width" ); c->height = mlt_properties_get_int( properties, "height" ); c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" ); c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" ); - st->time_base = c->time_base; + if ( st->time_base.den == 0 ) + st->time_base = c->time_base; c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P; - if ( codec_id == CODEC_ID_DVVIDEO ) - { - // Compensate for FFmpeg's notion of DV aspect ratios, which are - // based upon a width of 704. Since we do not have a normaliser - // that crops (nor is cropping 720 wide ITU-R 601 video always desirable) - // we just coerce the values to facilitate a passive behaviour through - // the rescale normaliser when using equivalent producers and consumers. - // = display_aspect / (width * height) - double ar = mlt_properties_get_double( properties, "aspect_ratio" ); - if ( ar == 8.0/9.0 ) // 4:3 NTSC - { - c->sample_aspect_ratio.num = 10; - c->sample_aspect_ratio.den = 11; - } - else if ( ar == 16.0/15.0 ) // 4:3 PAL - { - c->sample_aspect_ratio.num = 59; - c->sample_aspect_ratio.den = 54; - } - else if ( ar == 32.0/27.0 ) // 16:9 NTSC - { - c->sample_aspect_ratio.num = 40; - c->sample_aspect_ratio.den = 33; - } - else // 16:9 PAL - { - c->sample_aspect_ratio.num = 118; - c->sample_aspect_ratio.den = 82; - } - } - else if ( mlt_properties_get( properties, "aspect" ) ) + if ( mlt_properties_get( properties, "aspect" ) ) { // "-aspect" on ffmpeg command line is display aspect ratio double ar = mlt_properties_get_double( properties, "aspect" ); @@ -549,7 +583,6 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c profile->sample_aspect_den = rational.den; mlt_properties_set_double( properties, "aspect_ratio", mlt_profile_sar( profile ) ); } - } else { @@ -585,9 +618,9 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c if ( mlt_properties_get_int( properties, "progressive" ) == 0 && mlt_properties_get_int( properties, "deinterlace" ) == 0 ) { - if ( mlt_properties_get_int( properties, "ildct" ) ) + if ( ! mlt_properties_get( properties, "ildct" ) || mlt_properties_get_int( properties, "ildct" ) ) c->flags |= CODEC_FLAG_INTERLACED_DCT; - if ( mlt_properties_get_int( properties, "ilme" ) ) + if ( ! 
mlt_properties_get( properties, "ilme" ) || mlt_properties_get_int( properties, "ilme" ) ) c->flags |= CODEC_FLAG_INTERLACED_ME; } @@ -599,7 +632,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c int start, end, q; int e = sscanf( rc_override, "%d,%d,%d", &start, &end, &q ); if ( e != 3 ) - fprintf( stderr, "%s: Error parsing rc_override\n", __FILE__ ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "Error parsing rc_override\n" ); c->rc_override = av_realloc( c->rc_override, sizeof( RcOverride ) * ( i + 1 ) ); c->rc_override[i].start_frame = start; c->rc_override[i].end_frame = end; @@ -661,7 +694,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c fseek( f, 0, SEEK_SET ); logbuffer = av_malloc( size + 1 ); if ( !logbuffer ) - fprintf( stderr, "%s: Could not allocate log buffer\n", __FILE__ ); + mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "Could not allocate log buffer\n" ); else { size = fread( logbuffer, 1, size, f ); @@ -676,7 +709,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c } else { - fprintf( stderr, "%s: Could not allocate a stream for video\n", __FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not allocate a stream for video\n" ); } return st; @@ -736,7 +769,8 @@ static int open_video(AVFormatContext *oc, AVStream *st) void close_video(AVFormatContext *oc, AVStream *st) { - avcodec_close(st->codec); + if ( st && st->codec ) + avcodec_close(st->codec); } static inline long time_difference( struct timeval *time1 ) @@ -777,14 +811,14 @@ static void *consumer_thread( void *arg ) int img_height = height; // Get default audio properties - mlt_audio_format aud_fmt = mlt_audio_pcm; + mlt_audio_format aud_fmt = mlt_audio_s16; int channels = mlt_properties_get_int( properties, "channels" ); int frequency = mlt_properties_get_int( properties, "frequency" ); int16_t *pcm = NULL; int samples = 0; // AVFormat audio buffer and frame size - int audio_outbuf_size = 10000; + int audio_outbuf_size = ( 1024 * 42 ); uint8_t *audio_outbuf = av_malloc( audio_outbuf_size ); int audio_input_frame_size = 0; @@ -803,18 +837,22 @@ static void *consumer_thread( void *arg ) // Need two av pictures for converting AVFrame *output = NULL; - AVFrame *input = alloc_picture( PIX_FMT_YUV422, width, height ); + AVFrame *input = alloc_picture( PIX_FMT_YUYV422, width, height ); // For receiving images from an mlt_frame uint8_t *image; mlt_image_format img_fmt = mlt_image_yuv422; // For receiving audio samples back from the fifo - int16_t *buffer = av_malloc( 48000 * 2 ); + int16_t *buffer = av_malloc( AUDIO_ENCODE_BUFFER_SIZE ); int count = 0; // Allocate the context +#if (LIBAVFORMAT_VERSION_INT >= ((52<<16)+(26<<8)+0)) + AVFormatContext *oc = avformat_alloc_context( ); +#else AVFormatContext *oc = av_alloc_format_context( ); +#endif // Streams AVStream *audio_st = NULL; @@ -833,7 +871,7 @@ static void *consumer_thread( void *arg ) // Determine the format AVOutputFormat *fmt = NULL; - char *filename = mlt_properties_get( properties, "target" ); + const char *filename = mlt_properties_get( properties, "target" ); char *format = mlt_properties_get( properties, "f" ); char *vcodec = mlt_properties_get( properties, "vcodec" ); char *acodec = mlt_properties_get( properties, "acodec" ); @@ -844,15 +882,27 @@ static void *consumer_thread( void *arg ) // Check for user selected format first if ( format != NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( format, NULL, 
NULL ); +#else + fmt = av_guess_format( format, NULL, NULL ); +#endif // Otherwise check on the filename if ( fmt == NULL && filename != NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( NULL, filename, NULL ); +#else + fmt = av_guess_format( NULL, filename, NULL ); +#endif // Otherwise default to mpeg if ( fmt == NULL ) +#if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0) fmt = guess_format( "mpeg", NULL, NULL ); +#else + fmt = av_guess_format( "mpeg", NULL, NULL ); +#endif // We need a filename - default to stdout? if ( filename == NULL || !strcmp( filename, "" ) ) @@ -863,23 +913,27 @@ static void *consumer_thread( void *arg ) video_codec_id = fmt->video_codec; // Check for audio codec overides - if ( acodec != NULL ) + if ( ( acodec && strcmp( acodec, "none" ) == 0 ) || mlt_properties_get_int( properties, "an" ) ) + audio_codec_id = CODEC_ID_NONE; + else if ( acodec ) { AVCodec *p = avcodec_find_encoder_by_name( acodec ); if ( p != NULL ) audio_codec_id = p->id; else - fprintf( stderr, "%s: audio codec %s unrecognised - ignoring\n", __FILE__, acodec ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "audio codec %s unrecognised - ignoring\n", acodec ); } // Check for video codec overides - if ( vcodec != NULL ) + if ( ( vcodec && strcmp( vcodec, "none" ) == 0 ) || mlt_properties_get_int( properties, "vn" ) ) + video_codec_id = CODEC_ID_NONE; + else if ( vcodec ) { AVCodec *p = avcodec_find_encoder_by_name( vcodec ); if ( p != NULL ) video_codec_id = p->id; else - fprintf( stderr, "%s: video codec %s unrecognised - ignoring\n", __FILE__, vcodec ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "video codec %s unrecognised - ignoring\n", vcodec ); } // Write metadata @@ -911,9 +965,9 @@ static void *consumer_thread( void *arg ) snprintf( oc->filename, sizeof(oc->filename), "%s", filename ); // Add audio and video streams - if ( fmt->video_codec != CODEC_ID_NONE ) + if ( video_codec_id != CODEC_ID_NONE ) video_st = add_video_stream( this, oc, video_codec_id ); - if ( fmt->audio_codec != CODEC_ID_NONE ) + if ( audio_codec_id != CODEC_ID_NONE ) audio_st = add_audio_stream( this, oc, audio_codec_id ); // Set the parameters (even though we have none...) 
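Every call to guess_format() in consumer_thread() is now wrapped in a LIBAVFORMAT_VERSION_INT guard because libavformat 52.45.0 renamed it to av_guess_format(). A small sketch of how the three guarded call sites could be folded into one helper; only the version cutoff and the fallback order (explicit "f" property, then the target filename, then MPEG) come from the patch, while the function names are assumptions of this sketch.

    #include <libavformat/avformat.h>

    /* guess_format() became av_guess_format() in libavformat 52.45.0. */
    static AVOutputFormat *guess_output_format( const char *name, const char *filename )
    {
    #if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0)
        return guess_format( name, filename, NULL );
    #else
        return av_guess_format( name, filename, NULL );
    #endif
    }

    /* Same fallback order as consumer_thread(): the "f" property first,
     * then the extension of the target filename, then MPEG. */
    static AVOutputFormat *pick_output_format( const char *format, const char *filename )
    {
        AVOutputFormat *fmt = NULL;
        if ( format )
            fmt = guess_output_format( format, NULL );
        if ( !fmt && filename )
            fmt = guess_output_format( NULL, filename );
        if ( !fmt )
            fmt = guess_output_format( "mpeg", NULL );
        return fmt;
    }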
@@ -923,19 +977,30 @@ static void *consumer_thread( void *arg ) oc->max_delay= ( int )( mlt_properties_get_double( properties, "muxdelay" ) * AV_TIME_BASE ); // Process properties as AVOptions - apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM ); + char *fpre = mlt_properties_get( properties, "fpre" ); + if ( fpre ) + { + mlt_properties p = mlt_properties_load( fpre ); + apply_properties( oc, p, AV_OPT_FLAG_ENCODING_PARAM, 1 ); + mlt_properties_close( p ); + } + apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM, 0 ); if ( video_st && !open_video( oc, video_st ) ) video_st = NULL; if ( audio_st ) + { audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size ); + if ( !audio_input_frame_size ) + audio_st = NULL; + } // Open the output file, if needed if ( !( fmt->flags & AVFMT_NOFILE ) ) { if ( url_fopen( &oc->pb, filename, URL_WRONLY ) < 0 ) { - fprintf( stderr, "%s: Could not open '%s'\n", __FILE__, filename ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not open '%s'\n", filename ); mlt_properties_set_int( properties, "running", 0 ); } } @@ -946,7 +1011,7 @@ static void *consumer_thread( void *arg ) } else { - fprintf( stderr, "%s: Invalid output format parameters\n", __FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Invalid output format parameters\n" ); mlt_properties_set_int( properties, "running", 0 ); } @@ -962,7 +1027,8 @@ static void *consumer_thread( void *arg ) gettimeofday( &ante, NULL ); // Loop while running - while( mlt_properties_get_int( properties, "running" ) && !terminated ) + while( mlt_properties_get_int( properties, "running" ) && + ( !terminated || ( video_st && mlt_deque_count( queue ) ) ) ) { // Get the frame frame = mlt_consumer_rt_frame( this ); @@ -983,7 +1049,7 @@ static void *consumer_thread( void *arg ) if ( !terminated && audio_st ) { samples = mlt_sample_calculator( fps, frequency, count ++ ); - mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples ); + mlt_frame_get_audio( frame, (void**) &pcm, &aud_fmt, &frequency, &channels, &samples ); // Create the fifo if we don't have one if ( fifo == NULL ) @@ -1010,42 +1076,42 @@ static void *consumer_thread( void *arg ) // While we have stuff to process, process... 
while ( 1 ) { - if (audio_st) - audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; - else - audio_pts = 0.0; - - if (video_st) - video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den; - else - video_pts = 0.0; - // Write interleaved audio and video frames if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) ) { - if ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) + if ( ( video_st && terminated ) || ( channels * audio_input_frame_size ) < sample_fifo_used( fifo ) ) { - AVCodecContext *c; + AVCodecContext *c = audio_st->codec; AVPacket pkt; - av_init_packet( &pkt ); - - c = audio_st->codec; - - sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size ); + int n = FFMIN( FFMIN( channels * audio_input_frame_size, sample_fifo_used( fifo ) ), AUDIO_ENCODE_BUFFER_SIZE ); + if ( n > 0 ) + sample_fifo_fetch( fifo, buffer, n ); + else + memset( buffer, 0, AUDIO_ENCODE_BUFFER_SIZE ); + + av_init_packet( &pkt ); pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer ); + // Write the compressed frame in the media file if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE ) + { pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pkt pts %lld frame pts %lld", pkt.pts, c->coded_frame->pts ); + } pkt.flags |= PKT_FLAG_KEY; pkt.stream_index= audio_st->index; pkt.data= audio_outbuf; - if ( pkt.size ) - if ( av_interleaved_write_frame( oc, &pkt ) != 0) - fprintf( stderr, "%s: Error while writing audio frame\n", __FILE__ ); + if ( pkt.size > 0 ) + if ( av_interleaved_write_frame( oc, &pkt ) != 0 ) + mlt_log_error( MLT_CONSUMER_SERVICE( this ), "error writing audio frame\n" ); - audio_pts += c->frame_size; + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", c->frame_size ); + if ( audio_codec_id == CODEC_ID_VORBIS ) + audio_pts = (double)c->coded_frame->pts * av_q2d( audio_st->time_base ); + else + audio_pts = (double)audio_st->pts.val * av_q2d( audio_st->time_base ); } else { @@ -1091,17 +1157,17 @@ static void *consumer_thread( void *arg ) // Do the colour space conversion #ifdef SWSCALE - struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422, + struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUYV422, width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL); sws_scale( context, input->data, input->linesize, 0, height, output->data, output->linesize); sws_freeContext( context ); #else - img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height ); + img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUYV422, width, height ); #endif // Apply the alpha if applicable - if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 ) + if ( video_st->codec->pix_fmt == PIX_FMT_RGB32 ) { uint8_t *alpha = mlt_frame_get_alpha_mask( frame ); register int n; @@ -1109,11 +1175,7 @@ static void *consumer_thread( void *arg ) for ( i = 0; i < height; i ++ ) { n = ( width + 7 ) / 8; - p = output->data[ 0 ] + i * output->linesize[ 0 ]; - - #ifndef __DARWIN__ - p += 3; - #endif + p = output->data[ 0 ] + i * output->linesize[ 0 ] + 3; switch( width % 8 ) { @@ -1154,18 +1216,20 @@ static void *consumer_thread( void *arg ) // Set frame interlace hints output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" ); 
output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" ); + output->pts = frame_count; // Encode the image out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output ); // If zero size, it means the image was buffered - if (out_size > 0) + if ( out_size > 0 ) { AVPacket pkt; av_init_packet( &pkt ); if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE ) pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "video pkt pts %lld frame pts %lld", pkt.pts, c->coded_frame->pts ); if( c->coded_frame && c->coded_frame->key_frame ) pkt.flags |= PKT_FLAG_KEY; pkt.stream_index= video_st->index; @@ -1174,15 +1238,16 @@ static void *consumer_thread( void *arg ) // write the compressed frame in the media file ret = av_interleaved_write_frame(oc, &pkt); - video_pts += c->frame_size; + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", c->frame_size ); + video_pts = (double)video_st->pts.val * av_q2d( video_st->time_base ); // Dual pass logging - if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out) + if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out ) fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out ); } - else + else if ( out_size < 0 ) { - fprintf( stderr, "%s: error with video encode\n", __FILE__ ); + mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "error with video encode %d\n", frame_count ); } } frame_count++; @@ -1193,9 +1258,14 @@ static void *consumer_thread( void *arg ) break; } } + if ( audio_st ) + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st->pts.val, audio_pts ); + if ( video_st ) + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "video pts %lld (%f) ", video_st->pts.val, video_pts ); + mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "\n" ); } - if ( real_time_output == 1 && frames % 12 == 0 ) + if ( real_time_output == 1 && frames % 2 == 0 ) { long passed = time_difference( &ante ); if ( fifo != NULL ) @@ -1269,15 +1339,22 @@ static void *consumer_thread( void *arg ) // write the compressed frame in the media file if ( av_interleaved_write_frame( oc, &pkt ) != 0 ) { - fprintf( stderr, "%s: Error while writing flushed video frame\n". 
__FILE__ ); + mlt_log_error( MLT_CONSUMER_SERVICE(this), "error while writing flushing video frame\n" ); break; } } } #endif - // close each codec - if (video_st) + // XXX ugly hack to prevent x264 from crashing on multi-threaded encoding + int pass = mlt_properties_get_int( properties, "pass" ); + int thread_count = mlt_properties_get_int( properties, "threads" ); + if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) ) + thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) ); + int multithreaded_x264 = ( video_codec_id == CODEC_ID_H264 && thread_count > 1 ); + + // close each codec + if ( video_st && !multithreaded_x264 ) close_video(oc, video_st); if (audio_st) close_audio(oc, audio_st); @@ -1314,7 +1391,7 @@ static void *consumer_thread( void *arg ) mlt_consumer_stopped( this ); - if ( mlt_properties_get_int( properties, "pass" ) == 2 ) + if ( pass == 2 ) { // Remove the dual pass log file if ( mlt_properties_get( properties, "_logfilename" ) ) @@ -1322,7 +1399,7 @@ static void *consumer_thread( void *arg ) // Remove the x264 dual pass logs char *cwd = getcwd( NULL, 0 ); - char *file = "x264_2pass.log"; + const char *file = "x264_2pass.log"; char *full = malloc( strlen( cwd ) + strlen( file ) + 2 ); sprintf( full, "%s/%s", cwd, file ); remove( full ); @@ -1332,6 +1409,11 @@ static void *consumer_thread( void *arg ) sprintf( full, "%s/%s", cwd, file ); remove( full ); free( full ); + file = "x264_2pass.log.mbtree"; + full = malloc( strlen( cwd ) + strlen( file ) + 2 ); + sprintf( full, "%s/%s", cwd, file ); + remove( full ); + free( full ); free( cwd ); remove( "x264_2pass.log.temp" ); }
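The muxing loop in consumer_thread() no longer advances audio_pts and video_pts by frame_size; both are now read back from the muxer as AVStream.pts.val rescaled to seconds with av_q2d(), and that comparison drives the audio/video interleaving. A small sketch of the decision in isolation, assuming the same era's libavformat (where AVStream still carries an AVFrac pts); the helper name is mine, not the patch's.

    #include <libavformat/avformat.h>

    /* Returns non-zero when the next packet fed to the muxer should be
     * audio: either there is no video stream, or the audio stream's
     * position (in seconds) is behind the video stream's. */
    static int audio_is_due( AVStream *audio_st, AVStream *video_st )
    {
        double audio_pts = audio_st ?
            (double) audio_st->pts.val * av_q2d( audio_st->time_base ) : 0.0;
        double video_pts = video_st ?
            (double) video_st->pts.val * av_q2d( video_st->time_base ) : 0.0;
        return !video_st || ( audio_st && audio_pts < video_pts );
    }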
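Taken together, the new properties ("f", "acodec"/"vcodec" with the special values "none" and "list", "an"/"vn", and the "apre"/"vpre"/"fpre" presets loaded with mlt_properties_load() and applied as AVOptions) are all driven through the consumer's property list. Below is a hedged sketch of an application exercising them; the profile, file names, preset path, and codec choices are illustrative assumptions, not anything this patch prescribes.

    #include <unistd.h>
    #include <framework/mlt.h>

    int main( int argc, char **argv )
    {
        if ( argc < 2 )
            return 1;

        mlt_factory_init( NULL );
        mlt_profile profile = mlt_profile_init( "dv_pal" );

        /* argv[1] is the clip to transcode; "output.mp4" becomes the
         * consumer's "target" property, i.e. the filename that
         * consumer_thread() opens for writing. */
        mlt_producer producer = mlt_factory_producer( profile, "avformat", argv[1] );
        mlt_consumer consumer = mlt_factory_consumer( profile, "avformat", "output.mp4" );
        mlt_properties props = MLT_CONSUMER_PROPERTIES( consumer );

        /* Encoder selection plus the new preset hook: "vpre" names a
         * key=value file that add_video_stream() loads with
         * mlt_properties_load() and applies as AVOptions.  The path is
         * hypothetical. */
        mlt_properties_set( props, "vcodec", "libx264" );
        mlt_properties_set( props, "vpre", "/etc/mlt/x264-medium.properties" );
        mlt_properties_set( props, "acodec", "libmp3lame" );
        /* Setting "acodec" to "none" (or "an" to 1) would instead drop
         * the audio stream entirely, as this patch now allows. */

        mlt_consumer_connect( consumer, MLT_PRODUCER_SERVICE( producer ) );
        mlt_consumer_start( consumer );
        while ( !mlt_consumer_is_stopped( consumer ) )
            sleep( 1 );

        mlt_consumer_close( consumer );
        mlt_producer_close( producer );
        mlt_profile_close( profile );
        mlt_factory_close( );
        return 0;
    }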