Expand colorspace support to explicit 601.
src/modules/avformat/consumer_avformat.c
index 226ef6493a4acb851dcc0210e7fffc7aeeb3c254..f26c3740da3e98436e7bb6547952e95dea13f6fb 100644
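
Reader's note (not part of the patch): the new explicit 601 handling maps MLT's integer "colorspace" consumer property onto libavcodec's AVColorSpace values in add_video_stream(). A minimal sketch of that mapping, using the same enum names as the hunk further down; the 601 shorthand simply picks the SD matrix from the frame height:

	/* Sketch only: mirrors the switch added below in add_video_stream(). */
	switch ( colorspace )
	{
	case 170: c->colorspace = AVCOL_SPC_SMPTE170M; break; /* SMPTE 170M (NTSC SD) */
	case 240: c->colorspace = AVCOL_SPC_SMPTE240M; break; /* SMPTE 240M */
	case 470: c->colorspace = AVCOL_SPC_BT470BG;   break; /* BT.470BG (PAL SD) */
	case 601: /* explicit 601: choose the SD matrix by frame height */
		c->colorspace = ( 576 % c->height ) ? AVCOL_SPC_SMPTE170M  /* non-576-line (e.g. 480/486) */
		                                    : AVCOL_SPC_BT470BG;   /* 576-line material */
		break;
	case 709: c->colorspace = AVCOL_SPC_BT709;     break; /* BT.709 (HD) */
	}

With this, a caller that only knows "this is SD Rec.601" no longer has to decide between the 170 and 470 values itself.
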
 #include <swscale.h>
 #endif
 #include <opt.h>
+#if LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0)
+#include <libavutil/pixdesc.h>
+#endif
 
 #if LIBAVUTIL_VERSION_INT < (50<<16)
 #define PIX_FMT_RGB32 PIX_FMT_RGBA32
 #define PIX_FMT_YUYV422 PIX_FMT_YUV422
 #endif
 
-#define AUDIO_ENCODE_BUFFER_SIZE (48000 * 2)
+#define MAX_AUDIO_STREAMS (8)
+#define AUDIO_ENCODE_BUFFER_SIZE (48000 * 2 * MAX_AUDIO_STREAMS)
 
 //
 // This structure should be extended and made globally available in mlt
@@ -142,7 +146,7 @@ static int consumer_is_stopped( mlt_consumer this );
 static void *consumer_thread( void *arg );
 static void consumer_close( mlt_consumer this );
 
-/** Initialise the dv consumer.
+/** Initialise the consumer.
 */
 
 mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
@@ -372,13 +376,13 @@ static void apply_properties( void *obj, mlt_properties properties, int flags, i
 /** Add an audio output stream
 */
 
-static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
+static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id, int channels )
 {
        // Get the properties
        mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
        // Create a new stream
-       AVStream *st = av_new_stream( oc, 1 );
+       AVStream *st = av_new_stream( oc, oc->nb_streams );
 
        // If created, then initialise from properties
        if ( st != NULL ) 
@@ -435,7 +439,7 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c
                // Set parameters controlled by MLT
                c->sample_rate = mlt_properties_get_int( properties, "frequency" );
                c->time_base = ( AVRational ){ 1, c->sample_rate };
-               c->channels = mlt_properties_get_int( properties, "channels" );
+               c->channels = channels;
 
                if ( mlt_properties_get( properties, "alang" ) != NULL )
                        strncpy( st->language, mlt_properties_get( properties, "alang" ), sizeof( st->language ) );
@@ -513,7 +517,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
        mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
        // Create a new stream
-       AVStream *st = av_new_stream( oc, 0 );
+       AVStream *st = av_new_stream( oc, oc->nb_streams );
 
        if ( st != NULL ) 
        {
@@ -541,7 +545,10 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                        apply_properties( c, p, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 );
                        mlt_properties_close( p );
                }
+               int colorspace = mlt_properties_get_int( properties, "colorspace" );
+               mlt_properties_set( properties, "colorspace", NULL );
                apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 );
+               mlt_properties_set_int( properties, "colorspace", colorspace );
 
                // Set options controlled by MLT
                c->width = mlt_properties_get_int( properties, "width" );
@@ -550,7 +557,30 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
                if ( st->time_base.den == 0 )
                        st->time_base = c->time_base;
+#if LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0)
+               c->pix_fmt = pix_fmt ? av_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
+#else
                c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
+#endif
+               
+               switch ( colorspace )
+               {
+               case 170:
+                       c->colorspace = AVCOL_SPC_SMPTE170M;
+                       break;
+               case 240:
+                       c->colorspace = AVCOL_SPC_SMPTE240M;
+                       break;
+               case 470:
+                       c->colorspace = AVCOL_SPC_BT470BG;
+                       break;
+               case 601:
+                       c->colorspace = ( 576 % c->height ) ? AVCOL_SPC_SMPTE170M : AVCOL_SPC_BT470BG;
+                       break;
+               case 709:
+                       c->colorspace = AVCOL_SPC_BT709;
+                       break;
+               }
 
                if ( mlt_properties_get( properties, "aspect" ) )
                {
@@ -813,6 +843,7 @@ static void *consumer_thread( void *arg )
        // Get default audio properties
        mlt_audio_format aud_fmt = mlt_audio_s16;
        int channels = mlt_properties_get_int( properties, "channels" );
+       int total_channels = channels;
        int frequency = mlt_properties_get_int( properties, "frequency" );
        int16_t *pcm = NULL;
        int samples = 0;
@@ -844,7 +875,8 @@ static void *consumer_thread( void *arg )
        mlt_image_format img_fmt = mlt_image_yuv422;
 
        // For receiving audio samples back from the fifo
-       int16_t *buffer = av_malloc( AUDIO_ENCODE_BUFFER_SIZE );
+       int16_t *audio_buf_1 = av_malloc( AUDIO_ENCODE_BUFFER_SIZE );
+       int16_t *audio_buf_2 = NULL;
        int count = 0;
 
        // Allocate the context
@@ -855,17 +887,14 @@ static void *consumer_thread( void *arg )
 #endif
 
        // Streams
-       AVStream *audio_st = NULL;
        AVStream *video_st = NULL;
+       AVStream *audio_st[ MAX_AUDIO_STREAMS ];
 
        // Time stamps
        double audio_pts = 0;
        double video_pts = 0;
 
-       // Loop variable
-       int i;
-
-       // Frames despatched
+       // Frames dispatched
        long int frames = 0;
        long int total_time = 0;
 
@@ -880,6 +909,15 @@ static void *consumer_thread( void *arg )
        int audio_codec_id;
        int video_codec_id;
 
+       // Misc
+       char key[27];
+       mlt_properties frame_meta_properties = mlt_properties_new();
+
+       // Initialize audio_st
+       int i = MAX_AUDIO_STREAMS;
+       while ( i-- )
+               audio_st[i] = NULL;
+
        // Check for user selected format first
        if ( format != NULL )
 #if LIBAVFORMAT_VERSION_INT < ((52<<16)+(45<<8)+0)
@@ -964,11 +1002,33 @@ static void *consumer_thread( void *arg )
        oc->oformat = fmt;
        snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
 
-       // Add audio and video streams 
+       // Add audio and video streams
        if ( video_codec_id != CODEC_ID_NONE )
                video_st = add_video_stream( this, oc, video_codec_id );
        if ( audio_codec_id != CODEC_ID_NONE )
-               audio_st = add_audio_stream( this, oc, audio_codec_id );
+       {
+               int is_multi = 0;
+
+               total_channels = 0;
+               // multitrack audio
+               for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )
+               {
+                       sprintf( key, "channels.%d", i );
+                       int j = mlt_properties_get_int( properties, key );
+                       if ( j )
+                       {
+                               is_multi = 1;
+                               total_channels += j;
+                               audio_st[i] = add_audio_stream( this, oc, audio_codec_id, j );
+                       }
+               }
+               // single track
+               if ( !is_multi )
+               {
+                       audio_st[0] = add_audio_stream( this, oc, audio_codec_id, channels );
+                       total_channels = channels;
+               }
+       }
 
        // Set the parameters (even though we have none...)
        if ( av_set_parameters(oc, NULL) >= 0 ) 
@@ -988,11 +1048,11 @@ static void *consumer_thread( void *arg )
 
                if ( video_st && !open_video( oc, video_st ) )
                        video_st = NULL;
-               if ( audio_st )
+               for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ )
                {
-                       audio_input_frame_size = open_audio( oc, audio_st, audio_outbuf_size );
+                       audio_input_frame_size = open_audio( oc, audio_st[i], audio_outbuf_size );
                        if ( !audio_input_frame_size )
-                               audio_st = NULL;
+                               audio_st[i] = NULL;
                }
 
                // Open the output file, if needed
@@ -1005,7 +1065,7 @@ static void *consumer_thread( void *arg )
                        }
                }
        
-               // Write the stream header, if any
+               // Write the stream header.
                if ( mlt_properties_get_int( properties, "running" ) )
                        av_write_header( oc );
        }
@@ -1020,7 +1080,7 @@ static void *consumer_thread( void *arg )
                output = alloc_picture( video_st->codec->pix_fmt, width, height );
 
        // Last check - need at least one stream
-       if ( audio_st == NULL && video_st == NULL )
+       if ( !audio_st[0] && !video_st )
                mlt_properties_set_int( properties, "running", 0 );
 
        // Get the starting time (can ignore the times above)
@@ -1030,13 +1090,12 @@ static void *consumer_thread( void *arg )
        while( mlt_properties_get_int( properties, "running" ) &&
               ( !terminated || ( video_st && mlt_deque_count( queue ) ) ) )
        {
-               // Get the frame
                frame = mlt_consumer_rt_frame( this );
 
                // Check that we have a frame to work with
                if ( frame != NULL )
                {
-                       // Increment frames despatched
+                       // Increment frames dispatched
                        frames ++;
 
                        // Default audio args
@@ -1046,11 +1105,14 @@ static void *consumer_thread( void *arg )
                        terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0;
 
                        // Get audio and append to the fifo
-                       if ( !terminated && audio_st )
+                       if ( !terminated && audio_st[0] )
                        {
                                samples = mlt_sample_calculator( fps, frequency, count ++ );
                                mlt_frame_get_audio( frame, (void**) &pcm, &aud_fmt, &frequency, &channels, &samples );
 
+                               // Save the audio channel remap properties for later
+                               mlt_properties_pass( frame_meta_properties, frame_properties, "meta.map.audio." );
+
                                // Create the fifo if we don't have one
                                if ( fifo == NULL )
                                {
@@ -1058,6 +1120,7 @@ static void *consumer_thread( void *arg )
                                        mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL );
                                }
 
+                               // Silence if not normal forward speed
                                if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 )
                                        memset( pcm, 0, samples * channels * 2 );
 
@@ -1077,41 +1140,126 @@ static void *consumer_thread( void *arg )
                while ( 1 )
                {
                        // Write interleaved audio and video frames
-                       if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) )
+                       if ( !video_st || ( video_st && audio_st[0] && audio_pts < video_pts ) )
                        {
+                               // Write audio
                                if ( ( video_st && terminated ) || ( channels * audio_input_frame_size ) < sample_fifo_used( fifo ) )
                                {
-                                       AVCodecContext *c = audio_st->codec;
-                                       AVPacket pkt;
+                                       int j = 0; // channel offset into interleaved source buffer
                                        int n = FFMIN( FFMIN( channels * audio_input_frame_size, sample_fifo_used( fifo ) ), AUDIO_ENCODE_BUFFER_SIZE );
 
+                                       // Get the audio samples
                                        if ( n > 0 )
-                                               sample_fifo_fetch( fifo, buffer, n );
+                                               sample_fifo_fetch( fifo, audio_buf_1, n );
                                        else
-                                               memset( buffer, 0, AUDIO_ENCODE_BUFFER_SIZE );
-                                       
-                                       av_init_packet( &pkt );
-                                       pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
-                                       
-                                       // Write the compressed frame in the media file
-                                       if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
+                                               memset( audio_buf_1, 0, AUDIO_ENCODE_BUFFER_SIZE );
+                                       samples = n / channels;
+
+                                       // For each output stream
+                                       for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i] && j < total_channels; i++ )
                                        {
-                                               pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
-                                               mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pkt pts %lld frame pts %lld", pkt.pts, c->coded_frame->pts );
-                                       }
-                                       pkt.flags |= PKT_FLAG_KEY;
-                                       pkt.stream_index= audio_st->index;
-                                       pkt.data= audio_outbuf;
+                                               AVStream *stream = audio_st[i];
+                                               AVCodecContext *codec = stream->codec;
+                                               AVPacket pkt;
 
-                                       if ( pkt.size > 0 )
-                                               if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
-                                                       mlt_log_error( MLT_CONSUMER_SERVICE( this ), "error writing audio frame\n" );
+                                               av_init_packet( &pkt );
 
-                                       mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", c->frame_size );
-                                       if ( audio_codec_id == CODEC_ID_VORBIS )
-                                               audio_pts = (double)c->coded_frame->pts * av_q2d( audio_st->time_base );
-                                       else
-                                               audio_pts = (double)audio_st->pts.val * av_q2d( audio_st->time_base );
+                                               // Optimized for single track and no channel remap
+                                               if ( !audio_st[1] && !mlt_properties_count( frame_meta_properties ) )
+                                               {
+                                                       pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, audio_buf_1 );
+                                               }
+                                               else
+                                               {
+                                                       // Extract the audio channels according to channel mapping
+                                                       int dest_offset = 0; // channel offset into interleaved dest buffer
+
+                                                       // Get the number of channels for this stream
+                                                       sprintf( key, "channels.%d", i );
+                                                       int current_channels = mlt_properties_get_int( properties, key );
+
+                                                       // Clear the destination audio buffer.
+                                                       if ( !audio_buf_2 )
+                                                               audio_buf_2 = av_mallocz( AUDIO_ENCODE_BUFFER_SIZE );
+                                                       else
+                                                               memset( audio_buf_2, 0, AUDIO_ENCODE_BUFFER_SIZE );
+
+                                                       // For each output channel
+                                                       while ( dest_offset < current_channels && j < total_channels )
+                                                       {
+                                                               int map_start = -1, map_channels = 0;
+                                                               int source_offset = 0;
+                                                               int k;
+
+                                                               // Look for a mapping that starts at j
+                                                               for ( k = 0; k < (MAX_AUDIO_STREAMS * 2) && map_start != j; k++ )
+                                                               {
+                                                                       sprintf( key, "%d.channels", k );
+                                                                       map_channels = mlt_properties_get_int( frame_meta_properties, key );
+                                                                       sprintf( key, "%d.start", k );
+                                                                       if ( mlt_properties_get( frame_meta_properties, key ) )
+                                                                               map_start = mlt_properties_get_int( frame_meta_properties, key );
+                                                                       if ( map_start != j )
+                                                                               source_offset += map_channels;
+                                                               }
+
+                                                               // If no mapping
+                                                               if ( map_start != j )
+                                                               {
+                                                                       map_channels = current_channels;
+                                                                       source_offset = j;
+                                                               }
+
+                                                               // Copy samples if source offset valid
+                                                               if ( source_offset < channels )
+                                                               {
+                                                                       // Interleave the audio buffer with the # channels for this stream/mapping.
+                                                                       for ( k = 0; k < map_channels; k++, j++, source_offset++, dest_offset++ )
+                                                                       {
+                                                                               int16_t *src = audio_buf_1 + source_offset;
+                                                                               int16_t *dest = audio_buf_2 + dest_offset;
+                                                                               int s = samples + 1;
+
+                                                                               while ( --s ) {
+                                                                                       *dest = *src;
+                                                                                       dest += current_channels;
+                                                                                       src += channels;
+                                                                               }
+                                                                       }
+                                                               }
+                                                               // Otherwise silence
+                                                               else
+                                                               {
+                                                                       j += current_channels;
+                                                                       dest_offset += current_channels;
+                                                               }
+                                                       }
+                                                       pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, audio_buf_2 );
+                                               }
+
+                                               // Write the compressed frame in the media file
+                                               if ( codec->coded_frame && codec->coded_frame->pts != AV_NOPTS_VALUE )
+                                               {
+                                                       pkt.pts = av_rescale_q( codec->coded_frame->pts, codec->time_base, stream->time_base );
+                                                       mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio stream %d pkt pts %lld frame pts %lld",
+                                                               stream->index, pkt.pts, codec->coded_frame->pts );
+                                               }
+                                               pkt.flags |= PKT_FLAG_KEY;
+                                               pkt.stream_index = stream->index;
+                                               pkt.data = audio_outbuf;
+
+                                               if ( pkt.size > 0 )
+                                               {
+                                                       if ( av_interleaved_write_frame( oc, &pkt ) )
+                                                               mlt_log_error( MLT_CONSUMER_SERVICE( this ), "error writing audio frame %d\n", frames - 1 );
+                                               }
+
+                                               mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", codec->frame_size );
+                                               if ( i == 0 )
+                                               {
+                                                       audio_pts = (double)stream->pts.val * av_q2d( stream->time_base );
+                                               }
+                                       }
                                }
                                else
                                {
@@ -1120,6 +1268,7 @@ static void *consumer_thread( void *arg )
                        }
                        else if ( video_st )
                        {
+                               // Write video
                                if ( mlt_deque_count( queue ) )
                                {
                                        int out_size, ret;
@@ -1137,8 +1286,6 @@ static void *consumer_thread( void *arg )
                                                uint8_t *p;
                                                uint8_t *q;
 
-                                               mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
-
                                                mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
 
                                                q = image;
@@ -1157,8 +1304,15 @@ static void *consumer_thread( void *arg )
 
                                                // Do the colour space conversion
 #ifdef SWSCALE
+                                               int flags = SWS_BILINEAR;
+#ifdef USE_MMX
+                                               flags |= SWS_CPU_CAPS_MMX;
+#endif
+#ifdef USE_SSE
+                                               flags |= SWS_CPU_CAPS_MMX2;
+#endif
                                                struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUYV422,
-                                                       width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+                                                       width, height, video_st->codec->pix_fmt, flags, NULL, NULL, NULL);
                                                sws_scale( context, input->data, input->linesize, 0, height,
                                                        output->data, output->linesize);
                                                sws_freeContext( context );
@@ -1166,6 +1320,8 @@ static void *consumer_thread( void *arg )
                                                img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUYV422, width, height );
 #endif
 
+                                               mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
+
                                                // Apply the alpha if applicable
                                                if ( video_st->codec->pix_fmt == PIX_FMT_RGB32 )
                                                {
@@ -1175,11 +1331,7 @@ static void *consumer_thread( void *arg )
                                                        for ( i = 0; i < height; i ++ )
                                                        {
                                                                n = ( width + 7 ) / 8;
-                                                               p = output->data[ 0 ] + i * output->linesize[ 0 ];
-
-                                                               #ifndef __DARWIN__
-                                                               p += 3;
-                                                               #endif
+                                                               p = output->data[ 0 ] + i * output->linesize[ 0 ] + 3;
 
                                                                switch( width % 8 )
                                                                {
@@ -1262,8 +1414,8 @@ static void *consumer_thread( void *arg )
                                        break;
                                }
                        }
-                       if ( audio_st )
-                               mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st->pts.val, audio_pts );
+                       if ( audio_st[0] )
+                               mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st[0]->pts.val, audio_pts );
                        if ( video_st )
                                mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "video pts %lld (%f) ", video_st->pts.val, video_pts );
                        mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "\n" );
@@ -1286,13 +1438,14 @@ static void *consumer_thread( void *arg )
                }
        }
 
-#ifdef FLUSH
-       if ( ! real_time_output )
+       // Flush the encoder buffers
+       if ( real_time_output <= 0 )
        {
                // Flush audio fifo
-               if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;)
+               // TODO: flush all audio streams
+               if ( audio_st[0] && audio_st[0]->codec->frame_size > 1 ) for (;;)
                {
-                       AVCodecContext *c = audio_st->codec;
+                       AVCodecContext *c = audio_st[0]->codec;
                        AVPacket pkt;
                        av_init_packet( &pkt );
                        pkt.size = 0;
@@ -1300,23 +1453,24 @@ static void *consumer_thread( void *arg )
                        if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/
                                ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) )
                        {
-                               sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
-                               pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
+                               sample_fifo_fetch( fifo, audio_buf_1, channels * audio_input_frame_size );
+                               pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, audio_buf_1 );
                        }
                        if ( pkt.size <= 0 )
                                pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
+                       mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing audio size %d\n", pkt.size );
                        if ( pkt.size <= 0 )
                                break;
 
                        // Write the compressed frame in the media file
                        if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
-                               pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
+                               pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st[0]->time_base );
                        pkt.flags |= PKT_FLAG_KEY;
-                       pkt.stream_index = audio_st->index;
+                       pkt.stream_index = audio_st[0]->index;
                        pkt.data = audio_outbuf;
                        if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
                        {
-                               fprintf( stderr, "%s: Error while writing flushed audio frame\n", __FILE__ );
+                               mlt_log_error( MLT_CONSUMER_SERVICE( this ), "%s: error writing flushed audio frame\n", __FILE__ );
                                break;
                        }
                }
@@ -1330,6 +1484,7 @@ static void *consumer_thread( void *arg )
 
                        // Encode the image
                        pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
+                       mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing video size %d\n", pkt.size );
                        if ( pkt.size <= 0 )
                                break;
 
@@ -1343,39 +1498,34 @@ static void *consumer_thread( void *arg )
                        // write the compressed frame in the media file
                        if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
                        {
-                               mlt_log_error( MLT_CONSUMER_SERVICE(this), "error while writing flushing video frame\n" );
+                               mlt_log_error( MLT_CONSUMER_SERVICE(this), "error writing flushed video frame\n" );
                                break;
                        }
+                       // Dual pass logging
+                       if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out )
+                               fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out );
                }
        }
-#endif
 
-       // XXX ugly hack to prevent x264 from crashing on multi-threaded encoding
-       int pass = mlt_properties_get_int( properties, "pass" );
-       int thread_count = mlt_properties_get_int( properties, "threads" );
-       if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
-               thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
-       int multithreaded_x264 = ( video_codec_id == CODEC_ID_H264 && thread_count > 1 );
-       
+       // Write the trailer, if any
+       av_write_trailer( oc );
+
        // close each codec
-       if ( video_st && !multithreaded_x264 )
+       if ( video_st )
                close_video(oc, video_st);
-       if (audio_st)
-               close_audio(oc, audio_st);
-
-       // Write the trailer, if any
-       av_write_trailer(oc);
+       for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ )
+               close_audio( oc, audio_st[i] );
 
        // Free the streams
-       for(i = 0; i < oc->nb_streams; i++)
-               av_freep(&oc->streams[i]);
+       for ( i = 0; i < oc->nb_streams; i++ )
+               av_freep( &oc->streams[i] );
 
        // Close the output file
-       if (!(fmt->flags & AVFMT_NOFILE))
+       if ( !( fmt->flags & AVFMT_NOFILE ) )
 #if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
-               url_fclose(oc->pb);
+               url_fclose( oc->pb );
 #else
-               url_fclose(&oc->pb);
+               url_fclose( &oc->pb );
 #endif
 
        // Clean up input and output frames
@@ -1385,17 +1535,19 @@ static void *consumer_thread( void *arg )
        av_free( input->data[0] );
        av_free( input );
        av_free( video_outbuf );
-       av_free( buffer );
+       av_free( audio_buf_1 );
+       av_free( audio_buf_2 );
 
        // Free the stream
-       av_free(oc);
+       av_free( oc );
 
        // Just in case we terminated on pause
        mlt_properties_set_int( properties, "running", 0 );
 
        mlt_consumer_stopped( this );
+       mlt_properties_close( frame_meta_properties );
 
-       if ( pass == 2 )
+       if ( mlt_properties_get_int( properties, "pass" ) > 1 )
        {
                // Remove the dual pass log file
                if ( mlt_properties_get( properties, "_logfilename" ) )