framework: remove global profile, rather share one mlt_profile across a service network...
diff --git a/src/modules/avformat/consumer_avformat.c b/src/modules/avformat/consumer_avformat.c
index a7fd977a499980ce6849fe08cf1c60d56a764c85..5a1ab27201c5faf0709a8cdee2eae8f2e7f9f9ca 100644
--- a/src/modules/avformat/consumer_avformat.c
+++ b/src/modules/avformat/consumer_avformat.c
@@ -3,37 +3,39 @@
  * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
  * Author: Charles Yates <charles.yates@pandora.be>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
  *
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-// Local header files
-#include "consumer_avformat.h"
-
 // mlt Header files
+#include <framework/mlt_consumer.h>
 #include <framework/mlt_frame.h>
 
 // System header files
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <limits.h>
 #include <pthread.h>
 #include <sys/time.h>
 #include <math.h>
 
 // avformat header files
 #include <avformat.h>
+#ifdef SWSCALE
+#include <swscale.h>
+#endif
 
 //
 // This structure should be extended and made globally available in mlt
@@ -132,16 +134,16 @@ static void consumer_close( mlt_consumer this );
 /** Initialise the dv consumer.
 */
 
-mlt_consumer consumer_avformat_init( char *arg )
+mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
 {
        // Allocate the consumer
-       mlt_consumer this = mlt_consumer_new( );
+       mlt_consumer this = mlt_consumer_new( profile );
 
        // If memory allocated and initialises without error
        if ( this != NULL )
        {
                // Get properties from the consumer
-               mlt_properties properties = mlt_consumer_properties( this );
+               mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
                // Assign close callback
                this->close = consumer_close;
@@ -157,7 +159,6 @@ mlt_consumer consumer_avformat_init( char *arg )
                mlt_properties_set_int( properties, "audio_bit_rate", 128000 );
                mlt_properties_set_int( properties, "video_bit_rate", 200 * 1000 );
                mlt_properties_set_int( properties, "video_bit_rate_tolerance", 4000 * 1000 );
-               mlt_properties_set_int( properties, "frame_rate_base", 1 );
                mlt_properties_set_int( properties, "gop_size", 12 );
                mlt_properties_set_int( properties, "b_frames", 0 );
                mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE );
@@ -189,10 +190,10 @@ mlt_consumer consumer_avformat_init( char *arg )
                mlt_properties_set_int( properties, "video_rc_buffer_size", 0 );
                mlt_properties_set_double( properties, "video_rc_buffer_aggressivity", 1.0 );
                mlt_properties_set_double( properties, "video_rc_initial_cplx", 0 );
-               mlt_properties_set_double( properties, "video_i_qfactor", 1.25 );
+               mlt_properties_set_double( properties, "video_i_qfactor", -0.8 );
                mlt_properties_set_double( properties, "video_b_qfactor", 1.25 );
-               mlt_properties_set_double( properties, "video_i_qoffset", -0.8 );
-               mlt_properties_set_double( properties, "video_b_qoffset", 0 );
+               mlt_properties_set_double( properties, "video_i_qoffset", 0 );
+               mlt_properties_set_double( properties, "video_b_qoffset", 1.25 );
                mlt_properties_set_int( properties, "video_intra_quant_bias", FF_DEFAULT_QUANT_BIAS );
                mlt_properties_set_int( properties, "video_inter_quant_bias", FF_DEFAULT_QUANT_BIAS );
                mlt_properties_set_int( properties, "dct_algo", 0 );
@@ -208,6 +209,8 @@ mlt_consumer consumer_avformat_init( char *arg )
                mlt_properties_set_int( properties, "coder", 0 );
                mlt_properties_set_int( properties, "context", 0 );
                mlt_properties_set_int( properties, "predictor", 0 );
+               mlt_properties_set_int( properties, "ildct", 0 );
+               mlt_properties_set_int( properties, "ilme", 0 );
 
                // Ensure termination at end of the stream
                mlt_properties_set_int( properties, "terminate_on_pause", 1 );
@@ -228,14 +231,13 @@ mlt_consumer consumer_avformat_init( char *arg )
 static int consumer_start( mlt_consumer this )
 {
        // Get the properties
-       mlt_properties properties = mlt_consumer_properties( this );
+       mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
        // Check that we're not already running
        if ( !mlt_properties_get_int( properties, "running" ) )
        {
                // Allocate a thread
                pthread_t *thread = calloc( 1, sizeof( pthread_t ) );
-               pthread_attr_t thread_attributes;
 
                // Get the width and height
                int width = mlt_properties_get_int( properties, "width" );
@@ -269,12 +271,8 @@ static int consumer_start( mlt_consumer this )
                // Set the running state
                mlt_properties_set_int( properties, "running", 1 );
 
-               // Inherit the scheduling priority
-               pthread_attr_init( &thread_attributes );
-               pthread_attr_setinheritsched( &thread_attributes, PTHREAD_INHERIT_SCHED );
-               
                // Create the thread
-               pthread_create( thread, &thread_attributes, consumer_thread, this );
+               pthread_create( thread, NULL, consumer_thread, this );
        }
        return 0;
 }
@@ -285,7 +283,7 @@ static int consumer_start( mlt_consumer this )
 static int consumer_stop( mlt_consumer this )
 {
        // Get the properties
-       mlt_properties properties = mlt_consumer_properties( this );
+       mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
        // Check that we're running
        if ( mlt_properties_get_int( properties, "running" ) )
@@ -309,7 +307,7 @@ static int consumer_stop( mlt_consumer this )
 static int consumer_is_stopped( mlt_consumer this )
 {
        // Get the properties
-       mlt_properties properties = mlt_consumer_properties( this );
+       mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
        return !mlt_properties_get_int( properties, "running" );
 }
 
@@ -319,7 +317,7 @@ static int consumer_is_stopped( mlt_consumer this )
 static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
 {
        // Get the properties
-       mlt_properties properties = mlt_consumer_properties( this );
+       mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
        // Create a new stream
        AVStream *st = av_new_stream( oc, 1 );
@@ -327,7 +325,7 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c
        // If created, then initialise from properties
        if ( st != NULL ) 
        {
-               AVCodecContext *c = &st->codec;
+               AVCodecContext *c = st->codec;
                c->codec_id = codec_id;
                c->codec_type = CODEC_TYPE_AUDIO;
 
@@ -335,6 +333,20 @@ static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int c
                c->bit_rate = mlt_properties_get_int( properties, "audio_bit_rate" );
                c->sample_rate = mlt_properties_get_int( properties, "frequency" );
                c->channels = mlt_properties_get_int( properties, "channels" );
+
+               if ( oc->oformat->flags & AVFMT_GLOBALHEADER )
+                       c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+               // Allow the user to override the audio fourcc
+               if ( mlt_properties_get( properties, "afourcc" ) )
+               {
+                       char *tail = NULL;
+                       char *arg = mlt_properties_get( properties, "afourcc" );
+                       int tag = strtol( arg, &tail, 0 );
+                       if ( !tail || *tail )
+                               tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
+                       c->codec_tag = tag;
+               }
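/*
 * Illustrative note, not part of the patch: the fourcc override accepts
 * either a number parsed by strtol() (e.g. "0x58564944") or a
 * four-character code packed least-significant byte first, so
 *
 *     tag = 'D' + ('I' << 8) + ('V' << 16) + ('X' << 24)   // "DIVX"
 *         = 0x44 + 0x4900 + 0x560000 + 0x58000000
 *         = 0x58564944
 *
 * The same parsing is repeated for the video stream's "vfourcc" below.
 */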
        }
        else
        {
@@ -350,7 +362,7 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size
        int audio_input_frame_size = 0;
 
        // Get the context
-       AVCodecContext *c = &st->codec;
+       AVCodecContext *c = st->codec;
 
        // Find the encoder
        AVCodec *codec = avcodec_find_encoder( c->codec_id );
@@ -363,7 +375,7 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size
                if ( c->frame_size <= 1 ) 
                {
                        audio_input_frame_size = audio_outbuf_size / c->channels;
-                       switch(st->codec.codec_id) 
+                       switch(st->codec->codec_id) 
                        {
                                case CODEC_ID_PCM_S16LE:
                                case CODEC_ID_PCM_S16BE:
@@ -396,7 +408,7 @@ static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size
 
 static void close_audio( AVFormatContext *oc, AVStream *st )
 {
-       avcodec_close( &st->codec );
+       avcodec_close( st->codec );
 }
 
 /** Add a video output stream 
@@ -405,14 +417,16 @@ static void close_audio( AVFormatContext *oc, AVStream *st )
 static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
 {
        // Get the properties
-       mlt_properties properties = mlt_consumer_properties( this );
+       mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
        // Create a new stream
        AVStream *st = av_new_stream( oc, 0 );
 
        if ( st != NULL ) 
        {
-               AVCodecContext *c = &st->codec;
+               char *pix_fmt = mlt_properties_get( properties, "pix_fmt" );
+               double ar = mlt_properties_get_double( properties, "display_ratio" );
+               AVCodecContext *c = st->codec;
                c->codec_id = codec_id;
                c->codec_type = CODEC_TYPE_VIDEO;
 
@@ -421,10 +435,10 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                c->bit_rate_tolerance = mlt_properties_get_int( properties, "video_bit_rate_tolerance" );
                c->width = mlt_properties_get_int( properties, "width" );
                c->height = mlt_properties_get_int( properties, "height" );
-               c->frame_rate = mlt_properties_get_double( properties, "fps" );
-               c->frame_rate_base = mlt_properties_get_double( properties, "frame_rate_base" );
-               c->frame_rate_base = 1;
+               c->time_base.num = mlt_properties_get_int( properties, "frame_rate_den" );
+               c->time_base.den = mlt_properties_get_int( properties, "frame_rate_num" );
                c->gop_size = mlt_properties_get_int( properties, "gop_size" );
+               c->pix_fmt = pix_fmt ? avcodec_get_pix_fmt( pix_fmt ) : PIX_FMT_YUV420P;
 
                if ( mlt_properties_get_int( properties, "b_frames" ) )
                {
@@ -434,7 +448,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                }
 
                c->mb_decision = mlt_properties_get_int( properties, "mb_decision" );
-               c->sample_aspect_ratio = av_d2q( mlt_properties_get_double( properties, "aspect_ratio" ), 255 );
+               c->sample_aspect_ratio = av_d2q( ar * c->height / c->width , 255);
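/*
 * Worked example for the time_base and sample_aspect_ratio settings above
 * (illustrative, not part of the patch): for PAL the profile supplies
 * frame_rate_num = 25 and frame_rate_den = 1, giving a codec time base of
 * 1/25 second per tick; for NTSC, 30000/1001 gives 1001/30000. With
 * display_ratio = 16/9 at 720x576 the pixel aspect ratio works out to
 * (16/9) * 576 / 720 = 64/45 ~= 1.42, which av_d2q() stores as a rational
 * whose numerator and denominator do not exceed 255.
 */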
                c->mb_cmp = mlt_properties_get_int( properties, "mb_cmp" );
                c->ildct_cmp = mlt_properties_get_int( properties, "ildct_cmp" );
                c->me_sub_cmp = mlt_properties_get_int( properties, "sub_cmp" );
@@ -463,15 +477,25 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                        st->quality = FF_QP2LAMBDA * mlt_properties_get_double( properties, "qscale" );
                }
 
-               // Some formats want stream headers to be seperate (hmm)
-               if( !strcmp( oc->oformat->name, "mp4" ) || 
-                       !strcmp( oc->oformat->name, "mov" ) || 
-                       !strcmp( oc->oformat->name, "3gp" ) )
+               // Allow the user to override the video fourcc
+               if ( mlt_properties_get( properties, "vfourcc" ) )
+               {
+                       char *tail = NULL;
+                       const char *arg = mlt_properties_get( properties, "vfourcc" );
+                       int tag = strtol( arg, &tail, 0 );
+                       if ( !tail || *tail )
+                               tag = arg[ 0 ] + ( arg[ 1 ] << 8 ) + ( arg[ 2 ] << 16 ) + ( arg[ 3 ] << 24 );
+                       c->codec_tag = tag;
+               }
+
+               // Some formats want stream headers to be separate
+               if ( oc->oformat->flags & AVFMT_GLOBALHEADER ) 
                        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
                c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" );
                c->rc_min_rate = mlt_properties_get_int( properties, "video_rc_min_rate" );
                c->rc_buffer_size = mlt_properties_get_int( properties, "video_rc_buffer_size" );
+               c->rc_initial_buffer_occupancy = c->rc_buffer_size*3/4;
                c->rc_buffer_aggressivity= mlt_properties_get_double( properties, "video_rc_buffer_aggressivity" );
                c->rc_initial_cplx= mlt_properties_get_double( properties, "video_rc_initial_cplx" );
                c->i_quant_factor = mlt_properties_get_double( properties, "video_i_qfactor" );
@@ -494,6 +518,14 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                c->context_model= mlt_properties_get_int( properties, "context" );
                c->prediction_method= mlt_properties_get_int( properties, "predictor" );
                c->me_method = mlt_properties_get_int( properties, "me_method" );
+               if ( mlt_properties_get_int( properties, "progressive" ) == 0 &&
+                    mlt_properties_get_int( properties, "deinterlace" ) == 0 )
+               {
+                       if ( mlt_properties_get_int( properties, "ildct" ) )
+                               c->flags |= CODEC_FLAG_INTERLACED_DCT;
+                       if ( mlt_properties_get_int( properties, "ilme" ) )
+                               c->flags |= CODEC_FLAG_INTERLACED_ME;
+               }
        }
        else
        {
@@ -534,18 +566,30 @@ static AVFrame *alloc_picture( int pix_fmt, int width, int height )
 static int open_video(AVFormatContext *oc, AVStream *st)
 {
        // Get the codec
-       AVCodecContext *c = &st->codec;
+       AVCodecContext *video_enc = st->codec;
 
        // find the video encoder
-       AVCodec *codec = avcodec_find_encoder(c->codec_id);
+       AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
+
+       if( codec && codec->pix_fmts )
+       {
+               const enum PixelFormat *p = codec->pix_fmts;
+               for( ; *p!=-1; p++ )
+               {
+                       if( *p == video_enc->pix_fmt )
+                               break;
+               }
+               if( *p == -1 )
+                       video_enc->pix_fmt = codec->pix_fmts[ 0 ];
+       }
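/*
 * Note, not part of the patch: codec->pix_fmts is a list terminated by -1
 * (PIX_FMT_NONE); if the requested pixel format is not in the encoder's
 * list, the first format the encoder advertises is used instead.
 */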
 
        // Open the codec safely
-       return codec != NULL && avcodec_open(c, codec) >= 0;
+       return codec != NULL && avcodec_open( video_enc, codec ) >= 0;
 }
 
 void close_video(AVFormatContext *oc, AVStream *st)
 {
-       avcodec_close(&st->codec);
+       avcodec_close(st->codec);
 }
 
 static inline long time_difference( struct timeval *time1 )
@@ -564,7 +608,7 @@ static void *consumer_thread( void *arg )
        mlt_consumer this = arg;
 
        // Get the properties
-       mlt_properties properties = mlt_consumer_properties( this );
+       mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
 
        // Get the terminate on pause property
        int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
@@ -577,7 +621,7 @@ static void *consumer_thread( void *arg )
        struct timeval ante;
 
        // Get the frame rate
-       int fps = mlt_properties_get_double( properties, "fps" );
+       double fps = mlt_properties_get_double( properties, "fps" );
 
        // Get width and height
        int width = mlt_properties_get_int( properties, "width" );
@@ -611,7 +655,7 @@ static void *consumer_thread( void *arg )
        sample_fifo fifo = mlt_properties_get_data( properties, "sample_fifo", NULL );
 
        // Need two av pictures for converting
-       AVFrame *output = alloc_picture( PIX_FMT_YUV420P, width, height );
+       AVFrame *output = NULL;
        AVFrame *input = alloc_picture( PIX_FMT_YUV422, width, height );
 
        // For receiving images from an mlt_frame
@@ -630,7 +674,8 @@ static void *consumer_thread( void *arg )
        AVStream *video_st = NULL;
 
        // Time stamps
-       double audio_pts, video_pts;
+       double audio_pts = 0;
+       double video_pts = 0;
 
        // Loop variable
        int i;
@@ -703,6 +748,32 @@ static void *consumer_thread( void *arg )
        }
 
        // Update the output context
+
+       // Write metadata
+       char *tmp = NULL;
+       int metavalue;
+
+       tmp = mlt_properties_get( properties, "meta.attr.title.markup");
+       if (tmp != NULL) snprintf( oc->title, sizeof(oc->title), "%s", tmp );
+
+       tmp = mlt_properties_get( properties, "meta.attr.comment.markup");
+       if (tmp != NULL) snprintf( oc->comment, sizeof(oc->comment), "%s", tmp );
+
+       tmp = mlt_properties_get( properties, "meta.attr.author.markup");
+       if (tmp != NULL) snprintf( oc->author, sizeof(oc->author), "%s", tmp );
+
+       tmp = mlt_properties_get( properties, "meta.attr.copyright.markup");
+       if (tmp != NULL) snprintf( oc->copyright, sizeof(oc->copyright), "%s", tmp );
+
+       tmp = mlt_properties_get( properties, "meta.attr.album.markup");
+       if (tmp != NULL) snprintf( oc->album, sizeof(oc->album), "%s", tmp );
+
+       metavalue = mlt_properties_get_int( properties, "meta.attr.year.markup");
+       if (metavalue != 0) oc->year = metavalue;
+
+       metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
+       if (metavalue != 0) oc->track = metavalue;
+
        oc->oformat = fmt;
        snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
 
@@ -740,6 +811,10 @@ static void *consumer_thread( void *arg )
                mlt_properties_set_int( properties, "running", 0 );
        }
 
+       // Allocate picture
+       if ( video_st )
+               output = alloc_picture( video_st->codec->pix_fmt, width, height );
+
        // Last check - need at least one stream
        if ( audio_st == NULL && video_st == NULL )
                mlt_properties_set_int( properties, "running", 0 );
@@ -760,15 +835,15 @@ static void *consumer_thread( void *arg )
                        frames ++;
 
                        // Default audio args
-                       frame_properties = mlt_frame_properties( frame );
+                       frame_properties = MLT_FRAME_PROPERTIES( frame );
 
                        // Check for the terminated condition
                        terminated = terminate_on_pause && mlt_properties_get_double( frame_properties, "_speed" ) == 0.0;
 
                        // Get audio and append to the fifo
-                       if ( audio_st )
+                       if ( !terminated && audio_st )
                        {
-                               samples = mlt_sample_calculator( fps, frequency, count );
+                               samples = mlt_sample_calculator( fps, frequency, count ++ );
                                mlt_frame_get_audio( frame, &pcm, &aud_fmt, &frequency, &channels, &samples );
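/*
 * Illustrative note, not part of the patch: mlt_sample_calculator() above
 * returns the number of samples belonging to frame `count` so that the
 * per-frame totals average frequency / fps; passing the running frame
 * count matters for non-integer rates, e.g. 48000 Hz at 30000/1001 fps
 * alternates 1601- and 1602-sample frames (8008 samples every 5 frames)
 * rather than a constant 1601.6.
 */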
 
                                // Create the fifo if we don't have one
@@ -778,13 +853,16 @@ static void *consumer_thread( void *arg )
                                        mlt_properties_set_data( properties, "sample_fifo", fifo, 0, ( mlt_destructor )sample_fifo_close, NULL );
                                }
 
+                               if ( mlt_properties_get_double( frame_properties, "_speed" ) != 1.0 )
+                                       memset( pcm, 0, samples * channels * 2 );
+
                                // Append the samples
                                sample_fifo_append( fifo, pcm, samples * channels );
                                total_time += ( samples * 1000000 ) / frequency;
                        }
 
                        // Encode the image
-                       if ( video_st )
+                       if ( !terminated && video_st )
                                mlt_deque_push_back( queue, frame );
                        else
                                mlt_frame_close( frame );
@@ -793,16 +871,15 @@ static void *consumer_thread( void *arg )
                // While we have stuff to process, process...
                while ( 1 )
                {
-                       // Compute current audio and video time
-                       if (audio_st)
-                               audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
+                       if (audio_st)
+                               audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
                        else
-                               audio_pts = 0.0;
-       
+                               audio_pts = 0.0;
+        
                        if (video_st)
-                               video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
-                       else
-                               video_pts = 0.0;
+                               video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
+                       else
+                               video_pts = 0.0;
 
                        // Write interleaved audio and video frames
                        if ( !video_st || ( video_st && audio_st && audio_pts < video_pts ) )
@@ -813,19 +890,23 @@ static void *consumer_thread( void *arg )
                                        AVPacket pkt;
                                        av_init_packet( &pkt );
 
-                                       c = &audio_st->codec;
+                                       c = audio_st->codec;
 
                                        sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
 
                                        pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
                                        // Write the compressed frame in the media file
-                                       pkt.pts= c->coded_frame->pts;
+                                       if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
+                                               pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
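/*
 * Illustrative note, not part of the patch: av_rescale_q( a, bq, cq )
 * returns a * bq / cq using 64-bit intermediates, so a pts counted in
 * codec ticks (typically 1/sample_rate for audio, 1/fps for video) is
 * re-expressed in the stream's time base before interleaving. For
 * example, pts 48000 in a 1/48000 codec base maps to 90000 in the
 * 1/90000 base used by the MPEG muxers. The same rescale is applied to
 * the video packets and in the flush path below.
 */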
                                        pkt.flags |= PKT_FLAG_KEY;
                                        pkt.stream_index= audio_st->index;
                                        pkt.data= audio_outbuf;
 
-                                       if ( av_write_frame( oc, &pkt ) != 0) 
-                                               fprintf(stderr, "Error while writing audio frame\n");
+                                       if ( pkt.size )
+                                               if ( av_interleaved_write_frame( oc, &pkt ) != 0) 
+                                                       fprintf(stderr, "Error while writing audio frame\n");
+
+                                       audio_pts += c->frame_size;
                                }
                                else
                                {
@@ -840,9 +921,9 @@ static void *consumer_thread( void *arg )
                                        AVCodecContext *c;
 
                                        frame = mlt_deque_pop_front( queue );
-                                       frame_properties = mlt_frame_properties( frame );
+                                       frame_properties = MLT_FRAME_PROPERTIES( frame );
 
-                                       c = &video_st->codec;
+                                       c = video_st->codec;
                                        
                                        if ( mlt_properties_get_int( frame_properties, "rendered" ) )
                                        {
@@ -851,10 +932,13 @@ static void *consumer_thread( void *arg )
                                                uint8_t *p;
                                                uint8_t *q;
 
+                                               mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
+
                                                mlt_frame_get_image( frame, &image, &img_fmt, &img_width, &img_height, 0 );
 
                                                q = image;
 
+                                               // Convert the mlt frame to an AVPicture
                                                for ( i = 0; i < height; i ++ )
                                                {
                                                        p = input->data[ 0 ] + i * input->linesize[ 0 ];
@@ -866,7 +950,47 @@ static void *consumer_thread( void *arg )
                                                        }
                                                }
 
-                                               img_convert( ( AVPicture * )output, PIX_FMT_YUV420P, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
+                                               // Do the colour space conversion
+#ifdef SWSCALE
+                                               struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUV422,
+                                                       width, height, video_st->codec->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+                                               sws_scale( context, input->data, input->linesize, 0, height,
+                                                       output->data, output->linesize);
+                                               sws_freeContext( context );
+#else
+                                               img_convert( ( AVPicture * )output, video_st->codec->pix_fmt, ( AVPicture * )input, PIX_FMT_YUV422, width, height );
+#endif
+
+                                               // Apply the alpha if applicable
+                                               if ( video_st->codec->pix_fmt == PIX_FMT_RGBA32 )
+                                               {
+                                                       uint8_t *alpha = mlt_frame_get_alpha_mask( frame );
+                                                       register int n;
+
+                                                       for ( i = 0; i < height; i ++ )
+                                                       {
+                                                               n = ( width + 7 ) / 8;
+                                                               p = output->data[ 0 ] + i * output->linesize[ 0 ];
+
+                                                               #ifndef __DARWIN__
+                                                               p += 3;
+                                                               #endif
+
+                                                               switch( width % 8 )
+                                                               {
+                                                                       case 0: do { *p = *alpha++; p += 4;
+                                                                       case 7:          *p = *alpha++; p += 4;
+                                                                       case 6:          *p = *alpha++; p += 4;
+                                                                       case 5:          *p = *alpha++; p += 4;
+                                                                       case 4:          *p = *alpha++; p += 4;
+                                                                       case 3:          *p = *alpha++; p += 4;
+                                                                       case 2:          *p = *alpha++; p += 4;
+                                                                       case 1:          *p = *alpha++; p += 4;
+                                                                                       }
+                                                                                       while( --n );
+                                                               }
+                                                       }
+                                               }
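/*
 * Illustrative equivalent of the unrolled (Duff's device) copy above, not
 * part of the patch: for RGBA32 output the alpha byte of every pixel in
 * the row is overwritten from the frame's alpha mask, stepping four bytes
 * per pixel; the #ifndef __DARWIN__ offset of 3 points p at the alpha
 * channel on little-endian hosts:
 *
 *     int j;
 *     for ( j = 0; j < width; j ++, p += 4 )
 *         *p = *alpha ++;
 */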
                                        }
  
                                        if (oc->oformat->flags & AVFMT_RAWPICTURE) 
@@ -881,31 +1005,42 @@ static void *consumer_thread( void *arg )
                                                pkt.size= sizeof(AVPicture);
 
                                                ret = av_write_frame(oc, &pkt);
+                                               video_pts += c->frame_size;
                                        } 
                                        else 
                                        {
                                                // Set the quality
                                                output->quality = video_st->quality;
 
+                                               // Set frame interlace hints
+                                               output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
+                                               output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
+
                                                // Encode the image
                                                out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
 
                                                // If zero size, it means the image was buffered
-                                               if (out_size != 0) 
+                                               if (out_size > 0) 
                                                {
                                                        AVPacket pkt;
                                                        av_init_packet( &pkt );
 
-                                                       pkt.pts= c->coded_frame->pts;
-                                                       if(c->coded_frame->key_frame)
+                                                       if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
+                                                               pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
+                                                       if( c->coded_frame && c->coded_frame->key_frame )
                                                                pkt.flags |= PKT_FLAG_KEY;
                                                        pkt.stream_index= video_st->index;
                                                        pkt.data= video_outbuf;
                                                        pkt.size= out_size;
 
                                        // write the compressed frame in the media file
-                                                       ret = av_write_frame(oc, &pkt);
+                                                       ret = av_interleaved_write_frame(oc, &pkt);
+                                                       video_pts += c->frame_size;
                                                } 
+                                               else
+                                               {
+                                                       fprintf( stderr, "Error with video encode\n" );
+                                               }
                                        }
                                        frame_count++;
                                        mlt_frame_close( frame );
@@ -917,7 +1052,7 @@ static void *consumer_thread( void *arg )
                        }
                }
 
-               if ( real_time_output && frames % 25 == 0 )
+               if ( real_time_output && frames % 12 == 0 )
                {
                        long passed = time_difference( &ante );
                        if ( fifo != NULL )
@@ -934,6 +1069,70 @@ static void *consumer_thread( void *arg )
                }
        }
 
+#ifdef FLUSH
+       if ( ! real_time_output )
+       {
+               // Flush audio fifo
+               if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;)
+               {
+                       AVCodecContext *c = audio_st->codec;
+                       AVPacket pkt;
+                       av_init_packet( &pkt );
+                       pkt.size = 0;
+
+                       if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/
+                               ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) )
+                       {
+                               sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
+                               pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
+                       }
+                       if ( pkt.size <= 0 )
+                               pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
+                       if ( pkt.size <= 0 )
+                               break;
+
+                       // Write the compressed frame in the media file
+                       if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
+                               pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
+                       pkt.flags |= PKT_FLAG_KEY;
+                       pkt.stream_index = audio_st->index;
+                       pkt.data = audio_outbuf;
+                       if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
+                       {
+                               fprintf(stderr, "Error while writing flushed audio frame\n");
+                               break;
+                       }
+               }
+
+               // Flush video
+               if ( video_st && !( oc->oformat->flags & AVFMT_RAWPICTURE ) ) for (;;)
+               {
+                       AVCodecContext *c = video_st->codec;
+                       AVPacket pkt;
+                       av_init_packet( &pkt );
+
+                       // Encode the image
+                       pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
+                       if ( pkt.size <= 0 )
+                               break;
+
+                       if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
+                               pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
+                       if( c->coded_frame && c->coded_frame->key_frame )
+                               pkt.flags |= PKT_FLAG_KEY;
+                       pkt.stream_index = video_st->index;
+                       pkt.data = video_outbuf;
+
+                       // write the compressed frame in the media file
+                       if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
+                       {
+                               fprintf(stderr, "Error while writing flushed video frame\n");
+                               break;
+                       }
+               }
+       }
+#endif
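/*
 * Note on the flush loops above, not part of the patch: calling
 * avcodec_encode_audio()/avcodec_encode_video() with a NULL input asks the
 * encoder to emit any data it is still buffering internally (for example
 * reordered B frames); a return of zero or less means the encoder is
 * drained and the loop can stop.
 */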
+
        // close each codec 
        if (video_st)
                close_video(oc, video_st);
@@ -949,10 +1148,15 @@ static void *consumer_thread( void *arg )
 
        // Close the output file
        if (!(fmt->flags & AVFMT_NOFILE))
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
+               url_fclose(oc->pb);
+#else
                url_fclose(&oc->pb);
+#endif
 
        // Clean up input and output frames
-       av_free( output->data[0] );
+       if ( output )
+               av_free( output->data[0] );
        av_free( output );
        av_free( input->data[0] );
        av_free( input );