framework: remove global profile, rather share one mlt_profile across a service netwo...
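In practice, this change means a consumer is now constructed against an explicit mlt_profile passed in by the caller rather than consulting a global one. A minimal sketch of the new call pattern, assuming mlt_factory_consumer() takes the profile as its first argument at this revision (the "dv_pal" profile name and "out.dv" target are purely illustrative):

#include <framework/mlt.h>

int main( void )
{
	// Sketch only: create a named profile and hand it to the consumer at
	// construction time instead of relying on the removed global profile.
	// The factory signature below is assumed for this revision.
	mlt_factory_init( NULL );
	mlt_profile profile = mlt_profile_init( "dv_pal" );
	mlt_consumer consumer = mlt_factory_consumer( profile, "avformat", "out.dv" );

	if ( consumer != NULL )
		mlt_consumer_close( consumer );
	mlt_profile_close( profile );
	mlt_factory_close( );

	return 0;
}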
diff --git a/src/modules/avformat/consumer_avformat.c b/src/modules/avformat/consumer_avformat.c
index 4918a77ccd8682e5d665caeb6f31c967b60d5609..5a1ab27201c5faf0709a8cdee2eae8f2e7f9f9ca 100644
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-// Local header files
-#include "consumer_avformat.h"
-
 // mlt Header files
+#include <framework/mlt_consumer.h>
 #include <framework/mlt_frame.h>
 
 // System header files
@@ -136,10 +134,10 @@ static void consumer_close( mlt_consumer this );
 /** Initialise the dv consumer.
 */
 
-mlt_consumer consumer_avformat_init( char *arg )
+mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
 {
        // Allocate the consumer
-       mlt_consumer this = mlt_consumer_new( );
+       mlt_consumer this = mlt_consumer_new( profile );
 
        // If memory allocated and initialises without error
        if ( this != NULL )
@@ -164,7 +162,7 @@ mlt_consumer consumer_avformat_init( char *arg )
                mlt_properties_set_int( properties, "gop_size", 12 );
                mlt_properties_set_int( properties, "b_frames", 0 );
                mlt_properties_set_int( properties, "mb_decision", FF_MB_DECISION_SIMPLE );
-               mlt_properties_set_double( properties, "qscale", 1 );
+               mlt_properties_set_double( properties, "qscale", 0 );
                mlt_properties_set_int( properties, "me_method", ME_EPZS );
                mlt_properties_set_int( properties, "mb_cmp", FF_CMP_SAD );
                mlt_properties_set_int( properties, "ildct_cmp", FF_CMP_VSAD );
@@ -192,10 +190,10 @@ mlt_consumer consumer_avformat_init( char *arg )
                mlt_properties_set_int( properties, "video_rc_buffer_size", 0 );
                mlt_properties_set_double( properties, "video_rc_buffer_aggressivity", 1.0 );
                mlt_properties_set_double( properties, "video_rc_initial_cplx", 0 );
-               mlt_properties_set_double( properties, "video_i_qfactor", 1.25 );
+               mlt_properties_set_double( properties, "video_i_qfactor", -0.8 );
                mlt_properties_set_double( properties, "video_b_qfactor", 1.25 );
-               mlt_properties_set_double( properties, "video_i_qoffset", -0.8 );
-               mlt_properties_set_double( properties, "video_b_qoffset", 0 );
+               mlt_properties_set_double( properties, "video_i_qoffset", 0 );
+               mlt_properties_set_double( properties, "video_b_qoffset", 1.25 );
                mlt_properties_set_int( properties, "video_intra_quant_bias", FF_DEFAULT_QUANT_BIAS );
                mlt_properties_set_int( properties, "video_inter_quant_bias", FF_DEFAULT_QUANT_BIAS );
                mlt_properties_set_int( properties, "dct_algo", 0 );
@@ -211,6 +209,8 @@ mlt_consumer consumer_avformat_init( char *arg )
                mlt_properties_set_int( properties, "coder", 0 );
                mlt_properties_set_int( properties, "context", 0 );
                mlt_properties_set_int( properties, "predictor", 0 );
+               mlt_properties_set_int( properties, "ildct", 0 );
+               mlt_properties_set_int( properties, "ilme", 0 );
 
                // Ensure termination at end of the stream
                mlt_properties_set_int( properties, "terminate_on_pause", 1 );
@@ -495,6 +495,7 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                c->rc_max_rate = mlt_properties_get_int( properties, "video_rc_max_rate" );
                c->rc_min_rate = mlt_properties_get_int( properties, "video_rc_min_rate" );
                c->rc_buffer_size = mlt_properties_get_int( properties, "video_rc_buffer_size" );
+               c->rc_initial_buffer_occupancy = c->rc_buffer_size*3/4;
                c->rc_buffer_aggressivity= mlt_properties_get_double( properties, "video_rc_buffer_aggressivity" );
                c->rc_initial_cplx= mlt_properties_get_double( properties, "video_rc_initial_cplx" );
                c->i_quant_factor = mlt_properties_get_double( properties, "video_i_qfactor" );
@@ -517,6 +518,14 @@ static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int c
                c->context_model= mlt_properties_get_int( properties, "context" );
                c->prediction_method= mlt_properties_get_int( properties, "predictor" );
                c->me_method = mlt_properties_get_int( properties, "me_method" );
+               if ( mlt_properties_get_int( properties, "progressive" ) == 0 &&
+                    mlt_properties_get_int( properties, "deinterlace" ) == 0 )
+               {
+                       if ( mlt_properties_get_int( properties, "ildct" ) )
+                               c->flags |= CODEC_FLAG_INTERLACED_DCT;
+                       if ( mlt_properties_get_int( properties, "ilme" ) )
+                               c->flags |= CODEC_FLAG_INTERLACED_ME;
+               }
        }
        else
        {
@@ -612,7 +621,7 @@ static void *consumer_thread( void *arg )
        struct timeval ante;
 
        // Get the frame rate
-       int fps = mlt_properties_get_double( properties, "fps" );
+       double fps = mlt_properties_get_double( properties, "fps" );
 
        // Get width and height
        int width = mlt_properties_get_int( properties, "width" );
@@ -1003,6 +1012,10 @@ static void *consumer_thread( void *arg )
                                                // Set the quality
                                                output->quality = video_st->quality;
 
+                                               // Set frame interlace hints
+                                               output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
+                                               output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
+
                                                // Encode the image
                                                out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
 
@@ -1056,6 +1069,70 @@ static void *consumer_thread( void *arg )
                }
        }
 
+#ifdef FLUSH
+       if ( ! real_time_output )
+       {
+               // Flush audio fifo
+               if ( audio_st && audio_st->codec->frame_size > 1 ) for (;;)
+               {
+                       AVCodecContext *c = audio_st->codec;
+                       AVPacket pkt;
+                       av_init_packet( &pkt );
+                       pkt.size = 0;
+
+                       if ( /*( c->capabilities & CODEC_CAP_SMALL_LAST_FRAME ) &&*/
+                               ( channels * audio_input_frame_size < sample_fifo_used( fifo ) ) )
+                       {
+                               sample_fifo_fetch( fifo, buffer, channels * audio_input_frame_size );
+                               pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, buffer );
+                       }
+                       if ( pkt.size <= 0 )
+                               pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
+                       if ( pkt.size <= 0 )
+                               break;
+
+                       // Write the compressed frame in the media file
+                       if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
+                               pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st->time_base );
+                       pkt.flags |= PKT_FLAG_KEY;
+                       pkt.stream_index = audio_st->index;
+                       pkt.data = audio_outbuf;
+                       if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
+                       {
+                               fprintf(stderr, "Error while writing flushed audio frame\n");
+                               break;
+                       }
+               }
+
+               // Flush video
+               if ( video_st && !( oc->oformat->flags & AVFMT_RAWPICTURE ) ) for (;;)
+               {
+                       AVCodecContext *c = video_st->codec;
+                       AVPacket pkt;
+                       av_init_packet( &pkt );
+
+                       // Encode the image
+                       pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
+                       if ( pkt.size <= 0 )
+                               break;
+
+                       if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
+                               pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
+                       if( c->coded_frame && c->coded_frame->key_frame )
+                               pkt.flags |= PKT_FLAG_KEY;
+                       pkt.stream_index = video_st->index;
+                       pkt.data = video_outbuf;
+
+                       // write the compressed frame in the media file
+                       if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
+                       {
+                               fprintf(stderr, "Error while writing flushed video frame\n");
+                               break;
+                       }
+               }
+       }
+#endif
+
        // close each codec 
        if (video_st)
                close_video(oc, video_st);
@@ -1071,7 +1148,11 @@ static void *consumer_thread( void *arg )
 
        // Close the output file
        if (!(fmt->flags & AVFMT_NOFILE))
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
+               url_fclose(oc->pb);
+#else
                url_fclose(&oc->pb);
+#endif
 
        // Clean up input and output frames
        if ( output )