#define AV_CODEC_ID_NONE CODEC_ID_NONE
#define AV_CODEC_ID_AC3 CODEC_ID_AC3
#define AV_CODEC_ID_VORBIS CODEC_ID_VORBIS
+#define AV_CODEC_ID_RAWVIDEO CODEC_ID_RAWVIDEO
+#define AV_CODEC_ID_MJPEG CODEC_ID_MJPEG
#endif
#define MAX_AUDIO_STREAMS (8)
mlt_properties_set_int( properties, "display_aspect_den", rational.den );
// Now compute the sample aspect ratio
- rational = av_d2q( ar * height / width, 255 );
+ rational = av_d2q( ar * height / FFMAX(width, 1), 255 );
// Update the profile and properties as well since this is an alias
// for mlt properties that correspond to profile settings
// Write callback wired into the muxer's byte output (signature matches an
// AVIO write_packet callback — confirm at the avio/oc setup site, which is
// outside this excerpt). Instead of writing to a file, it forwards each
// encoded buffer to listeners of the "avformat-write" event.
static int mlt_write(void *h, uint8_t *buf, int size)
{
mlt_properties properties = (mlt_properties) h;
// Patch: pass the size by address. Event arguments travel as void*, and
// round-tripping an int through void* is not portable (pointer may be
// wider than int); the matching transmitter now dereferences args[1].
- mlt_events_fire( properties, "avformat-write", buf, size, NULL );
+ mlt_events_fire( properties, "avformat-write", buf, &size, NULL );
// Always reports success; listeners handle their own failures.
return 0;
}
// Transmitter for the "avformat-write" event: unpacks the marshaled
// argument array and invokes the listener as (owner, service, buf, size).
static void write_transmitter( mlt_listener listener, mlt_properties owner, mlt_service service, void **args )
{
// Patch: args[1] now carries an int* (see mlt_write, which fires the event
// with &size), so dereference it instead of truncating the pointer value
// itself to int — the old cast was wrong on LP64/LLP64 targets.
- listener( owner, service, (uint8_t*) args[0], (int) args[1] );
+ int *p_size = (int*) args[1];
+ listener( owner, service, (uint8_t*) args[0], *p_size );
}
/** The main thread - the argument is simply the consumer.
int count = 0;
// Allocate the context
- AVFormatContext *oc = avformat_alloc_context( );
+ AVFormatContext *oc = NULL;
// Streams
AVStream *video_st = NULL;
char key[27];
mlt_properties frame_meta_properties = mlt_properties_new();
int error_count = 0;
+ int64_t synth_audio_pts = 0;
// Initialize audio_st
int i = MAX_AUDIO_STREAMS;
if ( filename == NULL || !strcmp( filename, "" ) )
filename = "pipe:";
+#if LIBAVUTIL_VERSION_INT >= ((53<<16)+(2<<8)+0)
+ avformat_alloc_output_context2( &oc, fmt, format, filename );
+#else
+ oc = avformat_alloc_context( );
+ oc->oformat = fmt;
+ snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
+
+ if ( oc->oformat && oc->oformat->priv_class && !oc->priv_data && oc->oformat->priv_data_size ) {
+ oc->priv_data = av_mallocz( oc->oformat->priv_data_size );
+ if ( oc->priv_data ) {
+ *(const AVClass**)oc->priv_data = oc->oformat->priv_class;
+ av_opt_set_defaults( oc->priv_data );
+ }
+ }
+#endif
+
// Get the codec ids selected
audio_codec_id = fmt->audio_codec;
video_codec_id = fmt->video_codec;
}
}
- oc->oformat = fmt;
- snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
-
// Get a frame now, so we can set some AVOptions from properties.
frame = mlt_consumer_rt_frame( consumer );
if ( video_st )
converted_avframe = alloc_picture( video_st->codec->pix_fmt, width, height );
-#if LIBAVCODEC_VERSION_MAJOR >= 55
+#if LIBAVCODEC_VERSION_MAJOR >= 54
// Allocate audio AVFrame
if ( audio_st[0] )
{
else if ( codec->sample_fmt == AV_SAMPLE_FMT_U8P )
p = interleaved_to_planar( samples, channels, p, sizeof( uint8_t ) );
#endif
-#if LIBAVCODEC_VERSION_MAJOR >= 55
+#if LIBAVCODEC_VERSION_MAJOR >= 54
audio_avframe->nb_samples = FFMAX( samples, audio_input_nb_samples );
+ if ( audio_codec_id == AV_CODEC_ID_VORBIS )
+ audio_avframe->pts = synth_audio_pts;
+ synth_audio_pts += audio_avframe->nb_samples;
avcodec_fill_audio_frame( audio_avframe, codec->channels, codec->sample_fmt,
(const uint8_t*) p, AUDIO_ENCODE_BUFFER_SIZE, 0 );
int got_packet = 0;
dest_offset += current_channels;
}
}
-#if LIBAVCODEC_VERSION_MAJOR >= 55
+#if LIBAVCODEC_VERSION_MAJOR >= 54
audio_avframe->nb_samples = FFMAX( samples, audio_input_nb_samples );
+ if ( audio_codec_id == AV_CODEC_ID_VORBIS )
+ audio_avframe->pts = synth_audio_pts;
+ synth_audio_pts += audio_avframe->nb_samples;
avcodec_fill_audio_frame( audio_avframe, codec->channels, codec->sample_fmt,
(const uint8_t*) audio_buf_2, AUDIO_ENCODE_BUFFER_SIZE, 0 );
int got_packet = 0;
goto on_fatal_error;
}
error_count = 0;
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio stream %d pkt pts %"PRId64" frame_size %d stream pts %"PRId64"\n",
+ stream->index, pkt.pts, codec->frame_size, stream->pts.val );
}
else if ( pkt.size < 0 )
{
goto on_fatal_error;
}
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), " frame_size %d\n", codec->frame_size );
if ( i == 0 )
{
audio_pts = (double)stream->pts.val * av_q2d( stream->time_base );
{
AVPacket pkt;
av_init_packet( &pkt );
- pkt.data = video_outbuf;
- pkt.size = video_outbuf_size;
+ if ( c->codec->id == AV_CODEC_ID_RAWVIDEO ) {
+ pkt.data = NULL;
+ pkt.size = 0;
+ } else {
+ pkt.data = video_outbuf;
+ pkt.size = video_outbuf_size;
+ }
// Set the quality
converted_avframe->quality = c->global_quality;
+ converted_avframe->pts = frame_count;
// Set frame interlace hints
converted_avframe->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
converted_avframe->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
- converted_avframe->pts = frame_count;
+#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(61<<8)+100)
+ if ( mlt_properties_get_int( frame_properties, "progressive" ) )
+ c->field_order = AV_FIELD_PROGRESSIVE;
+ else if ( c->codec_id == AV_CODEC_ID_MJPEG )
+ c->field_order = (mlt_properties_get_int( frame_properties, "top_field_first" )) ? AV_FIELD_TT : AV_FIELD_BB;
+ else
+ c->field_order = (mlt_properties_get_int( frame_properties, "top_field_first" )) ? AV_FIELD_TB : AV_FIELD_BT;
+#endif
// Encode the image
#if LIBAVCODEC_VERSION_MAJOR >= 55
pkt.data = audio_outbuf;
pkt.size = 0;
- if ( fifo &&
- ( channels * audio_input_nb_samples < sample_fifo_used( fifo ) / sample_bytes ) )
+ if ( fifo && sample_fifo_used( fifo ) > 0 )
{
- sample_fifo_fetch( fifo, audio_buf_1, channels * audio_input_nb_samples * sample_bytes );
+ // Drain the MLT FIFO
+ int samples = FFMIN( FFMIN( channels * audio_input_nb_samples, sample_fifo_used( fifo ) / sample_bytes ), AUDIO_ENCODE_BUFFER_SIZE );
+ sample_fifo_fetch( fifo, audio_buf_1, samples * sample_bytes );
void* p = audio_buf_1;
#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
if ( c->sample_fmt == AV_SAMPLE_FMT_FLTP )
else if ( c->sample_fmt == AV_SAMPLE_FMT_U8P )
p = interleaved_to_planar( audio_input_nb_samples, channels, p, sizeof( uint8_t ) );
#endif
-#if LIBAVCODEC_VERSION_MAJOR >= 55
+#if LIBAVCODEC_VERSION_MAJOR >= 54
pkt.size = audio_outbuf_size;
- audio_avframe->nb_samples = audio_input_nb_samples;
+ audio_avframe->nb_samples = FFMAX( samples / channels, audio_input_nb_samples );
+ if ( audio_codec_id == AV_CODEC_ID_VORBIS )
+ audio_avframe->pts = synth_audio_pts;
+ synth_audio_pts += audio_avframe->nb_samples;
avcodec_fill_audio_frame( audio_avframe, c->channels, c->sample_fmt,
(const uint8_t*) p, AUDIO_ENCODE_BUFFER_SIZE, 0 );
int got_packet = 0;
else if ( !got_packet )
pkt.size = 0;
#else
- c->frame_size = audio_input_nb_samples;
+ c->frame_size = FFMAX( samples / channels, audio_input_nb_samples );
pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, p );
#endif
#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
if ( p != audio_buf_1 )
mlt_pool_release( p );
#endif
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "flushing audio size %d\n", pkt.size );
}
- if ( pkt.size <= 0 ) {
-#if LIBAVCODEC_VERSION_MAJOR >= 55
- pkt.size = audio_outbuf_size;
- int got_packet = 0;
- int ret = avcodec_encode_audio2( c, &pkt, NULL, &got_packet );
- if ( ret < 0 )
- pkt.size = ret;
- else if ( !got_packet )
- pkt.size = 0;
+ else
+ {
+ // Drain the codec
+ if ( pkt.size <= 0 ) {
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+ pkt.size = audio_outbuf_size;
+ int got_packet = 0;
+ int ret = avcodec_encode_audio2( c, &pkt, NULL, &got_packet );
+ if ( ret < 0 )
+ pkt.size = ret;
+ else if ( !got_packet )
+ pkt.size = 0;
#else
- pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
- pkt.pts = c->coded_frame? c->coded_frame->pts : AV_NOPTS_VALUE;
- pkt.flags |= PKT_FLAG_KEY;
+ pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
+ pkt.pts = c->coded_frame? c->coded_frame->pts : AV_NOPTS_VALUE;
+ pkt.flags |= PKT_FLAG_KEY;
#endif
+ }
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "flushing audio size %d\n", pkt.size );
+ if ( pkt.size <= 0 )
+ break;
}
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "flushing audio size %d\n", pkt.size );
- if ( pkt.size <= 0 )
- break;
// Write the compressed frame in the media file
if ( pkt.pts != AV_NOPTS_VALUE )
pkt.stream_index = audio_st[0]->index;
if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
{
- mlt_log_fatal( MLT_CONSUMER_SERVICE( consumer ), "error writing flushed audio frame\n" );
- mlt_events_fire( properties, "consumer-fatal-error", NULL );
- goto on_fatal_error;
+ mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "error writing flushed audio frame\n" );
+ break;
}
}
AVCodecContext *c = video_st->codec;
AVPacket pkt;
av_init_packet( &pkt );
- pkt.data = video_outbuf;
- pkt.size = video_outbuf_size;
+ if ( c->codec->id == AV_CODEC_ID_RAWVIDEO ) {
+ pkt.data = NULL;
+ pkt.size = 0;
+ } else {
+ pkt.data = video_outbuf;
+ pkt.size = video_outbuf_size;
+ }
// Encode the image
#if LIBAVCODEC_VERSION_MAJOR >= 55
free( full );
free( cwd );
remove( "x264_2pass.log.temp" );
+
+ // Recent versions of libavcodec/x264 support passlogfile and need cleanup if specified.
+ if ( !mlt_properties_get( properties, "_logfilename" ) &&
+ mlt_properties_get( properties, "passlogfile" ) )
+ {
+ file = mlt_properties_get( properties, "passlogfile" );
+ remove( file );
+ full = malloc( strlen( file ) + strlen( ".mbtree" ) + 1 );
+ sprintf( full, "%s.mbtree", file );
+ remove( full );
+ free( full );
+ }
}
while ( ( frame = mlt_deque_pop_back( queue ) ) )