// avformat header files
#include <libavformat/avformat.h>
+#include <libavformat/avio.h>
#ifdef SWSCALE
#include <libswscale/swscale.h>
#endif
-#include <libavcodec/opt.h>
#if LIBAVUTIL_VERSION_INT >= ((50<<16)+(8<<8)+0)
#include <libavutil/pixdesc.h>
#endif
#define PIX_FMT_YUYV422 PIX_FMT_YUV422
#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 53
+#include <libavutil/opt.h>
+#define CODEC_TYPE_VIDEO AVMEDIA_TYPE_VIDEO
+#define CODEC_TYPE_AUDIO AVMEDIA_TYPE_AUDIO
+#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
+#else
+#include <libavcodec/opt.h>
+#endif
+
#define MAX_AUDIO_STREAMS (8)
#define AUDIO_ENCODE_BUFFER_SIZE (48000 * 2 * MAX_AUDIO_STREAMS)
+#define AUDIO_BUFFER_SIZE (1024 * 42)
+#define VIDEO_BUFFER_SIZE (2048 * 1024)
void avformat_lock( );
void avformat_unlock( );
sample_fifo sample_fifo_init( int frequency, int channels )
{
- sample_fifo this = calloc( 1, sizeof( sample_fifo_s ) );
- this->frequency = frequency;
- this->channels = channels;
- return this;
+ sample_fifo fifo = calloc( 1, sizeof( sample_fifo_s ) );
+ fifo->frequency = frequency;
+ fifo->channels = channels;
+ return fifo;
}
// NOTE: sample_fifo_clear and sample_fifo_check are currently disabled (not working as intended)
-void sample_fifo_clear( sample_fifo this, double time )
+void sample_fifo_clear( sample_fifo fifo, double time )
{
- int words = ( float )( time - this->time ) * this->frequency * this->channels;
- if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) && this->used > words && words > 0 )
+ int words = ( float )( time - fifo->time ) * fifo->frequency * fifo->channels;
+ if ( ( int )( ( float )time * 100 ) < ( int )( ( float )fifo->time * 100 ) && fifo->used > words && words > 0 )
{
- memmove( this->buffer, &this->buffer[ words ], ( this->used - words ) * sizeof( int16_t ) );
- this->used -= words;
- this->time = time;
+ memmove( fifo->buffer, &fifo->buffer[ words ], ( fifo->used - words ) * sizeof( int16_t ) );
+ fifo->used -= words;
+ fifo->time = time;
}
- else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )this->time * 100 ) )
+ else if ( ( int )( ( float )time * 100 ) != ( int )( ( float )fifo->time * 100 ) )
{
- this->used = 0;
- this->time = time;
+ fifo->used = 0;
+ fifo->time = time;
}
}
-void sample_fifo_check( sample_fifo this, double time )
+void sample_fifo_check( sample_fifo fifo, double time )
{
- if ( this->used == 0 )
+ if ( fifo->used == 0 )
{
- if ( ( int )( ( float )time * 100 ) < ( int )( ( float )this->time * 100 ) )
- this->time = time;
+ if ( ( int )( ( float )time * 100 ) < ( int )( ( float )fifo->time * 100 ) )
+ fifo->time = time;
}
}
-void sample_fifo_append( sample_fifo this, int16_t *samples, int count )
+void sample_fifo_append( sample_fifo fifo, int16_t *samples, int count )
{
- if ( ( this->size - this->used ) < count )
+ if ( ( fifo->size - fifo->used ) < count )
{
- this->size += count * 5;
- this->buffer = realloc( this->buffer, this->size * sizeof( int16_t ) );
+ fifo->size += count * 5;
+ fifo->buffer = realloc( fifo->buffer, fifo->size * sizeof( int16_t ) );
}
- memcpy( &this->buffer[ this->used ], samples, count * sizeof( int16_t ) );
- this->used += count;
+ memcpy( &fifo->buffer[ fifo->used ], samples, count * sizeof( int16_t ) );
+ fifo->used += count;
}
-int sample_fifo_used( sample_fifo this )
+int sample_fifo_used( sample_fifo fifo )
{
- return this->used;
+ return fifo->used;
}
-int sample_fifo_fetch( sample_fifo this, int16_t *samples, int count )
+int sample_fifo_fetch( sample_fifo fifo, int16_t *samples, int count )
{
- if ( count > this->used )
- count = this->used;
+ if ( count > fifo->used )
+ count = fifo->used;
- memcpy( samples, this->buffer, count * sizeof( int16_t ) );
- this->used -= count;
- memmove( this->buffer, &this->buffer[ count ], this->used * sizeof( int16_t ) );
+ memcpy( samples, fifo->buffer, count * sizeof( int16_t ) );
+ fifo->used -= count;
+ memmove( fifo->buffer, &fifo->buffer[ count ], fifo->used * sizeof( int16_t ) );
- this->time += ( double )count / this->channels / this->frequency;
+ fifo->time += ( double )count / fifo->channels / fifo->frequency;
return count;
}
-void sample_fifo_close( sample_fifo this )
+void sample_fifo_close( sample_fifo fifo )
{
- free( this->buffer );
- free( this );
+ free( fifo->buffer );
+ free( fifo );
}
// Forward references.
-static int consumer_start( mlt_consumer this );
-static int consumer_stop( mlt_consumer this );
-static int consumer_is_stopped( mlt_consumer this );
+static int consumer_start( mlt_consumer consumer );
+static int consumer_stop( mlt_consumer consumer );
+static int consumer_is_stopped( mlt_consumer consumer );
static void *consumer_thread( void *arg );
-static void consumer_close( mlt_consumer this );
+static void consumer_close( mlt_consumer consumer );
/** Initialise the avformat consumer.
 *
 * \param profile the mlt profile to attach the consumer to
 * \param arg optional argument; any non-NULL value enables the "prefill" property
 * \return the new consumer with callbacks assigned, or NULL if allocation failed
 */
mlt_consumer consumer_avformat_init( mlt_profile profile, char *arg )
{
	// Allocate the consumer
	mlt_consumer consumer = mlt_consumer_new( profile );

	// If memory allocated and initialises without error
	if ( consumer != NULL )
	{
		// Get properties from the consumer
		mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );

		// Assign close callback
		consumer->close = consumer_close;

		// Interpret the argument
		if ( arg != NULL )
			mlt_properties_set_int( properties, "prefill", 1 );

		// Set up start/stop/terminated callbacks
		consumer->start = consumer_start;
		consumer->stop = consumer_stop;
		consumer->is_stopped = consumer_is_stopped;

		// Allow the app to subscribe to fatal encoder errors.
		mlt_events_register( properties, "consumer-fatal-error", NULL );
	}

	// Return consumer
	return consumer;
}
/** Start the consumer.
*/
-static int consumer_start( mlt_consumer this )
+static int consumer_start( mlt_consumer consumer )
{
// Get the properties
- mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
+ mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
int error = 0;
// Report information about available muxers and codecs as YAML Tiny
snprintf( key, sizeof(key), "%d", mlt_properties_count( formats ) );
mlt_properties_set( formats, key, format->name );
}
- fprintf( stderr, "%s", mlt_properties_serialise_yaml( doc ) );
+ fprintf( stdout, "%s", mlt_properties_serialise_yaml( doc ) );
mlt_properties_close( doc );
error = 1;
}
snprintf( key, sizeof(key), "%d", mlt_properties_count( codecs ) );
mlt_properties_set( codecs, key, codec->name );
}
- fprintf( stderr, "%s", mlt_properties_serialise_yaml( doc ) );
+ fprintf( stdout, "%s", mlt_properties_serialise_yaml( doc ) );
mlt_properties_close( doc );
error = 1;
}
snprintf( key, sizeof(key), "%d", mlt_properties_count( codecs ) );
mlt_properties_set( codecs, key, codec->name );
}
- fprintf( stderr, "%s", mlt_properties_serialise_yaml( doc ) );
+ fprintf( stdout, "%s", mlt_properties_serialise_yaml( doc ) );
mlt_properties_close( doc );
error = 1;
}
}
else
{
- mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "Invalid size property %s - ignoring.\n", size );
+ mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "Invalid size property %s - ignoring.\n", size );
}
}
// We need to set these on the profile as well because the s property is
// an alias to mlt properties that correspond to profile settings.
- mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( this ) );
+ mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( consumer ) );
if ( profile )
{
profile->width = width;
mlt_properties_set_int( properties, "running", 1 );
// Create the thread
- pthread_create( thread, NULL, consumer_thread, this );
+ pthread_create( thread, NULL, consumer_thread, consumer );
}
return error;
}
/** Stop the consumer.
*/
-static int consumer_stop( mlt_consumer this )
+static int consumer_stop( mlt_consumer consumer )
{
// Get the properties
- mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
+ mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
// Check that we're running
if ( mlt_properties_get_int( properties, "running" ) )
/** Determine if the consumer is stopped.
*/
-static int consumer_is_stopped( mlt_consumer this )
+static int consumer_is_stopped( mlt_consumer consumer )
{
// Get the properties
- mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
+ mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
return !mlt_properties_get_int( properties, "running" );
}
/** Process properties as AVOptions and apply to AV context obj
*/
-static void apply_properties( void *obj, mlt_properties properties, int flags, int alloc )
+static void apply_properties( void *obj, mlt_properties properties, int flags )
{
int i;
- int count = mlt_properties_count( properties );
+ int count = mlt_properties_count( properties );
+ int alloc = 1;
+
for ( i = 0; i < count; i++ )
{
const char *opt_name = mlt_properties_get_name( properties, i );
const AVOption *opt = av_find_opt( obj, opt_name, NULL, flags, flags );
- if ( opt != NULL )
+
+ // If option not found, see if it was prefixed with a or v (-vb)
+ if ( !opt && (
+ ( opt_name[0] == 'v' && ( flags & AV_OPT_FLAG_VIDEO_PARAM ) ) ||
+ ( opt_name[0] == 'a' && ( flags & AV_OPT_FLAG_AUDIO_PARAM ) ) ) )
+ opt = av_find_opt( obj, ++opt_name, NULL, flags, flags );
+ // Apply option if found
+ if ( opt )
#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(7<<8)+0)
- av_set_string3( obj, opt_name, mlt_properties_get( properties, opt_name), alloc, NULL );
+ av_set_string3( obj, opt_name, mlt_properties_get_value( properties, i), alloc, NULL );
#elif LIBAVCODEC_VERSION_INT >= ((51<<16)+(59<<8)+0)
- av_set_string2( obj, opt_name, mlt_properties_get( properties, opt_name), alloc );
+ av_set_string2( obj, opt_name, mlt_properties_get_value( properties, i), alloc );
#else
- av_set_string( obj, opt_name, mlt_properties_get( properties, opt_name) );
+ av_set_string( obj, opt_name, mlt_properties_get_value( properties, i) );
#endif
}
}
/** Add an audio output stream
*/
-static AVStream *add_audio_stream( mlt_consumer this, AVFormatContext *oc, int codec_id, int channels )
+static AVStream *add_audio_stream( mlt_consumer consumer, AVFormatContext *oc, AVCodec *codec, int channels )
{
// Get the properties
- mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
+ mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
// Create a new stream
AVStream *st = av_new_stream( oc, oc->nb_streams );
AVCodecContext *c = st->codec;
// Establish defaults from AVOptions
+#if LIBAVCODEC_VERSION_MAJOR >= 53
+ avcodec_get_context_defaults3( c, codec );
+#else
avcodec_get_context_defaults2( c, CODEC_TYPE_AUDIO );
+#endif
- c->codec_id = codec_id;
+ c->codec_id = codec->id;
c->codec_type = CODEC_TYPE_AUDIO;
c->sample_fmt = SAMPLE_FMT_S16;
if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
if ( thread_count > 1 )
- avcodec_thread_init( c, thread_count );
+ c->thread_count = thread_count;
#endif
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
if ( apre )
{
mlt_properties p = mlt_properties_load( apre );
- apply_properties( c, p, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 );
+ apply_properties( c, p, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
mlt_properties_close( p );
}
- apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 );
+ apply_properties( c, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
int audio_qscale = mlt_properties_get_int( properties, "aq" );
if ( audio_qscale > QSCALE_NONE )
c->channels = channels;
if ( mlt_properties_get( properties, "alang" ) != NULL )
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(43<<8)+0)
+ av_metadata_set2( &oc->metadata, "language", mlt_properties_get( properties, "alang" ), 0 );
+#else
+
strncpy( st->language, mlt_properties_get( properties, "alang" ), sizeof( st->language ) );
+#endif
}
else
{
- mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not allocate a stream for audio\n" );
+ mlt_log_error( MLT_CONSUMER_SERVICE( consumer ), "Could not allocate a stream for audio\n" );
}
return st;
}
-static int open_audio( AVFormatContext *oc, AVStream *st, int audio_outbuf_size )
+static int open_audio( mlt_properties properties, AVFormatContext *oc, AVStream *st, int audio_outbuf_size, const char *codec_name )
{
// We will return the audio input size from here
int audio_input_frame_size = 0;
AVCodecContext *c = st->codec;
// Find the encoder
- AVCodec *codec = avcodec_find_encoder( c->codec_id );
+ AVCodec *codec;
+ if ( codec_name )
+ codec = avcodec_find_encoder_by_name( codec_name );
+ else
+ codec = avcodec_find_encoder( c->codec_id );
+
+#if LIBAVCODEC_VERSION_MAJOR >= 53
+ // Process properties as AVOptions on the AVCodec
+ if ( codec && codec->priv_class )
+ {
+ char *apre = mlt_properties_get( properties, "apre" );
+ if ( !c->priv_data && codec->priv_data_size )
+ {
+ c->priv_data = av_mallocz( codec->priv_data_size );
+ *(const AVClass **) c->priv_data = codec->priv_class;
+// av_opt_set_defaults( c );
+ }
+ if ( apre )
+ {
+ mlt_properties p = mlt_properties_load( apre );
+ apply_properties( c->priv_data, p, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
+ mlt_properties_close( p );
+ }
+ apply_properties( c->priv_data, properties, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
+ }
+#endif
avformat_lock();
/** Add a video output stream
*/
-static AVStream *add_video_stream( mlt_consumer this, AVFormatContext *oc, int codec_id )
+static AVStream *add_video_stream( mlt_consumer consumer, AVFormatContext *oc, AVCodec *codec )
{
// Get the properties
- mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
+ mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
// Create a new stream
AVStream *st = av_new_stream( oc, oc->nb_streams );
AVCodecContext *c = st->codec;
// Establish defaults from AVOptions
+#if LIBAVCODEC_VERSION_MAJOR >= 53
+ avcodec_get_context_defaults3( c, codec );
+#else
avcodec_get_context_defaults2( c, CODEC_TYPE_VIDEO );
+#endif
- c->codec_id = codec_id;
+ c->codec_id = codec->id;
c->codec_type = CODEC_TYPE_VIDEO;
// Setup multi-threading
if ( thread_count == 0 && getenv( "MLT_AVFORMAT_THREADS" ) )
thread_count = atoi( getenv( "MLT_AVFORMAT_THREADS" ) );
if ( thread_count > 1 )
- avcodec_thread_init( c, thread_count );
-
+#if LIBAVCODEC_VERSION_MAJOR >= 53
+ c->thread_count = thread_count;
+#else
+ avcodec_thread_init( c, thread_count );
+#endif
+
// Process properties as AVOptions
char *vpre = mlt_properties_get( properties, "vpre" );
if ( vpre )
mlt_properties_close( p );
p = mlt_properties_load( path );
- mlt_properties_debug( p, path, stderr );
+ if ( mlt_properties_count( p ) > 0 )
+ mlt_properties_debug( p, path, stderr );
free( path );
}
}
mlt_properties_debug( p, vpre, stderr );
}
#endif
- apply_properties( c, p, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 1 );
+ apply_properties( c, p, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
mlt_properties_close( p );
}
int colorspace = mlt_properties_get_int( properties, "colorspace" );
mlt_properties_set( properties, "colorspace", NULL );
- apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, 0 );
+ apply_properties( c, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
mlt_properties_set_int( properties, "colorspace", colorspace );
// Set options controlled by MLT
// for mlt properties that correspond to profile settings
mlt_properties_set_int( properties, "display_aspect_num", rational.num );
mlt_properties_set_int( properties, "display_aspect_den", rational.den );
- mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( this ) );
+ mlt_profile profile = mlt_service_profile( MLT_CONSUMER_SERVICE( consumer ) );
if ( profile )
{
profile->display_aspect_num = rational.num;
int start, end, q;
int e = sscanf( rc_override, "%d,%d,%d", &start, &end, &q );
if ( e != 3 )
- mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "Error parsing rc_override\n" );
+ mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "Error parsing rc_override\n" );
c->rc_override = av_realloc( c->rc_override, sizeof( RcOverride ) * ( i + 1 ) );
c->rc_override[i].start_frame = start;
c->rc_override[i].end_frame = end;
c->flags |= CODEC_FLAG_PASS1;
else if ( i == 2 )
c->flags |= CODEC_FLAG_PASS2;
- if ( codec_id != CODEC_ID_H264 && ( c->flags & ( CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2 ) ) )
+ if ( codec->id != CODEC_ID_H264 && ( c->flags & ( CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2 ) ) )
{
char logfilename[1024];
FILE *f;
fseek( f, 0, SEEK_SET );
logbuffer = av_malloc( size + 1 );
if ( !logbuffer )
- mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "Could not allocate log buffer\n" );
+ mlt_log_fatal( MLT_CONSUMER_SERVICE( consumer ), "Could not allocate log buffer\n" );
else
{
size = fread( logbuffer, 1, size, f );
fclose( f );
logbuffer[size] = '\0';
c->stats_in = logbuffer;
- mlt_properties_set_data( properties, "_logbuffer", logbuffer, 0, ( mlt_destructor )av_free, NULL );
}
}
}
}
else
{
- mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not allocate a stream for video\n" );
+ mlt_log_error( MLT_CONSUMER_SERVICE( consumer ), "Could not allocate a stream for video\n" );
}
return st;
return picture;
}
-
-static int open_video(AVFormatContext *oc, AVStream *st)
+
+static int open_video( mlt_properties properties, AVFormatContext *oc, AVStream *st, const char *codec_name )
{
// Get the codec
AVCodecContext *video_enc = st->codec;
// find the video encoder
- AVCodec *codec = avcodec_find_encoder( video_enc->codec_id );
+ AVCodec *codec;
+ if ( codec_name )
+ codec = avcodec_find_encoder_by_name( codec_name );
+ else
+ codec = avcodec_find_encoder( video_enc->codec_id );
+
+#if LIBAVCODEC_VERSION_MAJOR >= 53
+ // Process properties as AVOptions on the AVCodec
+ if ( codec && codec->priv_class )
+ {
+ char *vpre = mlt_properties_get( properties, "vpre" );
+ if ( !video_enc->priv_data && codec->priv_data_size )
+ {
+ video_enc->priv_data = av_mallocz( codec->priv_data_size );
+ *(const AVClass **) video_enc->priv_data = codec->priv_class;
+// av_opt_set_defaults( video_enc );
+ }
+ if ( vpre )
+ {
+ mlt_properties p = mlt_properties_load( vpre );
+ apply_properties( video_enc->priv_data, p, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
+ mlt_properties_close( p );
+ }
+ apply_properties( video_enc->priv_data, properties, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM );
+ }
+#endif
if( codec && codec->pix_fmts )
{
if ( st && st->codec )
{
avformat_lock();
+ av_freep( &st->codec->stats_in );
avcodec_close(st->codec);
avformat_unlock();
}
return time2.tv_sec * 1000000 + time2.tv_usec - time1->tv_sec * 1000000 - time1->tv_usec;
}
+static int mlt_write(void *h, uint8_t *buf, int size)
+{
+ mlt_properties properties = (mlt_properties) h;
+ mlt_events_fire( properties, "avformat-write", buf, size, NULL );
+ return 0;
+}
+
+static void write_transmitter( mlt_listener listener, mlt_properties owner, mlt_service service, void **args )
+{
+ listener( owner, service, (uint8_t*) args[0], (int) args[1] );
+}
+
+
/** The main thread - the argument is simply the consumer.
*/
static void *consumer_thread( void *arg )
{
// Map the argument to the object
- mlt_consumer this = arg;
+ mlt_consumer consumer = arg;
// Get the properties
- mlt_properties properties = MLT_CONSUMER_PROPERTIES( this );
+ mlt_properties properties = MLT_CONSUMER_PROPERTIES( consumer );
// Get the terminate on pause property
int terminate_on_pause = mlt_properties_get_int( properties, "terminate_on_pause" );
int samples = 0;
// AVFormat audio buffer and frame size
- int audio_outbuf_size = ( 1024 * 42 );
+ int audio_outbuf_size = AUDIO_BUFFER_SIZE;
uint8_t *audio_outbuf = av_malloc( audio_outbuf_size );
int audio_input_frame_size = 0;
// AVFormat video buffer and frame count
int frame_count = 0;
- int video_outbuf_size = ( 1024 * 1024 );
+ int video_outbuf_size = VIDEO_BUFFER_SIZE;
uint8_t *video_outbuf = av_malloc( video_outbuf_size );
// Used for the frame properties
char *format = mlt_properties_get( properties, "f" );
char *vcodec = mlt_properties_get( properties, "vcodec" );
char *acodec = mlt_properties_get( properties, "acodec" );
+ AVCodec *audio_codec = NULL;
+ AVCodec *video_codec = NULL;
// Used to store and override codec ids
int audio_codec_id;
audio_codec_id = CODEC_ID_NONE;
else if ( acodec )
{
- AVCodec *p = avcodec_find_encoder_by_name( acodec );
- if ( p != NULL )
- audio_codec_id = p->id;
+ audio_codec = avcodec_find_encoder_by_name( acodec );
+ if ( audio_codec )
+ {
+ audio_codec_id = audio_codec->id;
+ if ( audio_codec_id == CODEC_ID_AC3 && avcodec_find_encoder_by_name( "ac3_fixed" ) )
+ {
+ mlt_properties_set( properties, "_acodec", "ac3_fixed" );
+ acodec = mlt_properties_get( properties, "_acodec" );
+ audio_codec = avcodec_find_encoder_by_name( acodec );
+ }
+ }
else
- mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "audio codec %s unrecognised - ignoring\n", acodec );
+ {
+ audio_codec_id = CODEC_ID_NONE;
+ mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "audio codec %s unrecognised - ignoring\n", acodec );
+ }
+ }
+ else
+ {
+ audio_codec = avcodec_find_encoder( audio_codec_id );
}
// Check for video codec overides
video_codec_id = CODEC_ID_NONE;
else if ( vcodec )
{
- AVCodec *p = avcodec_find_encoder_by_name( vcodec );
- if ( p != NULL )
- video_codec_id = p->id;
+ video_codec = avcodec_find_encoder_by_name( vcodec );
+ if ( video_codec )
+ {
+ video_codec_id = video_codec->id;
+ }
else
- mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "video codec %s unrecognised - ignoring\n", vcodec );
+ {
+ video_codec_id = CODEC_ID_NONE;
+ mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "video codec %s unrecognised - ignoring\n", vcodec );
+ }
+ }
+ else
+ {
+ video_codec = avcodec_find_encoder( video_codec_id );
}
// Write metadata
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(31<<8)+0)
+ for ( i = 0; i < mlt_properties_count( properties ); i++ )
+ {
+ char *name = mlt_properties_get_name( properties, i );
+ if ( name && !strncmp( name, "meta.attr.", 10 ) )
+ {
+ char *key = strdup( name + 10 );
+ char *markup = strrchr( key, '.' );
+ if ( markup && !strcmp( markup, ".markup") )
+ {
+ markup[0] = '\0';
+ if ( !strstr( key, ".stream." ) )
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(43<<8)+0)
+ av_metadata_set2( &oc->metadata, key, mlt_properties_get_value( properties, i ), 0 );
+#else
+ av_metadata_set( &oc->metadata, key, mlt_properties_get_value( properties, i ) );
+#endif
+ }
+ free( key );
+ }
+ }
+#else
char *tmp = NULL;
int metavalue;
metavalue = mlt_properties_get_int( properties, "meta.attr.track.markup");
if (metavalue != 0) oc->track = metavalue;
+#endif
oc->oformat = fmt;
snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
+ // Get a frame now, so we can set some AVOptions from properties.
+ frame = mlt_consumer_rt_frame( consumer );
+
+ // Set the timecode from the MLT metadata if available.
+ const char *timecode = mlt_properties_get( MLT_FRAME_PROPERTIES(frame), "meta.attr.vitc.markup" );
+ if ( timecode && strcmp( timecode, "" ) )
+ {
+ mlt_properties_set( properties, "timecode", timecode );
+ if ( strchr( timecode, ';' ) )
+ mlt_properties_set_int( properties, "drop_frame_timecode", 1 );
+ }
+
// Add audio and video streams
if ( video_codec_id != CODEC_ID_NONE )
- video_st = add_video_stream( this, oc, video_codec_id );
+ video_st = add_video_stream( consumer, oc, video_codec );
if ( audio_codec_id != CODEC_ID_NONE )
{
int is_multi = 0;
{
is_multi = 1;
total_channels += j;
- audio_st[i] = add_audio_stream( this, oc, audio_codec_id, j );
+ audio_st[i] = add_audio_stream( consumer, oc, audio_codec, j );
}
}
// single track
if ( !is_multi )
{
- audio_st[0] = add_audio_stream( this, oc, audio_codec_id, channels );
+ audio_st[0] = add_audio_stream( consumer, oc, audio_codec, channels );
total_channels = channels;
}
}
+ mlt_properties_set_int( properties, "channels", total_channels );
// Set the parameters (even though we have none...)
if ( av_set_parameters(oc, NULL) >= 0 )
if ( fpre )
{
mlt_properties p = mlt_properties_load( fpre );
- apply_properties( oc, p, AV_OPT_FLAG_ENCODING_PARAM, 1 );
+ apply_properties( oc, p, AV_OPT_FLAG_ENCODING_PARAM );
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+ if ( oc->oformat && oc->oformat->priv_class && oc->priv_data )
+ apply_properties( oc->priv_data, p, AV_OPT_FLAG_ENCODING_PARAM );
+#endif
mlt_properties_close( p );
}
- apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM, 0 );
+ apply_properties( oc, properties, AV_OPT_FLAG_ENCODING_PARAM );
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+ if ( oc->oformat && oc->oformat->priv_class && oc->priv_data )
+ apply_properties( oc->priv_data, properties, AV_OPT_FLAG_ENCODING_PARAM );
+#endif
- if ( video_st && !open_video( oc, video_st ) )
+ if ( video_st && !open_video( properties, oc, video_st, vcodec? vcodec : NULL ) )
video_st = NULL;
for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ )
{
- audio_input_frame_size = open_audio( oc, audio_st[i], audio_outbuf_size );
+ audio_input_frame_size = open_audio( properties, oc, audio_st[i], audio_outbuf_size,
+ acodec? acodec : NULL );
if ( !audio_input_frame_size )
audio_st[i] = NULL;
}
+ // Setup custom I/O if redirecting
+ if ( mlt_properties_get_int( properties, "redirect" ) )
+ {
+ int buffer_size = 32768;
+ unsigned char *buffer = av_malloc( buffer_size );
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+ AVIOContext* io = avio_alloc_context( buffer, buffer_size, 1, properties, NULL, mlt_write, NULL );
+#else
+ ByteIOContext* io = av_alloc_put_byte( buffer, buffer_size, 1, properties, NULL, mlt_write, NULL );
+#endif
+ if ( buffer && io )
+ {
+ oc->pb = io;
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+ oc->flags |= AVFMT_FLAG_CUSTOM_IO;
+#endif
+ mlt_properties_set_data( properties, "avio_buffer", buffer, buffer_size, av_free, NULL );
+ mlt_properties_set_data( properties, "avio_context", io, 0, av_free, NULL );
+ mlt_events_register( properties, "avformat-write", (mlt_transmitter) write_transmitter );
+ }
+ else
+ {
+ av_free( buffer );
+ mlt_log_error( MLT_CONSUMER_SERVICE(consumer), "failed to setup output redirection\n" );
+ }
+ }
// Open the output file, if needed
- if ( !( fmt->flags & AVFMT_NOFILE ) )
+ else if ( !( fmt->flags & AVFMT_NOFILE ) )
{
- if ( url_fopen( &oc->pb, filename, URL_WRONLY ) < 0 )
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+ if ( avio_open( &oc->pb, filename, AVIO_FLAG_WRITE ) < 0 )
+#else
+ if ( url_fopen( &oc->pb, filename, URL_WRONLY ) < 0 )
+#endif
{
- mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Could not open '%s'\n", filename );
+ mlt_log_error( MLT_CONSUMER_SERVICE( consumer ), "Could not open '%s'\n", filename );
mlt_properties_set_int( properties, "running", 0 );
}
}
}
else
{
- mlt_log_error( MLT_CONSUMER_SERVICE( this ), "Invalid output format parameters\n" );
+ mlt_log_error( MLT_CONSUMER_SERVICE( consumer ), "Invalid output format parameters\n" );
mlt_properties_set_int( properties, "running", 0 );
}
while( mlt_properties_get_int( properties, "running" ) &&
( !terminated || ( video_st && mlt_deque_count( queue ) ) ) )
{
- frame = mlt_consumer_rt_frame( this );
+ if ( !frame )
+ frame = mlt_consumer_rt_frame( consumer );
// Check that we have a frame to work with
if ( frame != NULL )
if ( !terminated && audio_st[0] )
{
samples = mlt_sample_calculator( fps, frequency, count ++ );
+ channels = total_channels;
mlt_frame_get_audio( frame, (void**) &pcm, &aud_fmt, &frequency, &channels, &samples );
// Save the audio channel remap properties for later
// Append the samples
sample_fifo_append( fifo, pcm, samples * channels );
total_time += ( samples * 1000000 ) / frequency;
+
+ if ( !video_st )
+ mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
}
// Encode the image
mlt_deque_push_back( queue, frame );
else
mlt_frame_close( frame );
+ frame = NULL;
}
// While we have stuff to process, process...
// Get the audio samples
if ( n > 0 )
+ {
sample_fifo_fetch( fifo, audio_buf_1, n );
+ }
+ else if ( audio_codec_id == CODEC_ID_VORBIS && terminated )
+ {
+ // This prevents an infinite loop when some versions of vorbis do not
+ // increment pts when encoding silence.
+ audio_pts = video_pts;
+ break;
+ }
else
+ {
memset( audio_buf_1, 0, AUDIO_ENCODE_BUFFER_SIZE );
+ }
samples = n / channels;
// For each output stream
if ( codec->coded_frame && codec->coded_frame->pts != AV_NOPTS_VALUE )
{
pkt.pts = av_rescale_q( codec->coded_frame->pts, codec->time_base, stream->time_base );
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio stream %d pkt pts %lld frame pts %lld",
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio stream %d pkt pts %"PRId64" frame pts %"PRId64,
stream->index, pkt.pts, codec->coded_frame->pts );
}
pkt.flags |= PKT_FLAG_KEY;
{
if ( av_interleaved_write_frame( oc, &pkt ) )
{
- mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "error writing audio frame\n" );
+ mlt_log_fatal( MLT_CONSUMER_SERVICE( consumer ), "error writing audio frame\n" );
mlt_events_fire( properties, "consumer-fatal-error", NULL );
goto on_fatal_error;
}
}
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", codec->frame_size );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), " frame_size %d\n", codec->frame_size );
if ( i == 0 )
{
audio_pts = (double)stream->pts.val * av_q2d( stream->time_base );
if ( mlt_properties_get_int( frame_properties, "rendered" ) )
{
int i = 0;
- int j = 0;
uint8_t *p;
uint8_t *q;
for ( i = 0; i < height; i ++ )
{
p = input->data[ 0 ] + i * input->linesize[ 0 ];
- j = width;
- while( j -- )
- {
- *p ++ = *q ++;
- *p ++ = *q ++;
- }
+ memcpy( p, q, width * 2 );
+ q += width * 2;
}
// Do the colour space conversion
#endif
struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUYV422,
width, height, video_st->codec->pix_fmt, flags, NULL, NULL, NULL);
- sws_scale( context, input->data, input->linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) input->data, input->linesize, 0, height,
output->data, output->linesize);
sws_freeContext( context );
#else
if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "video pkt pts %lld frame pts %lld", pkt.pts, c->coded_frame->pts );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "video pkt pts %"PRId64" frame pts %"PRId64, pkt.pts, c->coded_frame->pts );
if( c->coded_frame && c->coded_frame->key_frame )
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
// write the compressed frame in the media file
ret = av_interleaved_write_frame(oc, &pkt);
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), " frame_size %d\n", c->frame_size );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), " frame_size %d\n", c->frame_size );
video_pts = (double)video_st->pts.val * av_q2d( video_st->time_base );
// Dual pass logging
}
else if ( out_size < 0 )
{
- mlt_log_warning( MLT_CONSUMER_SERVICE( this ), "error with video encode %d\n", frame_count );
+ mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "error with video encode %d\n", frame_count );
}
}
frame_count++;
if ( ret )
{
- mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "error writing video frame\n" );
+ mlt_log_fatal( MLT_CONSUMER_SERVICE( consumer ), "error writing video frame\n" );
mlt_events_fire( properties, "consumer-fatal-error", NULL );
goto on_fatal_error;
}
mlt_frame_close( frame );
+ frame = NULL;
}
else
{
}
}
if ( audio_st[0] )
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "audio pts %lld (%f) ", audio_st[0]->pts.val, audio_pts );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio pts %"PRId64" (%f) ", audio_st[0]->pts.val, audio_pts );
if ( video_st )
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "video pts %lld (%f) ", video_st->pts.val, video_pts );
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "\n" );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "video pts %"PRId64" (%f) ", video_st->pts.val, video_pts );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "\n" );
}
if ( real_time_output == 1 && frames % 2 == 0 )
}
if ( pkt.size <= 0 )
pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing audio size %d\n", pkt.size );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "flushing audio size %d\n", pkt.size );
if ( pkt.size <= 0 )
break;
pkt.data = audio_outbuf;
if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
{
- mlt_log_fatal( MLT_CONSUMER_SERVICE( this ), "error writing flushed audio frame\n" );
+ mlt_log_fatal( MLT_CONSUMER_SERVICE( consumer ), "error writing flushed audio frame\n" );
mlt_events_fire( properties, "consumer-fatal-error", NULL );
goto on_fatal_error;
}
// Encode the image
pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
- mlt_log_debug( MLT_CONSUMER_SERVICE( this ), "flushing video size %d\n", pkt.size );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "flushing video size %d\n", pkt.size );
if ( pkt.size <= 0 )
break;
// write the compressed frame in the media file
if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
{
- mlt_log_fatal( MLT_CONSUMER_SERVICE(this), "error writing flushed video frame\n" );
+ mlt_log_fatal( MLT_CONSUMER_SERVICE(consumer), "error writing flushed video frame\n" );
mlt_events_fire( properties, "consumer-fatal-error", NULL );
goto on_fatal_error;
}
// Close the output file
if ( !( fmt->flags & AVFMT_NOFILE ) )
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(0<<8)+0)
+ {
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+ if ( !mlt_properties_get_int( properties, "redirect" ) )
+ avio_close( oc->pb );
+#elif LIBAVFORMAT_VERSION_MAJOR >= 52
url_fclose( oc->pb );
#else
url_fclose( &oc->pb );
#endif
+ }
// Clean up input and output frames
if ( output )
// Just in case we terminated on pause
mlt_properties_set_int( properties, "running", 0 );
- mlt_consumer_stopped( this );
+ mlt_consumer_stopped( consumer );
mlt_properties_close( frame_meta_properties );
if ( mlt_properties_get_int( properties, "pass" ) > 1 )
remove( "x264_2pass.log.temp" );
}
+ while ( ( frame = mlt_deque_pop_back( queue ) ) )
+ mlt_frame_close( frame );
+
return NULL;
}
/** Close the consumer.
*/
-static void consumer_close( mlt_consumer this )
+static void consumer_close( mlt_consumer consumer )
{
// Stop the consumer
- mlt_consumer_stop( this );
+ mlt_consumer_stop( consumer );
// Close the parent
- mlt_consumer_close( this );
+ mlt_consumer_close( consumer );
// Free the memory
- free( this );
+ free( consumer );
}