#include <libavcodec/opt.h>
#endif
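+// Provide the AV_CODEC_ID_* names used below when building against libavcodec < 55,
+// where only the legacy CODEC_ID_* enums exist.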
+#if LIBAVCODEC_VERSION_MAJOR < 55
+#define AV_CODEC_ID_PCM_S16LE CODEC_ID_PCM_S16LE
+#define AV_CODEC_ID_PCM_S16BE CODEC_ID_PCM_S16BE
+#define AV_CODEC_ID_PCM_U16LE CODEC_ID_PCM_U16LE
+#define AV_CODEC_ID_PCM_U16BE CODEC_ID_PCM_U16BE
+#define AV_CODEC_ID_H264 CODEC_ID_H264
+#define AV_CODEC_ID_NONE CODEC_ID_NONE
+#define AV_CODEC_ID_AC3 CODEC_ID_AC3
+#define AV_CODEC_ID_VORBIS CODEC_ID_VORBIS
+#endif
+
#define MAX_AUDIO_STREAMS (8)
#define AUDIO_ENCODE_BUFFER_SIZE (48000 * 2 * MAX_AUDIO_STREAMS)
#define AUDIO_BUFFER_SIZE (1024 * 42)
static uint8_t* interleaved_to_planar( int samples, int channels, uint8_t* audio, int bytes_per_sample )
{
- int size = samples * channels * bytes_per_sample;
uint8_t *buffer = mlt_pool_alloc( AUDIO_ENCODE_BUFFER_SIZE );
uint8_t *p = buffer;
int c;
audio_input_frame_size = audio_outbuf_size / c->channels;
switch(st->codec->codec_id)
{
- case CODEC_ID_PCM_S16LE:
- case CODEC_ID_PCM_S16BE:
- case CODEC_ID_PCM_U16LE:
- case CODEC_ID_PCM_U16BE:
+ case AV_CODEC_ID_PCM_S16LE:
+ case AV_CODEC_ID_PCM_S16BE:
+ case AV_CODEC_ID_PCM_U16LE:
+ case AV_CODEC_ID_PCM_U16BE:
audio_input_frame_size >>= 1;
break;
default:
c->flags |= CODEC_FLAG_PASS1;
else if ( i == 2 )
c->flags |= CODEC_FLAG_PASS2;
- if ( codec->id != CODEC_ID_H264 && ( c->flags & ( CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2 ) ) )
+ if ( codec->id != AV_CODEC_ID_H264 && ( c->flags & ( CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2 ) ) )
{
char logfilename[1024];
FILE *f;
// AVFormat audio buffer and frame size
int audio_outbuf_size = AUDIO_BUFFER_SIZE;
uint8_t *audio_outbuf = av_malloc( audio_outbuf_size );
- int audio_input_frame_size = 0;
+ int audio_input_nb_samples = 0;
// AVFormat video buffer and frame count
int frame_count = 0;
sample_fifo fifo = mlt_properties_get_data( properties, "sample_fifo", NULL );
- // Need two av pictures for converting
+ // Need AVFrames for converting video and for encoding audio
- AVFrame *output = NULL;
- AVFrame *input = alloc_picture( PIX_FMT_YUYV422, width, height );
+ AVFrame *converted_avframe = NULL;
+ AVFrame *audio_avframe = NULL;
+ AVFrame *video_avframe = alloc_picture( PIX_FMT_YUYV422, width, height );
// For receiving images from an mlt_frame
uint8_t *image;
// Check for audio codec overides
if ( ( acodec && strcmp( acodec, "none" ) == 0 ) || mlt_properties_get_int( properties, "an" ) )
- audio_codec_id = CODEC_ID_NONE;
+ audio_codec_id = AV_CODEC_ID_NONE;
else if ( acodec )
{
audio_codec = avcodec_find_encoder_by_name( acodec );
if ( audio_codec )
{
audio_codec_id = audio_codec->id;
- if ( audio_codec_id == CODEC_ID_AC3 && avcodec_find_encoder_by_name( "ac3_fixed" ) )
+ if ( audio_codec_id == AV_CODEC_ID_AC3 && avcodec_find_encoder_by_name( "ac3_fixed" ) )
{
mlt_properties_set( properties, "_acodec", "ac3_fixed" );
acodec = mlt_properties_get( properties, "_acodec" );
}
else
{
- audio_codec_id = CODEC_ID_NONE;
+ audio_codec_id = AV_CODEC_ID_NONE;
mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "audio codec %s unrecognised - ignoring\n", acodec );
}
}
// Check for video codec overrides
if ( ( vcodec && strcmp( vcodec, "none" ) == 0 ) || mlt_properties_get_int( properties, "vn" ) )
- video_codec_id = CODEC_ID_NONE;
+ video_codec_id = AV_CODEC_ID_NONE;
else if ( vcodec )
{
video_codec = avcodec_find_encoder_by_name( vcodec );
}
else
{
- video_codec_id = CODEC_ID_NONE;
+ video_codec_id = AV_CODEC_ID_NONE;
mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "video codec %s unrecognised - ignoring\n", vcodec );
}
}
}
// Add audio and video streams
- if ( video_codec_id != CODEC_ID_NONE )
+ if ( video_codec_id != AV_CODEC_ID_NONE )
video_st = add_video_stream( consumer, oc, video_codec );
- if ( audio_codec_id != CODEC_ID_NONE )
+ if ( audio_codec_id != AV_CODEC_ID_NONE )
{
int is_multi = 0;
video_st = NULL;
for ( i = 0; i < MAX_AUDIO_STREAMS && audio_st[i]; i++ )
{
- audio_input_frame_size = open_audio( properties, oc, audio_st[i], audio_outbuf_size,
+ audio_input_nb_samples = open_audio( properties, oc, audio_st[i], audio_outbuf_size,
acodec? acodec : NULL );
- if ( !audio_input_frame_size )
+ if ( !audio_input_nb_samples )
{
// Remove the audio stream from the output context
int j;
}
#endif
- // Allocate picture
- if ( video_st )
- output = alloc_picture( video_st->codec->pix_fmt, width, height );
-
// Last check - need at least one stream
if ( !audio_st[0] && !video_st )
{
goto on_fatal_error;
}
+ // Allocate picture
+ if ( video_st )
+ converted_avframe = alloc_picture( video_st->codec->pix_fmt, width, height );
+
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ // Allocate audio AVFrame
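+ // avcodec_encode_audio2() consumes an AVFrame rather than a raw buffer,
+ // so allocate one up front and reuse it for every audio packet.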
+ if ( audio_st[0] )
+ {
+ audio_avframe = avcodec_alloc_frame();
+ if ( audio_avframe ) {
+ AVCodecContext *c = audio_st[0]->codec;
+ audio_avframe->format = c->sample_fmt;
+ audio_avframe->nb_samples = audio_input_nb_samples;
+ audio_avframe->channel_layout = c->channel_layout;
+ } else {
+ mlt_log_error( MLT_CONSUMER_SERVICE(consumer), "failed to allocate audio AVFrame\n" );
+ mlt_events_fire( properties, "consumer-fatal-error", NULL );
+ goto on_fatal_error;
+ }
+ }
+#endif
+
// Get the starting time (can ignore the times above)
gettimeofday( &ante, NULL );
if ( !video_st || ( video_st && audio_st[0] && audio_pts < video_pts ) )
{
// Write audio
- if ( ( video_st && terminated ) || ( channels * audio_input_frame_size ) < sample_fifo_used( fifo ) / sample_bytes )
+ if ( ( video_st && terminated ) || ( channels * audio_input_nb_samples ) < sample_fifo_used( fifo ) / sample_bytes )
{
int j = 0; // channel offset into interleaved source buffer
- int n = FFMIN( FFMIN( channels * audio_input_frame_size, sample_fifo_used( fifo ) / sample_bytes ), AUDIO_ENCODE_BUFFER_SIZE );
+ int n = FFMIN( FFMIN( channels * audio_input_nb_samples, sample_fifo_used( fifo ) / sample_bytes ), AUDIO_ENCODE_BUFFER_SIZE );
// Get the audio samples
if ( n > 0 )
{
sample_fifo_fetch( fifo, audio_buf_1, n * sample_bytes );
}
- else if ( audio_codec_id == CODEC_ID_VORBIS && terminated )
+ else if ( audio_codec_id == AV_CODEC_ID_VORBIS && terminated )
{
// This prevents an infinite loop when some versions of vorbis do not
// increment pts when encoding silence.
AVPacket pkt;
av_init_packet( &pkt );
+ pkt.data = audio_outbuf;
+ pkt.size = audio_outbuf_size;
// Optimized for single track and no channel remap
if ( !audio_st[1] && !mlt_properties_count( frame_meta_properties ) )
else if ( codec->sample_fmt == AV_SAMPLE_FMT_U8P )
p = interleaved_to_planar( samples, channels, p, sizeof( uint8_t ) );
#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 55
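+ // Point audio_avframe at the (possibly planar) sample buffer and encode through the packet-based API.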
+ avcodec_fill_audio_frame( audio_avframe, codec->channels, codec->sample_fmt,
+ (const uint8_t*) p, AUDIO_ENCODE_BUFFER_SIZE, 0 );
+ int got_packet = 0;
+ int ret = avcodec_encode_audio2( codec, &pkt, audio_avframe, &got_packet );
+ if ( ret < 0 )
+ pkt.size = ret;
+ else if ( !got_packet )
+ pkt.size = 0;
+#else
pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, p );
+ pkt.pts = codec->coded_frame? codec->coded_frame->pts : AV_NOPTS_VALUE;
+ pkt.flags |= PKT_FLAG_KEY;
+#endif
#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
if ( p != audio_buf_1 )
dest_offset += current_channels;
}
}
+#if LIBAVCODEC_VERSION_MAJOR >= 55
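+ // Same as above, but encoding from the per-stream remapped buffer (audio_buf_2).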
+ avcodec_fill_audio_frame( audio_avframe, codec->channels, codec->sample_fmt,
+ (const uint8_t*) audio_buf_2, AUDIO_ENCODE_BUFFER_SIZE, 0 );
+ int got_packet = 0;
+ int ret = avcodec_encode_audio2( codec, &pkt, audio_avframe, &got_packet );
+ if ( ret < 0 )
+ pkt.size = ret;
+ else if ( !got_packet )
+ pkt.size = 0;
+#else
pkt.size = avcodec_encode_audio( codec, audio_outbuf, audio_outbuf_size, (short*) audio_buf_2 );
+ pkt.pts = codec->coded_frame? codec->coded_frame->pts : AV_NOPTS_VALUE;
+ pkt.flags |= PKT_FLAG_KEY;
+#endif
}
- // Write the compressed frame in the media file
- if ( codec->coded_frame && codec->coded_frame->pts != AV_NOPTS_VALUE )
- {
- pkt.pts = av_rescale_q( codec->coded_frame->pts, codec->time_base, stream->time_base );
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio stream %d pkt pts %"PRId64" frame pts %"PRId64,
- stream->index, pkt.pts, codec->coded_frame->pts );
- }
- pkt.flags |= PKT_FLAG_KEY;
- pkt.stream_index = stream->index;
- pkt.data = audio_outbuf;
-
if ( pkt.size > 0 )
{
+ // Write the compressed frame in the media file
+ if ( pkt.pts != AV_NOPTS_VALUE )
+ pkt.pts = av_rescale_q( pkt.pts, codec->time_base, stream->time_base );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
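+ // The packet-based API also returns dts and duration in the codec time base; rescale them too.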
+ if ( pkt.dts != AV_NOPTS_VALUE )
+ pkt.dts = av_rescale_q( pkt.dts, codec->time_base, stream->time_base );
+ if ( pkt.duration > 0 )
+ pkt.duration = av_rescale_q( pkt.duration, codec->time_base, stream->time_base );
+#endif
+ pkt.stream_index = stream->index;
if ( av_interleaved_write_frame( oc, &pkt ) )
{
mlt_log_fatal( MLT_CONSUMER_SERVICE( consumer ), "error writing audio frame\n" );
goto on_fatal_error;
}
}
+ else if ( pkt.size < 0 )
+ {
+ mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "error with audio encode %d\n", frame_count );
+ }
mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), " frame_size %d\n", codec->frame_size );
if ( i == 0 )
// Write video
if ( mlt_deque_count( queue ) )
{
- int out_size, ret = 0;
+ int ret = 0;
AVCodecContext *c;
frame = mlt_deque_pop_front( queue );
// Convert the mlt frame to an AVPicture
for ( i = 0; i < height; i ++ )
{
- p = input->data[ 0 ] + i * input->linesize[ 0 ];
+ p = video_avframe->data[ 0 ] + i * video_avframe->linesize[ 0 ];
memcpy( p, q, width * 2 );
q += width * 2;
}
#endif
struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUYV422,
width, height, video_st->codec->pix_fmt, flags, NULL, NULL, NULL);
- sws_scale( context, (const uint8_t* const*) input->data, input->linesize, 0, height,
- output->data, output->linesize);
+ sws_scale( context, (const uint8_t* const*) video_avframe->data, video_avframe->linesize, 0, height,
+ converted_avframe->data, converted_avframe->linesize);
sws_freeContext( context );
mlt_events_fire( properties, "consumer-frame-show", frame, NULL );
for ( i = 0; i < height; i ++ )
{
n = ( width + 7 ) / 8;
- p = output->data[ 0 ] + i * output->linesize[ 0 ] + 3;
+ p = converted_avframe->data[ 0 ] + i * converted_avframe->linesize[ 0 ] + 3;
switch( width % 8 )
{
c->field_order = (mlt_properties_get_int( frame_properties, "top_field_first" )) ? AV_FIELD_TT : AV_FIELD_BB;
#endif
pkt.flags |= PKT_FLAG_KEY;
- pkt.stream_index= video_st->index;
- pkt.data= (uint8_t *)output;
- pkt.size= sizeof(AVPicture);
+ pkt.stream_index = video_st->index;
+ pkt.data = (uint8_t *)converted_avframe;
+ pkt.size = sizeof(AVPicture);
ret = av_write_frame(oc, &pkt);
video_pts += c->frame_size;
}
else
{
+ AVPacket pkt;
+ av_init_packet( &pkt );
+ pkt.data = video_outbuf;
+ pkt.size = video_outbuf_size;
+
// Set the quality
- output->quality = c->global_quality;
+ converted_avframe->quality = c->global_quality;
// Set frame interlace hints
- output->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
- output->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
- output->pts = frame_count;
+ converted_avframe->interlaced_frame = !mlt_properties_get_int( frame_properties, "progressive" );
+ converted_avframe->top_field_first = mlt_properties_get_int( frame_properties, "top_field_first" );
+ converted_avframe->pts = frame_count;
// Encode the image
- out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, output );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
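+ // avcodec_encode_video2() writes the compressed data into pkt and reports output via got_packet.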
+ int got_packet = 0;
+ ret = avcodec_encode_video2( c, &pkt, converted_avframe, &got_packet );
+ if ( ret < 0 )
+ pkt.size = ret;
+ else if ( !got_packet )
+ pkt.size = 0;
+#else
+ pkt.size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, converted_avframe );
+ pkt.pts = c->coded_frame? c->coded_frame->pts : AV_NOPTS_VALUE;
+ if ( c->coded_frame && c->coded_frame->key_frame )
+ pkt.flags |= PKT_FLAG_KEY;
+#endif
// If zero size, it means the image was buffered
- if ( out_size > 0 )
+ if ( pkt.size > 0 )
{
- AVPacket pkt;
- av_init_packet( &pkt );
-
- if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
- pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "video pkt pts %"PRId64" frame pts %"PRId64, pkt.pts, c->coded_frame->pts );
- if( c->coded_frame && c->coded_frame->key_frame )
- pkt.flags |= PKT_FLAG_KEY;
- pkt.stream_index= video_st->index;
- pkt.data= video_outbuf;
- pkt.size= out_size;
+ if ( pkt.pts != AV_NOPTS_VALUE )
+ pkt.pts = av_rescale_q( pkt.pts, c->time_base, video_st->time_base );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
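+ // Rescale dts from the codec time base as well.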
+ if ( pkt.dts != AV_NOPTS_VALUE )
+ pkt.dts = av_rescale_q( pkt.dts, c->time_base, video_st->time_base );
+#endif
+ pkt.stream_index = video_st->index;
// write the compressed frame in the media file
ret = av_interleaved_write_frame(oc, &pkt);
if ( mlt_properties_get_data( properties, "_logfile", NULL ) && c->stats_out )
fprintf( mlt_properties_get_data( properties, "_logfile", NULL ), "%s", c->stats_out );
}
- else if ( out_size < 0 )
+ else if ( pkt.size < 0 )
{
mlt_log_warning( MLT_CONSUMER_SERVICE( consumer ), "error with video encode %d\n", frame_count );
+ ret = 0;
}
}
frame_count++;
AVCodecContext *c = audio_st[0]->codec;
AVPacket pkt;
av_init_packet( &pkt );
+ pkt.data = audio_outbuf;
pkt.size = 0;
if ( fifo &&
- ( channels * audio_input_frame_size < sample_fifo_used( fifo ) / sample_bytes ) )
+ ( channels * audio_input_nb_samples < sample_fifo_used( fifo ) / sample_bytes ) )
{
- sample_fifo_fetch( fifo, audio_buf_1, channels * audio_input_frame_size * sample_bytes );
+ sample_fifo_fetch( fifo, audio_buf_1, channels * audio_input_nb_samples * sample_bytes );
void* p = audio_buf_1;
#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
if ( c->sample_fmt == AV_SAMPLE_FMT_FLTP )
- p = interleaved_to_planar( audio_input_frame_size, channels, p, sizeof( float ) );
+ p = interleaved_to_planar( audio_input_nb_samples, channels, p, sizeof( float ) );
else if ( c->sample_fmt == AV_SAMPLE_FMT_S16P )
- p = interleaved_to_planar( audio_input_frame_size, channels, p, sizeof( int16_t ) );
+ p = interleaved_to_planar( audio_input_nb_samples, channels, p, sizeof( int16_t ) );
else if ( c->sample_fmt == AV_SAMPLE_FMT_S32P )
- p = interleaved_to_planar( audio_input_frame_size, channels, p, sizeof( int32_t ) );
+ p = interleaved_to_planar( audio_input_nb_samples, channels, p, sizeof( int32_t ) );
else if ( c->sample_fmt == AV_SAMPLE_FMT_U8P )
- p = interleaved_to_planar( audio_input_frame_size, channels, p, sizeof( uint8_t ) );
+ p = interleaved_to_planar( audio_input_nb_samples, channels, p, sizeof( uint8_t ) );
#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 55
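+ // Flush the remaining fifo samples through the AVFrame-based encode API.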
+ pkt.size = audio_outbuf_size;
+ avcodec_fill_audio_frame( audio_avframe, c->channels, c->sample_fmt,
+ (const uint8_t*) p, AUDIO_ENCODE_BUFFER_SIZE, 0 );
+ int got_packet = 0;
+ int ret = avcodec_encode_audio2( c, &pkt, audio_avframe, &got_packet );
+ if ( ret < 0 )
+ pkt.size = ret;
+ else if ( !got_packet )
+ pkt.size = 0;
+#else
pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, p );
+#endif
#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
if ( p != audio_buf_1 )
mlt_pool_release( p );
#endif
}
- if ( pkt.size <= 0 )
+ if ( pkt.size <= 0 ) {
+#if LIBAVCODEC_VERSION_MAJOR >= 55
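+ // Encoding with a NULL frame drains any samples still buffered in the encoder.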
+ pkt.size = audio_outbuf_size;
+ int got_packet = 0;
+ int ret = avcodec_encode_audio2( c, &pkt, NULL, &got_packet );
+ if ( ret < 0 )
+ pkt.size = ret;
+ else if ( !got_packet )
+ pkt.size = 0;
+#else
pkt.size = avcodec_encode_audio( c, audio_outbuf, audio_outbuf_size, NULL );
+ pkt.pts = c->coded_frame? c->coded_frame->pts : AV_NOPTS_VALUE;
+ pkt.flags |= PKT_FLAG_KEY;
+#endif
+ }
mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "flushing audio size %d\n", pkt.size );
if ( pkt.size <= 0 )
break;
// Write the compressed frame in the media file
- if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
- pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, audio_st[0]->time_base );
- pkt.flags |= PKT_FLAG_KEY;
+ if ( pkt.pts != AV_NOPTS_VALUE )
+ pkt.pts = av_rescale_q( pkt.pts, c->time_base, audio_st[0]->time_base );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ if ( pkt.dts != AV_NOPTS_VALUE )
+ pkt.dts = av_rescale_q( pkt.dts, c->time_base, audio_st[0]->time_base );
+ if ( pkt.duration > 0 )
+ pkt.duration = av_rescale_q( pkt.duration, c->time_base, audio_st[0]->time_base );
+#endif
pkt.stream_index = audio_st[0]->index;
- pkt.data = audio_outbuf;
if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
{
mlt_log_fatal( MLT_CONSUMER_SERVICE( consumer ), "error writing flushed audio frame\n" );
AVCodecContext *c = video_st->codec;
AVPacket pkt;
av_init_packet( &pkt );
+ pkt.data = video_outbuf;
+ pkt.size = video_outbuf_size;
// Encode the image
+#if LIBAVCODEC_VERSION_MAJOR >= 55
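+ // A NULL frame flushes any pictures still buffered in the encoder.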
+ int got_packet = 0;
+ int ret = avcodec_encode_video2( c, &pkt, NULL, &got_packet );
+ if ( ret < 0 )
+ pkt.size = ret;
+ else if ( !got_packet )
+ pkt.size = 0;
+#else
pkt.size = avcodec_encode_video( c, video_outbuf, video_outbuf_size, NULL );
+ pkt.pts = c->coded_frame? c->coded_frame->pts : AV_NOPTS_VALUE;
+ if( c->coded_frame && c->coded_frame->key_frame )
+ pkt.flags |= PKT_FLAG_KEY;
+#endif
mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "flushing video size %d\n", pkt.size );
if ( pkt.size <= 0 )
break;
- if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
- pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
- if( c->coded_frame && c->coded_frame->key_frame )
- pkt.flags |= PKT_FLAG_KEY;
+ if ( pkt.pts != AV_NOPTS_VALUE )
+ pkt.pts = av_rescale_q( pkt.pts, c->time_base, video_st->time_base );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ if ( pkt.dts != AV_NOPTS_VALUE )
+ pkt.dts = av_rescale_q( pkt.dts, c->time_base, video_st->time_base );
+#endif
pkt.stream_index = video_st->index;
- pkt.data = video_outbuf;
// write the compressed frame in the media file
if ( av_interleaved_write_frame( oc, &pkt ) != 0 )
}
// Clean up input and output frames
- if ( output )
- av_free( output->data[0] );
- av_free( output );
- av_free( input->data[0] );
- av_free( input );
+ if ( converted_avframe )
+ av_free( converted_avframe->data[0] );
+ av_free( converted_avframe );
+ av_free( video_avframe->data[0] );
+ av_free( video_avframe );
av_free( video_outbuf );
+ av_free( audio_avframe );
av_free( audio_buf_1 );
av_free( audio_buf_2 );
#include <libavcodec/opt.h>
#endif
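+// Alias the AV_CODEC_ID_* names used below on libavcodec < 55.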
+#if LIBAVCODEC_VERSION_MAJOR < 55
+#define AV_CODEC_ID_DVVIDEO CODEC_ID_DVVIDEO
+#define AV_CODEC_ID_H264 CODEC_ID_H264
+#endif
+
#define POSITION_INITIAL (-2)
#define POSITION_INVALID (-1)
AVFormatContext *video_format;
AVCodecContext *audio_codec[ MAX_AUDIO_STREAMS ];
AVCodecContext *video_codec;
- AVFrame *av_frame;
+ AVFrame *video_frame;
+ AVFrame *audio_frame;
AVPacket pkt;
mlt_position audio_expected;
mlt_position video_expected;
mlt_properties_set( meta_media, key, "video" );
snprintf( key, sizeof(key), "meta.media.%d.stream.frame_rate", i );
double ffmpeg_fps = av_q2d( context->streams[ i ]->avg_frame_rate );
+#if LIBAVFORMAT_VERSION_MAJOR < 55
if ( isnan( ffmpeg_fps ) || ffmpeg_fps == 0 )
ffmpeg_fps = av_q2d( context->streams[ i ]->r_frame_rate );
+#endif
mlt_properties_set_double( meta_media, key, ffmpeg_fps );
snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
{
double aspect_ratio = 1.0;
- if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
+ if ( codec_context->codec_id == AV_CODEC_ID_DVVIDEO )
{
if ( pkt )
{
mlt_properties_set_int( properties, "width", codec_context->width );
mlt_properties_set_int( properties, "height", codec_context->height );
- if ( codec_context->codec_id == CODEC_ID_DVVIDEO )
+ if ( codec_context->codec_id == AV_CODEC_ID_DVVIDEO )
{
// Fetch the first frame of DV so we can read it directly
AVPacket pkt;
if ( self->last_position == POSITION_INITIAL )
find_first_pts( self, self->video_index );
- if ( self->av_frame && position + 1 == self->video_expected )
+ if ( self->video_frame && position + 1 == self->video_expected )
{
// We're paused - use last image
paused = 1;
// Remove the cached info relating to the previous position
self->current_position = POSITION_INVALID;
self->last_position = POSITION_INVALID;
- av_freep( &self->av_frame );
+ av_freep( &self->video_frame );
}
}
return paused;
*format = pick_pix_format( codec_context->pix_fmt );
// Duplicate the last image if necessary
- if ( self->av_frame && self->av_frame->linesize[0]
+ if ( self->video_frame && self->video_frame->linesize[0]
&& ( paused || self->current_position >= req_position ) )
{
// Duplicate it
}
else
#endif
- convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+ convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
format, *width, *height, &alpha );
got_picture = 1;
}
int decode_errors = 0;
// Construct an AVFrame for YUV422 conversion
- if ( !self->av_frame )
- self->av_frame = avcodec_alloc_frame( );
+ if ( !self->video_frame )
+ self->video_frame = avcodec_alloc_frame( );
while( ret >= 0 && !got_picture )
{
if ( int_position >= req_position )
codec_context->skip_loop_filter = AVDISCARD_NONE;
#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
- ret = avcodec_decode_video2( codec_context, self->av_frame, &got_picture, &self->pkt );
+ ret = avcodec_decode_video2( codec_context, self->video_frame, &got_picture, &self->pkt );
#else
- ret = avcodec_decode_video( codec_context, self->av_frame, &got_picture, self->pkt.data, self->pkt.size );
+ ret = avcodec_decode_video( codec_context, self->video_frame, &got_picture, self->pkt.data, self->pkt.size );
#endif
// Note: decode may fail at the beginning of an MPEG file (B-frames referencing frames before the first I-frame), so allow a few errors.
if ( ret < 0 )
if ( got_picture )
{
// Get position of reordered frame
- int_position = self->av_frame->reordered_opaque;
+ int_position = self->video_frame->reordered_opaque;
#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(106<<8)+0))
- pts = best_pts( self, self->av_frame->pkt_pts, self->av_frame->pkt_dts );
+ pts = best_pts( self, self->video_frame->pkt_pts, self->video_frame->pkt_dts );
if ( pts != AV_NOPTS_VALUE )
{
if ( self->first_pts != AV_NOPTS_VALUE )
{
if ( self->vdpau->is_decoded )
{
- struct vdpau_render_state *render = (struct vdpau_render_state*) self->av_frame->data[0];
+ struct vdpau_render_state *render = (struct vdpau_render_state*) self->video_frame->data[0];
void *planes[3];
uint32_t pitches[3];
VdpYCbCrFormat dest_format = VDP_YCBCR_FORMAT_YV12;
if ( !self->vdpau->buffer )
self->vdpau->buffer = mlt_pool_alloc( codec_context->width * codec_context->height * 3 / 2 );
- self->av_frame->data[0] = planes[0] = self->vdpau->buffer;
- self->av_frame->data[2] = planes[1] = self->vdpau->buffer + codec_context->width * codec_context->height;
- self->av_frame->data[1] = planes[2] = self->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
- self->av_frame->linesize[0] = pitches[0] = codec_context->width;
- self->av_frame->linesize[1] = pitches[1] = codec_context->width / 2;
- self->av_frame->linesize[2] = pitches[2] = codec_context->width / 2;
+ self->video_frame->data[0] = planes[0] = self->vdpau->buffer;
+ self->video_frame->data[2] = planes[1] = self->vdpau->buffer + codec_context->width * codec_context->height;
+ self->video_frame->data[1] = planes[2] = self->vdpau->buffer + codec_context->width * codec_context->height * 5 / 4;
+ self->video_frame->linesize[0] = pitches[0] = codec_context->width;
+ self->video_frame->linesize[1] = pitches[1] = codec_context->width / 2;
+ self->video_frame->linesize[2] = pitches[2] = codec_context->width / 2;
VdpStatus status = vdp_surface_get_bits( render->surface, dest_format, planes, pitches );
if ( status == VDP_STATUS_OK )
{
- convert_image( self, self->av_frame, *buffer, PIX_FMT_YUV420P,
+ convert_image( self, self->video_frame, *buffer, PIX_FMT_YUV420P,
format, *width, *height, &alpha );
}
else
}
else
#endif
- convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+ convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
format, *width, *height, &alpha );
- self->top_field_first |= self->av_frame->top_field_first;
+ self->top_field_first |= self->video_frame->top_field_first;
self->current_position = int_position;
}
else
// Try to duplicate last image if there was a decoding failure
- // TODO: with multithread decoding a partial frame decoding resulting
- // in failure also resets av_frame making test below fail.
+ // TODO: with multithreaded decoding, a failed partial frame decode
+ // also resets video_frame, making the test below fail.
- if ( !image_size && self->av_frame && self->av_frame->linesize[0] )
+ if ( !image_size && self->video_frame && self->video_frame->linesize[0] )
{
// Duplicate it
if ( ( image_size = allocate_buffer( frame, codec_context, buffer, format, width, height ) ) )
}
else
#endif
- convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+ convert_image( self, self->video_frame, *buffer, codec_context->pix_fmt,
format, *width, *height, &alpha );
got_picture = 1;
}
// Set the progressive flag
if ( mlt_properties_get( properties, "force_progressive" ) )
mlt_properties_set_int( frame_properties, "progressive", !!mlt_properties_get_int( properties, "force_progressive" ) );
- else if ( self->av_frame )
- mlt_properties_set_int( frame_properties, "progressive", !self->av_frame->interlaced_frame );
+ else if ( self->video_frame )
+ mlt_properties_set_int( frame_properties, "progressive", !self->video_frame->interlaced_frame );
// Set the field order property for this frame
if ( mlt_properties_get( properties, "force_tff" ) )
// Find the codec
AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
#ifdef VDPAU
- if ( codec_context->codec_id == CODEC_ID_H264 )
+ if ( codec_context->codec_id == AV_CODEC_ID_H264 )
{
if ( ( codec = avcodec_find_decoder_by_name( "h264_vdpau" ) ) )
{
mlt_properties_set_int( properties, "height", self->video_codec->height );
}
// For DV, we'll just use the saved aspect ratio
- if ( codec_context->codec_id != CODEC_ID_DVVIDEO )
+ if ( codec_context->codec_id != AV_CODEC_ID_DVVIDEO )
get_aspect_ratio( properties, stream, self->video_codec, NULL );
// Determine the fps first from the codec
// If the muxer reports a frame rate different than the codec
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
double muxer_fps = av_q2d( stream->avg_frame_rate );
+#if LIBAVFORMAT_VERSION_MAJOR < 55
if ( isnan( muxer_fps ) || muxer_fps == 0 )
muxer_fps = av_q2d( stream->r_frame_rate );
+#endif
#else
double muxer_fps = av_q2d( stream->r_frame_rate );
#endif
// Choose the lesser - the wrong one tends to be off by some multiple of 10
source_fps = FFMIN( source_fps, muxer_fps );
- if ( source_fps >= 1.0 && ( source_fps < muxer_fps || isnan( muxer_fps ) ) )
+ if ( source_fps > 1.0 && ( source_fps < muxer_fps || isnan( muxer_fps ) ) )
{
mlt_properties_set_int( properties, "meta.media.frame_rate_num", self->video_codec->time_base.den );
mlt_properties_set_int( properties, "meta.media.frame_rate_den", self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
}
else if ( muxer_fps > 0 )
{
+#if LIBAVFORMAT_VERSION_MAJOR >= 55
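+ // r_frame_rate is not available with libavformat 55, so use avg_frame_rate instead.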
+ AVRational frame_rate = stream->avg_frame_rate;
+#else
AVRational frame_rate = stream->r_frame_rate;
+#endif
// With my samples, when r_frame_rate != 1000 but avg_frame_rate is valid,
// avg_frame_rate gives an approximate value that does not match the media well.
// Also, on my sample where r_frame_rate = 1000, using avg_frame_rate directly
// results in very choppy output, but a slightly different value works great.
-#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
+#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0) && LIBAVFORMAT_VERSION_MAJOR < 55
if ( av_q2d( stream->r_frame_rate ) >= 1000 && av_q2d( stream->avg_frame_rate ) > 0 )
frame_rate = av_d2q( av_q2d( stream->avg_frame_rate ), 1024 );
#endif
}
}
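+// Copy planar samples from a decoded AVFrame into an interleaved buffer.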
+static void planar_to_interleaved2( uint8_t *dest, AVFrame *src, int samples, int channels, int bytes_per_sample )
+{
+ int s, c;
+ for ( s = 0; s < samples; s++ )
+ {
+ for ( c = 0; c < channels; c++ )
+ {
+ memcpy( dest, &src->data[c][s * bytes_per_sample], bytes_per_sample );
+ dest += bytes_per_sample;
+ }
+ }
+}
+
static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int channels, int samples, double timecode, double fps )
{
// Fetch the audio_format
int data_size = self->audio_buffer_size[ index ];
// Decode the audio
-#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
- ret = avcodec_decode_audio3( codec_context, (int16_t*) decode_buffer, &data_size, &pkt );
-#elif (LIBAVCODEC_VERSION_INT >= ((51<<16)+(29<<8)+0))
- ret = avcodec_decode_audio2( codec_context, decode_buffer, &data_size, ptr, len );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
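+ // avcodec_decode_audio4() returns samples in an AVFrame; derive the byte count
+ // from nb_samples and read the data from the frame.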
+ if ( !self->audio_frame )
+ self->audio_frame = avcodec_alloc_frame();
+ else
+ avcodec_get_frame_defaults( self->audio_frame );
+ ret = avcodec_decode_audio4( codec_context, self->audio_frame, &data_size, &pkt );
+ if ( data_size ) {
+ data_size = av_samples_get_buffer_size( NULL, codec_context->channels,
+ self->audio_frame->nb_samples, codec_context->sample_fmt, 1 );
+ decode_buffer = self->audio_frame->data[0];
+ }
#else
- ret = avcodec_decode_audio( codec_context, decode_buffer, &data_size, ptr, len );
+ ret = avcodec_decode_audio3( codec_context, (int16_t*) decode_buffer, &data_size, &pkt );
#endif
if ( ret < 0 )
{
self->audio_buffer_size[ index ] = ( audio_used + convert_samples * 2 ) * channels * sizeof_sample;
audio_buffer = self->audio_buffer[ index ] = mlt_pool_realloc( audio_buffer, self->audio_buffer_size[ index ] );
}
- uint8_t *source = decode_buffer;
uint8_t *dest = &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ];
switch ( codec_context->sample_fmt )
{
case AV_SAMPLE_FMT_S16P:
case AV_SAMPLE_FMT_S32P:
case AV_SAMPLE_FMT_FLTP:
- planar_to_interleaved( dest, source, convert_samples, codec_context->channels, sizeof_sample );
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ planar_to_interleaved2( dest, self->audio_frame, convert_samples, codec_context->channels, sizeof_sample );
+#else
+ planar_to_interleaved( dest, decode_buffer, convert_samples, codec_context->channels, sizeof_sample );
+#endif
break;
#endif
default:
// Cleanup av contexts
av_free_packet( &self->pkt );
- av_free( self->av_frame );
+ av_free( self->video_frame );
+ av_free( self->audio_frame );
pthread_mutex_lock( &self->open_mutex );
int i;
for ( i = 0; i < MAX_AUDIO_STREAMS; i++ )