#define POSITION_INITIAL (-2)
#define POSITION_INVALID (-1)
-#define MAX_AUDIO_STREAMS (10)
+#define MAX_AUDIO_STREAMS (32)
#define MAX_VDPAU_SURFACES (10)
struct producer_avformat_s
AVCodecContext *audio_codec[ MAX_AUDIO_STREAMS ];
AVCodecContext *video_codec;
AVFrame *av_frame;
+ AVPacket pkt;
ReSampleContext *audio_resample[ MAX_AUDIO_STREAMS ];
mlt_position audio_expected;
mlt_position video_expected;
int max_channel;
int max_frequency;
unsigned int invalid_pts_counter;
+ unsigned int invalid_dts_counter;
double resample_factor;
mlt_cache image_cache;
int colorspace;
+ int full_luma;
pthread_mutex_t video_mutex;
pthread_mutex_t audio_mutex;
mlt_deque apackets;
mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
}
+static int64_t best_pts( producer_avformat self, int64_t pts, int64_t dts )
+{
+ self->invalid_pts_counter += pts == AV_NOPTS_VALUE;
+ self->invalid_dts_counter += dts == AV_NOPTS_VALUE;
+ if ( ( self->invalid_pts_counter <= self->invalid_dts_counter
+ || dts == AV_NOPTS_VALUE ) && pts != AV_NOPTS_VALUE )
+ return pts;
+ else
+ return dts;
+}
+
static int seek_video( producer_avformat self, mlt_position position,
- int64_t req_position, int preseek, int use_pts )
+ int64_t req_position, int preseek )
{
mlt_producer producer = self->parent;
int paused = 0;
if ( self->last_position == POSITION_INITIAL )
{
int ret = 0;
- int toscan = 100;
+ int toscan = 500;
AVPacket pkt;
while ( ret >= 0 && toscan-- > 0 )
{
ret = av_read_frame( context, &pkt );
- if ( ret >= 0 && pkt.stream_index == self->video_index )
+ if ( ret >= 0 && pkt.stream_index == self->video_index && ( pkt.flags & PKT_FLAG_KEY ) )
{
mlt_log_debug( MLT_PRODUCER_SERVICE(producer),
"first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n",
pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
- self->first_pts = use_pts? pkt.pts : pkt.dts;
+ self->first_pts = best_pts( self, pkt.pts, pkt.dts );
if ( self->first_pts != AV_NOPTS_VALUE )
toscan = 0;
}
// Seek to the timestamp
// NOTE: reopen_video is disabled at this time because it is causing trouble with A/V sync.
- if ( 1 || use_pts || req_position > 0 || self->last_position <= 0 )
+ if ( 1 || req_position > 0 || self->last_position <= 0 )
{
codec_context->skip_loop_filter = AVDISCARD_NONREF;
av_seek_frame( context, self->video_index, timestamp, AVSEEK_FLAG_BACKWARD );
#endif
}
-static mlt_image_format pick_format( enum PixelFormat pix_fmt )
+static mlt_image_format pick_pix_format( enum PixelFormat pix_fmt )
{
switch ( pix_fmt )
{
}
}
-static void convert_image( AVFrame *frame, uint8_t *buffer, int pix_fmt,
- mlt_image_format *format, int width, int height, int colorspace, uint8_t **alpha )
+static mlt_audio_format pick_audio_format( int sample_fmt )
+{
+ switch ( sample_fmt )
+ {
+ // interleaved
+ case AV_SAMPLE_FMT_S16:
+ return mlt_audio_s16;
+ case AV_SAMPLE_FMT_S32:
+ return mlt_audio_s32le;
+ case AV_SAMPLE_FMT_FLT:
+ return mlt_audio_f32le;
+ // planar - this producer converts planar to interleaved
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
+ case AV_SAMPLE_FMT_S16P:
+ return mlt_audio_s16;
+ case AV_SAMPLE_FMT_S32P:
+ return mlt_audio_s32le;
+ case AV_SAMPLE_FMT_FLTP:
+ return mlt_audio_f32le;
+#endif
+ default:
+ return mlt_audio_none;
+ }
+}
+
+static void convert_image( producer_avformat self, AVFrame *frame, uint8_t *buffer, int pix_fmt,
+ mlt_image_format *format, int width, int height, uint8_t **alpha )
{
#ifdef SWSCALE
- int full_range = -1;
int flags = SWS_BICUBIC | SWS_ACCURATE_RND;
#ifdef USE_MMX
// extract alpha from planar formats
if ( ( pix_fmt == PIX_FMT_YUVA420P
-#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
+#if defined(FFUDIV) && LIBAVUTIL_VERSION_INT >= ((51<<16)+(35<<8)+101)
|| pix_fmt == PIX_FMT_YUVA444P
#endif
) &&
output.linesize[0] = width;
output.linesize[1] = width >> 1;
output.linesize[2] = width >> 1;
- set_luma_transfer( context, colorspace, full_range );
+ set_luma_transfer( context, self->colorspace, -1 );
sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
width, height, PIX_FMT_RGB24, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
- set_luma_transfer( context, colorspace, full_range );
+ set_luma_transfer( context, self->colorspace, self->full_luma );
sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
width, height, PIX_FMT_RGBA, flags | SWS_FULL_CHR_H_INT, NULL, NULL, NULL);
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
- set_luma_transfer( context, colorspace, full_range );
+ set_luma_transfer( context, self->colorspace, self->full_luma );
sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
width, height, PIX_FMT_YUYV422, flags | SWS_FULL_CHR_H_INP, NULL, NULL, NULL);
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
- set_luma_transfer( context, colorspace, full_range );
+ set_luma_transfer( context, self->colorspace, -1 );
sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
img_convert( &output, PIX_FMT_RGB24, (AVPicture *)frame, pix_fmt, width, height );
}
- else if ( format == mlt_image_rgb24a || format == mlt_image_opengl )
+ else if ( *format == mlt_image_rgb24a || *format == mlt_image_opengl )
{
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_RGB32, width, height );
mlt_properties frame_properties = MLT_FRAME_PROPERTIES( frame );
// Obtain the frame number of this frame
- mlt_position position = mlt_properties_get_position( frame_properties, "avformat_position" );
+ mlt_position position = mlt_frame_original_position( frame );
// Get the producer properties
mlt_properties properties = MLT_PRODUCER_PROPERTIES( producer );
int image_size = 0;
// Get the image cache
- if ( ! self->image_cache && ! mlt_properties_get_int( properties, "noimagecache" ) )
+ if ( ! self->image_cache )
{
- self->image_cache = mlt_cache_init();
+ // if cache size supplied by environment variable
+ int cache_supplied = getenv( "MLT_AVFORMAT_CACHE" ) != NULL;
+ int cache_size = cache_supplied? atoi( getenv( "MLT_AVFORMAT_CACHE" ) ) : 0;
+
+ // cache size supplied via property
+ if ( mlt_properties_get( properties, "cache" ) )
+ {
+ cache_supplied = 1;
+ cache_size = mlt_properties_get_int( properties, "cache" );
+ }
+ if ( mlt_properties_get_int( properties, "noimagecache" ) )
+ cache_size = 0;
+ // create cache if not disabled
+ if ( !cache_supplied || cache_size > 0 )
+ self->image_cache = mlt_cache_init();
+ // set cache size if supplied
+ if ( self->image_cache && cache_supplied )
+ mlt_cache_set_size( self->image_cache, cache_size );
}
if ( self->image_cache )
{
- mlt_frame original = mlt_cache_get_frame( self->image_cache, mlt_frame_get_position( frame ) );
+ mlt_frame original = mlt_cache_get_frame( self->image_cache, position );
if ( original )
{
mlt_properties orig_props = MLT_FRAME_PROPERTIES( original );
}
// Cache miss
- // Packet
- AVPacket pkt;
-
// We may want to use the source fps if available
double source_fps = mlt_properties_get_double( properties, "meta.media.frame_rate_num" ) /
mlt_properties_get_double( properties, "meta.media.frame_rate_den" );
strcmp( codec_context->codec->name, "mjpeg" ) &&
strcmp( codec_context->codec->name, "rawvideo" ) );
- // Turn on usage of new seek API and PTS for seeking
- int use_pts = self->seekable &&
- codec_context->codec_id == CODEC_ID_H264 && codec_context->has_b_frames;
- if ( mlt_properties_get( properties, "use_pts" ) )
- use_pts = mlt_properties_get_int( properties, "use_pts" );
double delay = mlt_properties_get_double( properties, "video_delay" );
// Seek if necessary
const char *interp = mlt_properties_get( frame_properties, "rescale.interp" );
int preseek = must_decode
#if defined(FFUDIV) && LIBAVFORMAT_VERSION_INT >= ((53<<16)+(24<<8)+2)
- && ( !use_pts || ( interp && strcmp( interp, "nearest" ) ) )
+ && ( interp && strcmp( interp, "nearest" ) )
#endif
&& codec_context->has_b_frames;
- int paused = seek_video( self, position, req_position, preseek, use_pts );
+ int paused = seek_video( self, position, req_position, preseek );
// Seek might have reopened the file
context = self->video_format;
codec_context->pix_fmt == PIX_FMT_RGBA ||
codec_context->pix_fmt == PIX_FMT_ABGR ||
codec_context->pix_fmt == PIX_FMT_BGRA )
- *format = pick_format( codec_context->pix_fmt );
+ *format = pick_pix_format( codec_context->pix_fmt );
// Duplicate the last image if necessary
if ( self->av_frame && self->av_frame->linesize[0]
picture.linesize[0] = codec_context->width;
picture.linesize[1] = codec_context->width / 2;
picture.linesize[2] = codec_context->width / 2;
- convert_image( (AVFrame*) &picture, *buffer,
- PIX_FMT_YUV420P, format, *width, *height, self->colorspace, &alpha );
+ convert_image( self, (AVFrame*) &picture, *buffer,
+ PIX_FMT_YUV420P, format, *width, *height, &alpha );
}
else
#endif
- convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
- format, *width, *height, self->colorspace, &alpha );
+ convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+ format, *width, *height, &alpha );
got_picture = 1;
}
}
int64_t int_position = 0;
int decode_errors = 0;
- av_init_packet( &pkt );
-
// Construct an AVFrame for YUV422 conversion
if ( !self->av_frame )
self->av_frame = avcodec_alloc_frame( );
while( ret >= 0 && !got_picture )
{
// Read a packet
+ if ( self->pkt.stream_index == self->video_index )
+ av_free_packet( &self->pkt );
+ av_init_packet( &self->pkt );
pthread_mutex_lock( &self->packets_mutex );
if ( mlt_deque_count( self->vpackets ) )
{
AVPacket *tmp = (AVPacket*) mlt_deque_pop_front( self->vpackets );
- pkt = *tmp;
+ self->pkt = *tmp;
free( tmp );
}
else
{
- ret = av_read_frame( context, &pkt );
- if ( ret >= 0 && !self->seekable && pkt.stream_index == self->audio_index )
+ ret = av_read_frame( context, &self->pkt );
+ if ( ret >= 0 && !self->seekable && self->pkt.stream_index == self->audio_index )
{
- if ( !av_dup_packet( &pkt ) )
+ if ( !av_dup_packet( &self->pkt ) )
{
AVPacket *tmp = malloc( sizeof(AVPacket) );
- *tmp = pkt;
+ *tmp = self->pkt;
mlt_deque_push_back( self->apackets, tmp );
}
}
pthread_mutex_unlock( &self->packets_mutex );
// We only deal with video from the selected video_index
- if ( ret >= 0 && pkt.stream_index == self->video_index && pkt.size > 0 )
+ if ( ret >= 0 && self->pkt.stream_index == self->video_index && self->pkt.size > 0 )
{
- // Determine time code of the packet
- if ( use_pts && pkt.pts == AV_NOPTS_VALUE )
- {
- self->invalid_pts_counter++;
- if ( self->invalid_pts_counter > 20 )
- {
- mlt_log_warning( MLT_PRODUCER_SERVICE(producer), "PTS invalid; using DTS instead\n" );
- mlt_properties_set_int( properties, "use_pts", 0 );
- use_pts = 0;
- }
- }
- else
- {
- self->invalid_pts_counter = 0;
- }
- int64_t pts = ( use_pts && pkt.pts != AV_NOPTS_VALUE )? pkt.pts : pkt.dts;
+ int64_t pts = best_pts( self, self->pkt.pts, self->pkt.dts );
if ( pts != AV_NOPTS_VALUE )
{
if ( !self->seekable && self->first_pts == AV_NOPTS_VALUE )
}
mlt_log_debug( MLT_PRODUCER_SERVICE(producer),
"V pkt.pts %"PRId64" pkt.dts %"PRId64" req_pos %"PRId64" cur_pos %"PRId64" pkt_pos %"PRId64"\n",
- pkt.pts, pkt.dts, req_position, self->current_position, int_position );
+ self->pkt.pts, self->pkt.dts, req_position, self->current_position, int_position );
// Make a dumb assumption on streams that contain wild timestamps
if ( abs( req_position - int_position ) > 999 )
if ( int_position >= req_position )
codec_context->skip_loop_filter = AVDISCARD_NONE;
#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(26<<8)+0))
- ret = avcodec_decode_video2( codec_context, self->av_frame, &got_picture, &pkt );
+ ret = avcodec_decode_video2( codec_context, self->av_frame, &got_picture, &self->pkt );
#else
- ret = avcodec_decode_video( codec_context, self->av_frame, &got_picture, pkt.data, pkt.size );
+ ret = avcodec_decode_video( codec_context, self->av_frame, &got_picture, self->pkt.data, self->pkt.size );
#endif
// Note: decode may fail at the beginning of MPEGfile (B-frames referencing before first I-frame), so allow a few errors.
if ( ret < 0 )
if ( got_picture )
{
- if ( use_pts )
- // Get position of reordered frame
- int_position = self->av_frame->reordered_opaque;
+ // Get position of reordered frame
+ int_position = self->av_frame->reordered_opaque;
+#if (LIBAVCODEC_VERSION_INT >= ((52<<16)+(106<<8)+0))
+ pts = best_pts( self, self->av_frame->pkt_pts, self->av_frame->pkt_dts );
+ if ( pts != AV_NOPTS_VALUE )
+ {
+ if ( self->first_pts != AV_NOPTS_VALUE )
+ pts -= self->first_pts;
+ else if ( context->start_time != AV_NOPTS_VALUE )
+ pts -= context->start_time;
+ int_position = ( int64_t )( ( av_q2d( stream->time_base ) * pts + delay ) * source_fps + 0.5 );
+ }
+#endif
if ( int_position < req_position )
got_picture = 0;
else if ( int_position >= req_position )
codec_context->skip_loop_filter = AVDISCARD_NONE;
}
- mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, pkt.flags & PKT_FLAG_KEY );
+ mlt_log_debug( MLT_PRODUCER_SERVICE(producer), " got_pic %d key %d\n", got_picture, self->pkt.flags & PKT_FLAG_KEY );
}
// Now handle the picture if we have one
VdpStatus status = vdp_surface_get_bits( render->surface, dest_format, planes, pitches );
if ( status == VDP_STATUS_OK )
{
- convert_image( self->av_frame, *buffer, PIX_FMT_YUV420P,
- format, *width, *height, self->colorspace, &alpha );
+ convert_image( self, self->av_frame, *buffer, PIX_FMT_YUV420P,
+ format, *width, *height, &alpha );
}
else
{
}
else
#endif
- convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
- format, *width, *height, self->colorspace, &alpha );
+ convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+ format, *width, *height, &alpha );
self->top_field_first |= self->av_frame->top_field_first;
self->current_position = int_position;
}
got_picture = 0;
}
}
- if ( self->seekable || pkt.stream_index != self->audio_index )
- av_free_packet( &pkt );
+
+ // Free packet data if not video and not live audio packet
+ if ( self->pkt.stream_index != self->video_index &&
+ !( !self->seekable && self->pkt.stream_index == self->audio_index ) )
+ av_free_packet( &self->pkt );
}
}
picture.linesize[0] = codec_context->width;
picture.linesize[1] = codec_context->width / 2;
picture.linesize[2] = codec_context->width / 2;
- convert_image( (AVFrame*) &picture, *buffer,
- PIX_FMT_YUV420P, format, *width, *height, self->colorspace, &alpha );
+ convert_image( self, (AVFrame*) &picture, *buffer,
+ PIX_FMT_YUV420P, format, *width, *height, &alpha );
}
else
#endif
- convert_image( self->av_frame, *buffer, codec_context->pix_fmt,
- format, *width, *height, self->colorspace, &alpha );
+ convert_image( self, self->av_frame, *buffer, codec_context->pix_fmt,
+ format, *width, *height, &alpha );
got_picture = 1;
}
}
mlt_properties_set_int( properties, "meta.media.progressive", mlt_properties_get_int( frame_properties, "progressive" ) );
mlt_service_unlock( MLT_PRODUCER_SERVICE( producer ) );
+ // If we already have RGB, then the full range processing either happened already
+ // or does not apply (RGB source).
+ if ( *format == mlt_image_rgb24 || *format == mlt_image_rgb24a || *format == mlt_image_opengl )
+ mlt_properties_set( frame_properties, "force_full_luma", NULL );
+
return !got_picture;
}
#endif
// Reset some image properties
- mlt_properties_set_int( properties, "width", self->video_codec->width );
- mlt_properties_set_int( properties, "height", self->video_codec->height );
+ if ( self->video_codec )
+ {
+ mlt_properties_set_int( properties, "width", self->video_codec->width );
+ mlt_properties_set_int( properties, "height", self->video_codec->height );
+ }
// For DV, we'll just use the saved aspect ratio
if ( codec_context->codec_id != CODEC_ID_DVVIDEO )
get_aspect_ratio( properties, stream, self->video_codec, NULL );
double source_fps = (double) self->video_codec->time_base.den /
( self->video_codec->time_base.num == 0 ? 1 : self->video_codec->time_base.num );
- if ( mlt_properties_get( properties, "force_fps" ) )
- {
- source_fps = mlt_properties_get_double( properties, "force_fps" );
- stream->time_base = av_d2q( source_fps, 1024 );
- mlt_properties_set_int( properties, "meta.media.frame_rate_num", stream->time_base.num );
- mlt_properties_set_int( properties, "meta.media.frame_rate_den", stream->time_base.den );
- }
- else
{
// If the muxer reports a frame rate different than the codec
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(42<<8)+0)
mlt_properties_set_int( properties, "meta.media.frame_rate_den", frame_rate.den );
}
}
+ if ( mlt_properties_get( properties, "force_fps" ) )
+ {
+ double source_fps = mlt_properties_get_double( properties, "force_fps" );
+ AVRational fps = av_d2q( source_fps, 1024 );
+ stream->time_base.num *= mlt_properties_get_int( properties, "meta.media.frame_rate_num" ) * fps.den;
+ stream->time_base.den *= mlt_properties_get_int( properties, "meta.media.frame_rate_den" ) * fps.num;
+ mlt_properties_set_int( properties, "meta.media.frame_rate_num", fps.num );
+ mlt_properties_set_int( properties, "meta.media.frame_rate_den", fps.den );
+ }
// Set the YUV colorspace from override or detect
self->colorspace = mlt_properties_get_int( properties, "force_colorspace" );
#endif
// Let apps get chosen colorspace
mlt_properties_set_int( properties, "meta.media.colorspace", self->colorspace );
+
+ self->full_luma = -1;
+#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(72<<8)+2)
+ mlt_log_debug( MLT_PRODUCER_SERVICE(self->parent), "color_range %d\n", codec_context->color_range );
+ if ( codec_context->color_range == AVCOL_RANGE_JPEG )
+ self->full_luma = 1;
+#endif
+ if ( mlt_properties_get( properties, "set.force_full_luma" ) )
+ self->full_luma = mlt_properties_get_int( properties, "set.force_full_luma" );
}
return self->video_codec && self->video_index > -1;
}
#endif
}
// Convert contiguous planar audio (all samples of channel 0, then all of
// channel 1, ...) into interleaved order (sample 0 of every channel, then
// sample 1 of every channel, ...). Source and destination must not overlap,
// and both must hold samples * channels * bytes_per_sample bytes.
static void planar_to_interleaved( uint8_t *dest, uint8_t *src, int samples, int channels, int bytes_per_sample )
{
	int c, s;
	for ( c = 0; c < channels; c++ )
	{
		// Start of this channel's plane in the source.
		uint8_t *in = src + c * samples * bytes_per_sample;
		// This channel's first slot in the interleaved output.
		uint8_t *out = dest + c * bytes_per_sample;
		for ( s = 0; s < samples; s++ )
		{
			memcpy( out, in, bytes_per_sample );
			in += bytes_per_sample;
			out += channels * bytes_per_sample; // next interleaved frame
		}
	}
}
+
static int decode_audio( producer_avformat self, int *ignore, AVPacket pkt, int channels, int samples, double timecode, double fps )
{
// Fetch the audio_format
}
else
{
- // Straight copy to audio buffer
- memcpy( &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ], decode_buffer, data_size );
+ uint8_t *source = decode_buffer;
+ uint8_t *dest = &audio_buffer[ audio_used * codec_context->channels * sizeof_sample ];
+ switch ( codec_context->sample_fmt )
+ {
+#if LIBAVUTIL_VERSION_INT >= ((51<<16)+(17<<8)+0)
+ case AV_SAMPLE_FMT_S16P:
+ case AV_SAMPLE_FMT_S32P:
+ case AV_SAMPLE_FMT_FLTP:
+ planar_to_interleaved( dest, source, convert_samples, codec_context->channels, sizeof_sample );
+ break;
+#endif
+ default:
+ // Straight copy to audio buffer
+ memcpy( dest, decode_buffer, data_size );
+ }
audio_used += convert_samples;
}
pthread_mutex_lock( &self->audio_mutex );
// Obtain the frame number of this frame
- mlt_position position = mlt_properties_get_position( MLT_FRAME_PROPERTIES( frame ), "avformat_position" );
+ mlt_position position = mlt_frame_original_position( frame );
// Calculate the real time code
double real_timecode = producer_time_of_frame( self->parent, position );
if ( self->audio_index == INT_MAX )
{
index = 0;
- index_max = context->nb_streams;
+ index_max = FFMIN( MAX_AUDIO_STREAMS, context->nb_streams );
*channels = self->total_channels;
*samples = *samples * FFMAX( self->max_frequency, *frequency ) / *frequency;
*frequency = FFMAX( self->max_frequency, *frequency );
}
// Initialize the resamplers and buffers
- for ( ; index < index_max; index++ )
+ for ( ; index < index_max && index < MAX_AUDIO_STREAMS; index++ )
{
// Get codec context
AVCodecContext *codec_context = self->audio_codec[ index ];
if ( codec_context && !self->audio_buffer[ index ] )
{
+#if LIBAVCODEC_VERSION_INT < ((54<<16)+(26<<8)+0)
// Check for resample and create if necessary
if ( codec_context->channels <= 2 )
{
#endif
}
else
+#endif
{
codec_context->request_channels = self->audio_index == INT_MAX ? codec_context->channels : *channels;
sizeof_sample = sample_bytes( codec_context );
}
// Get the audio if required
- if ( !paused )
+ if ( !paused && *frequency > 0 )
{
int ret = 0;
int got_audio = 0;
{
// Check if there is enough audio for all streams
got_audio = 1;
- for ( index = 0; got_audio && index < context->nb_streams; index++ )
+ for ( index = 0; got_audio && index < index_max; index++ )
if ( ( self->audio_codec[ index ] && self->audio_used[ index ] < *samples ) || ignore[ index ] )
got_audio = 0;
if ( got_audio )
// We only deal with audio from the selected audio index
index = pkt.stream_index;
- if ( ret >= 0 && pkt.data && pkt.size > 0 && ( index == self->audio_index ||
+ if ( index < MAX_AUDIO_STREAMS && ret >= 0 && pkt.data && pkt.size > 0 && ( index == self->audio_index ||
( self->audio_index == INT_MAX && context->streams[ index ]->codec->codec_type == CODEC_TYPE_AUDIO ) ) )
{
int channels2 = ( self->audio_index == INT_MAX || !self->audio_resample[index] ) ?
index = self->audio_index;
*channels = self->audio_codec[ index ]->channels;
*frequency = self->audio_codec[ index ]->sample_rate;
- *format = self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_S32 ? mlt_audio_s32le
- : self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_FLT ? mlt_audio_f32le
- : mlt_audio_s16;
+ *format = pick_audio_format( self->audio_codec[ index ]->sample_fmt );
sizeof_sample = sample_bytes( self->audio_codec[ index ] );
}
else if ( self->audio_index == INT_MAX )
{
- // This only works if all audio tracks have the same sample format.
for ( index = 0; index < index_max; index++ )
if ( self->audio_codec[ index ] && !self->audio_resample[ index ] )
{
- *format = self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_S32 ? mlt_audio_s32le
- : self->audio_codec[ index ]->sample_fmt == AV_SAMPLE_FMT_FLT ? mlt_audio_f32le
- : mlt_audio_s16;
+ // XXX: This only works if all audio tracks have the same sample format.
+ *format = pick_audio_format( self->audio_codec[ index ]->sample_fmt );
sizeof_sample = sample_bytes( self->audio_codec[ index ] );
break;
}
else if ( context && index > -1 && audio_codec_init( self, index, properties ) )
{
// Set the frame properties
- if ( index < INT_MAX )
+ if ( index < MAX_AUDIO_STREAMS )
{
mlt_properties_set_int( frame_properties, "frequency", self->audio_codec[ index ]->sample_rate );
mlt_properties_set_int( frame_properties, "channels", self->audio_codec[ index ]->channels );
// Set the position of this producer
mlt_position position = self->seekable ? mlt_producer_frame( producer ) : self->nonseek_position++;
- mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "avformat_position", position );
+ mlt_properties_set_position( MLT_FRAME_PROPERTIES( *frame ), "original_position", position );
// Calculate the next timecode
mlt_producer_prepare_next( producer );
mlt_log_debug( NULL, "producer_avformat_close\n" );
// Cleanup av contexts
+ av_free_packet( &self->pkt );
av_free( self->av_frame );
pthread_mutex_lock( &self->open_mutex );
int i;