#endif
#include "avcodec.h"
+#include "chroma.h"
/*****************************************************************************
* decoder_sys_t : decoder descriptor
static void ffmpeg_InitCodec ( decoder_t * );
static void ffmpeg_CopyPicture ( decoder_t *, picture_t *, AVFrame * );
static int ffmpeg_GetFrameBuf ( struct AVCodecContext *, AVFrame * );
+static int ffmpeg_ReGetFrameBuf( struct AVCodecContext *, AVFrame * );
static void ffmpeg_ReleaseFrameBuf( struct AVCodecContext *, AVFrame * );
static uint32_t ffmpeg_CodecTag( vlc_fourcc_t fcc )
case PIX_FMT_GRAY8:
return VLC_FOURCC('G','R','E','Y');
- case PIX_FMT_YUV410P:
- case PIX_FMT_YUV411P:
default:
+#if defined(HAVE_LIBSWSCALE_SWSCALE_H) || defined(HAVE_FFMPEG_SWSCALE_H)
+ if( GetVlcChroma( i_ff_chroma ) )
+ return GetVlcChroma( i_ff_chroma );
+#endif
return 0;
}
}
if( ( p_dec->p_sys = p_sys =
(decoder_sys_t *)malloc(sizeof(decoder_sys_t)) ) == NULL )
{
- msg_Err( p_dec, "out of memory" );
return VLC_ENOMEM;
}
memset( p_sys, 0, sizeof(decoder_sys_t) );
/* Always use our get_buffer wrapper so we can calculate the
* PTS correctly */
p_sys->p_context->get_buffer = ffmpeg_GetFrameBuf;
+ p_sys->p_context->reget_buffer = ffmpeg_ReGetFrameBuf;
p_sys->p_context->release_buffer = ffmpeg_ReleaseFrameBuf;
p_sys->p_context->opaque = p_dec;
p_sys->i_buffer = 0;
p_sys->i_buffer_orig = 1;
p_sys->p_buffer_orig = p_sys->p_buffer = malloc( p_sys->i_buffer_orig );
+ if( !p_sys->p_buffer_orig )
+ {
+ free( p_sys );
+ return VLC_ENOMEM;
+ }
/* Set output properties */
p_dec->fmt_out.i_cat = VIDEO_ES;
if( p_dec->fmt_in.video.p_palette )
p_sys->p_context->palctrl =
(AVPaletteControl *)p_dec->fmt_in.video.p_palette;
- else
+ else if( p_sys->i_codec_id != CODEC_ID_MSVIDEO1 && p_sys->i_codec_id != CODEC_ID_CINEPAK )
p_sys->p_context->palctrl = &palette_control;
/* ***** Open the codec ***** */
vlc_mutex_t *lock = var_AcquireMutex( "avcodec" );
if( lock == NULL )
{
+ free( p_sys->p_buffer_orig );
free( p_sys );
return VLC_ENOMEM;
}
{
vlc_mutex_unlock( lock );
msg_Err( p_dec, "cannot open codec (%s)", p_sys->psz_namecodec );
+ free( p_sys->p_buffer_orig );
free( p_sys );
return VLC_EGENERIC;
}
}
p_sys->p_buffer = p_sys->p_buffer_orig;
p_sys->i_buffer = p_block->i_buffer;
+ if( !p_sys->p_buffer )
+ {
+ block_Release( p_block );
+ return NULL;
+ }
vlc_memcpy( p_sys->p_buffer, p_block->p_buffer, p_block->i_buffer );
memset( p_sys->p_buffer + p_block->i_buffer, 0,
FF_INPUT_BUFFER_PADDING_SIZE );
p_sys->p_context->extradata_size = i_size + 12;
p = p_sys->p_context->extradata =
malloc( p_sys->p_context->extradata_size );
+ if( !p )
+ return;
memcpy( &p[0], "SVQ3", 4 );
memset( &p[4], 0, 8 );
{
p_sys->p_context->extradata_size = 8;
p_sys->p_context->extradata = malloc( 8 );
+ if( p_sys->p_context->extradata )
+ {
+ memcpy( p_sys->p_context->extradata,
+ p_dec->fmt_in.p_extra, p_dec->fmt_in.i_extra );
+ p_sys->p_context->sub_id = ((uint32_t*)p_dec->fmt_in.p_extra)[1];
- memcpy( p_sys->p_context->extradata,
- p_dec->fmt_in.p_extra, p_dec->fmt_in.i_extra );
- p_sys->p_context->sub_id= ((uint32_t*)p_dec->fmt_in.p_extra)[1];
-
- msg_Warn( p_dec, "using extra data for RV codec sub_id=%08x",
- p_sys->p_context->sub_id );
+ msg_Warn( p_dec, "using extra data for RV codec sub_id=%08x",
+ p_sys->p_context->sub_id );
+ }
}
}
else
p_sys->p_context->extradata_size = i_size;
p_sys->p_context->extradata =
malloc( i_size + FF_INPUT_BUFFER_PADDING_SIZE );
- memcpy( p_sys->p_context->extradata,
- p_dec->fmt_in.p_extra, i_size );
- memset( &((uint8_t*)p_sys->p_context->extradata)[i_size],
- 0, FF_INPUT_BUFFER_PADDING_SIZE );
+ if( p_sys->p_context->extradata )
+ {
+ memcpy( p_sys->p_context->extradata,
+ p_dec->fmt_in.p_extra, i_size );
+ memset( &((uint8_t*)p_sys->p_context->extradata)[i_size],
+ 0, FF_INPUT_BUFFER_PADDING_SIZE );
+ }
}
}
* It is used for direct rendering as well as to get the right PTS for each
* decoded picture (even in indirect rendering mode).
*****************************************************************************/
+static void ffmpeg_SetFrameBufferPts( decoder_t *p_dec, AVFrame *p_ff_pic );
+
/* AVCodecContext.get_buffer callback.  Chooses a PTS for the frame that
 * libavcodec is about to fill, then (in indirect rendering mode) lets
 * libavcodec do the actual buffer allocation.
 * NOTE(review): this is a diff hunk — part of the function body (e.g. the
 * p_dec/p_sys extraction from p_context->opaque) is elided from this view. */
static int ffmpeg_GetFrameBuf( struct AVCodecContext *p_context,
                               AVFrame *p_ff_pic )
{
    picture_t *p_pic;
    /* Set picture PTS */
-    if( p_sys->input_pts )
-    {
-        p_ff_pic->pts = p_sys->input_pts;
-    }
-    else if( p_sys->input_dts )
-    {
-        /* Some demuxers only set the dts so let's try to find a useful
-         * timestamp from this */
-        if( !p_context->has_b_frames || !p_sys->b_has_b_frames ||
-            !p_ff_pic->reference || !p_sys->i_pts )
-        {
-            p_ff_pic->pts = p_sys->input_dts;
-        }
-        else p_ff_pic->pts = 0;
-    }
-    else p_ff_pic->pts = 0;
-
-    if( p_sys->i_pts ) /* make sure 1st frame has a pts > 0 */
-    {
-        p_sys->input_pts = p_sys->input_dts = 0;
-    }
+    /* PTS selection factored out so ffmpeg_ReGetFrameBuf can share it */
+    ffmpeg_SetFrameBufferPts( p_dec, p_ff_pic );
+    /* */
    p_ff_pic->opaque = 0;
    /* Not much to do in indirect rendering mode */
    return 0;
}
+/* AVCodecContext.reget_buffer callback.  Stamps a PTS on the frame being
+ * reused (same logic as get_buffer, via ffmpeg_SetFrameBufferPts), then
+ * delegates the actual buffer management to libavcodec's stock
+ * implementation.  Returns whatever avcodec_default_reget_buffer returns
+ * (0 on success, negative on error). */
+static int ffmpeg_ReGetFrameBuf( struct AVCodecContext *p_context, AVFrame *p_ff_pic )
+{
+    decoder_t *p_dec = (decoder_t *)p_context->opaque;
+
+    /* Set picture PTS */
+    ffmpeg_SetFrameBufferPts( p_dec, p_ff_pic );
+
+    /* We always use default reget function, it works perfectly fine */
+    return avcodec_default_reget_buffer( p_context, p_ff_pic );
+}
+
+/* Pick a PTS for p_ff_pic from the demuxer timestamps cached in p_sys.
+ * Preference order: input_pts if the demuxer gave one; otherwise fall back
+ * to input_dts, except for reference frames of a B-frame stream where the
+ * dts would be the wrong display time; otherwise 0 ("no usable timestamp").
+ * Once a pts has been established (p_sys->i_pts non-zero — presumably the
+ * last output pts, set elsewhere; TODO confirm), the cached input stamps
+ * are consumed so they are not applied to a second frame. */
+static void ffmpeg_SetFrameBufferPts( decoder_t *p_dec, AVFrame *p_ff_pic )
+{
+    decoder_sys_t *p_sys = p_dec->p_sys;
+
+    /* Set picture PTS */
+    if( p_sys->input_pts )
+    {
+        p_ff_pic->pts = p_sys->input_pts;
+    }
+    else if( p_sys->input_dts )
+    {
+        /* Some demuxers only set the dts so let's try to find a useful
+         * timestamp from this */
+        if( !p_sys->p_context->has_b_frames || !p_sys->b_has_b_frames ||
+            !p_ff_pic->reference || !p_sys->i_pts )
+        {
+            p_ff_pic->pts = p_sys->input_dts;
+        }
+        else p_ff_pic->pts = 0;
+    }
+    else p_ff_pic->pts = 0;
+
+    if( p_sys->i_pts ) /* make sure 1st frame has a pts > 0 */
+    {
+        p_sys->input_pts = p_sys->input_dts = 0;
+    }
+}
static void ffmpeg_ReleaseFrameBuf( struct AVCodecContext *p_context,
AVFrame *p_ff_pic )