dct_unquantize_h263_axp(block, n_coeffs, qscale, (qscale - 1) | 1);
}
-void MPV_common_init_axp(MpegEncContext *s)
+void ff_MPV_common_init_axp(MpegEncContext *s)
{
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp;
void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, DCTELEM *block,
int n, int qscale);
-void MPV_common_init_arm(MpegEncContext *s)
+void ff_MPV_common_init_arm(MpegEncContext *s)
{
/* IWMMXT support is a superset of armv5te, so
* allow optimized functions for armv5te unless
* a better iwmmxt function exists
*/
#if HAVE_ARMV5TE
- MPV_common_init_armv5te(s);
+ ff_MPV_common_init_armv5te(s);
#endif
#if HAVE_IWMMXT
- MPV_common_init_iwmmxt(s);
+ ff_MPV_common_init_iwmmxt(s);
#endif
if (HAVE_NEON) {
#include "libavcodec/mpegvideo.h"
-void MPV_common_init_iwmmxt(MpegEncContext *s);
-void MPV_common_init_armv5te(MpegEncContext *s);
+void ff_MPV_common_init_iwmmxt(MpegEncContext *s);
+void ff_MPV_common_init_armv5te(MpegEncContext *s);
#endif /* AVCODEC_ARM_MPEGVIDEO_H */
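
The armv5te/iwmmxt/NEON layering described in the comment above is a plain
override chain: each init that runs later simply replaces the function
pointers set by the one before it. A minimal sketch under that reading
(function names from this patch; the NEON branch body is an assumption):

    void ff_MPV_common_init_arm(MpegEncContext *s)
    {
    #if HAVE_ARMV5TE
        ff_MPV_common_init_armv5te(s);  /* baseline optimized pointers */
    #endif
    #if HAVE_IWMMXT
        ff_MPV_common_init_iwmmxt(s);   /* superset: may replace armv5te choices */
    #endif
        if (HAVE_NEON) {
            /* assumed: NEON versions are installed last so they win */
            s->dct_unquantize_h263_intra = ff_dct_unquantize_h263_intra_neon;
        }
    }
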
ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
}
-void MPV_common_init_armv5te(MpegEncContext *s)
+void ff_MPV_common_init_armv5te(MpegEncContext *s)
{
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
block_orig[0] = level;
}
-void MPV_common_init_iwmmxt(MpegEncContext *s)
+void ff_MPV_common_init_iwmmxt(MpegEncContext *s)
{
if (!(mm_flags & AV_CPU_FLAG_IWMMXT)) return;
return last_non_zero;
}
-void MPV_common_init_bfin (MpegEncContext *s)
+void ff_MPV_common_init_bfin (MpegEncContext *s)
{
/* s->dct_quantize= dct_quantize_bfin; */
}
AVSContext *h = avctx->priv_data;
MpegEncContext * const s = &h->s;
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
ff_cavsdsp_init(&h->cdsp, avctx);
s->avctx = avctx;
if (!s->context_initialized) {
s->avctx->idct_algo = FF_IDCT_CAVS;
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -1;
ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
}
ff_h264_hl_decode_mb(h);
} else {
assert(ref == 0);
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
}
}
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_FLV1,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
};
MpegEncContext * const s = &h->s;
// set defaults
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
s->avctx = avctx;
s->width = s->avctx->coded_width;
s->mb_skipped = 1;
h->mtype &= ~MB_TYPE_H261_FIL;
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
}
return 0;
s->block_last_index[i]= -1;
}
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
return SLICE_OK;
}
init_get_bits(&s->gb, buf, buf_size*8);
if(!s->context_initialized){
- if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
+ if (ff_MPV_common_init(s) < 0) //we need the idct permutation for reading a custom matrix
return -1;
}
if (s->width != avctx->coded_width || s->height != avctx->coded_height){
ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
s->parse_context.buffer=0;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
s->parse_context= pc;
}
if (!s->context_initialized) {
|| avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size);
- if(MPV_frame_start(s, avctx) < 0)
+ if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
break;
h261_decode_gob(h);
}
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
H261Context *h= avctx->priv_data;
MpegEncContext *s = &h->s;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
return 0;
}
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_H261,
.priv_data_size = sizeof(H261Context),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.261"),
};
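
Most decoder hunks in this patch touch the same per-frame call sequence; a
hedged sketch of that pattern, with call order taken from the hunks themselves
and error paths elided:

    if (ff_MPV_frame_start(s, avctx) < 0)
        return -1;
    ff_er_frame_start(s);
    /* ... decode macroblocks, each one finished with
     * ff_MPV_decode_mb(s, s->block) ... */
    ff_er_frame_end(s);
    ff_MPV_frame_end(s);
    *pict = *(AVFrame*)s->current_picture_ptr;
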
s->workaround_bugs= avctx->workaround_bugs;
// set defaults
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
s->quant_precision=5;
s->decode_mb= ff_h263_decode_mb;
s->low_delay= 1;
/* for h263, we allocate the images after having read the header */
if (avctx->codec->id != CODEC_ID_H263 && avctx->codec->id != CODEC_ID_MPEG4)
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -1;
ff_h263_decode_init_vlc(s);
{
MpegEncContext *s = avctx->priv_data;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
return 0;
}
if(ret<0){
const int xy= s->mb_x + s->mb_y*s->mb_stride;
if(ret==SLICE_END){
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
if(++s->mb_x >= s->mb_width){
s->mb_x=0;
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
- MPV_report_decode_progress(s);
+ ff_MPV_report_decode_progress(s);
s->mb_y++;
}
return 0;
return -1;
}
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
}
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
- MPV_report_decode_progress(s);
+ ff_MPV_report_decode_progress(s);
s->mb_x= 0;
}
s->bitstream_buffer_size=0;
if (!s->context_initialized) {
- if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
+ if (ff_MPV_common_init(s) < 0) //we need the idct permutation for reading a custom matrix
return -1;
}
/* H.263 could change picture size any time */
ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
s->parse_context.buffer=0;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
s->parse_context= pc;
}
if (!s->context_initialized) {
s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
}
- if(MPV_frame_start(s, avctx) < 0)
+ if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
if (!s->divx_packed) ff_thread_finish_setup(avctx);
ff_er_frame_start(s);
//the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type
- //which is not available before MPV_frame_start()
+ //which is not available before ff_MPV_frame_start()
if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5){
ret = ff_wmv2_decode_secondary_picture_header(s);
if(ret<0) return ret;
return -1;
}
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
MpegEncContext * const s = &h->s;
int i;
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
s->avctx = avctx;
common_init(h);
int i;
const int pixel_shift = h->pixel_shift;
- if(MPV_frame_start(s, s->avctx) < 0)
+ if(ff_MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
/*
- * MPV_frame_start uses pict_type to derive key_frame.
+ * ff_MPV_frame_start uses pict_type to derive key_frame.
* This is incorrect for H.264; IDR markings must be used.
* Zero here; IDR markings per slice in frame or fields are ORed in later.
* See decode_nal_units().
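
A hedged illustration of the behaviour that comment describes (the field names
appear elsewhere in this patch; the exact IDR test is an assumption):

    /* right after ff_MPV_frame_start(): drop the pict_type-derived value */
    s->current_picture_ptr->f.key_frame = 0;
    /* later, once per slice or field, OR the IDR marking back in */
    s->current_picture_ptr->f.key_frame |= h->nal_unit_type == NAL_IDR_SLICE;
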
// We mark the current picture as non-reference after allocating it, so
// that if we break out due to an error it can be released automatically
- // in the next MPV_frame_start().
+ // in the next ff_MPV_frame_start().
// SVQ3 as well as most other codecs have only last/next/current and thus
// get released even with the reference flag set; besides, SVQ3 and others do not
// mark frames as reference later "naturally".
if (!FIELD_PICTURE)
ff_er_frame_end(s);
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
h->current_slice=0;
/**
* Decode a slice header.
- * This will also call MPV_common_init() and frame_start() as needed.
+ * This will also call ff_MPV_common_init() and frame_start() as needed.
*
* @param h h264context
* @param h0 h264 master context (differs from 'h' when doing sliced based parallel decoding)
}
free_tables(h, 0);
flush_dpb(s->avctx);
- MPV_common_end(s);
+ ff_MPV_common_end(s);
}
if (!s->context_initialized) {
if (h != h0) {
s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
- if (MPV_common_init(s) < 0) {
- av_log(h->s.avctx, AV_LOG_ERROR, "MPV_common_init() failed.\n");
+ if (ff_MPV_common_init(s) < 0) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
return -1;
}
s->first_field = 0;
ff_h264_free_context(h);
- MPV_common_end(s);
+ ff_MPV_common_end(s);
// memset(h, 0, sizeof(H264Context));
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_LJPEG,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
+ .init = ff_MPV_encode_init,
.encode = encode_picture_lossless,
- .close = MPV_encode_end,
+ .close = ff_MPV_encode_end,
.long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
};
}
-void MPV_common_init_mmi(MpegEncContext *s)
+void ff_MPV_common_init_mmi(MpegEncContext *s)
{
s->dct_unquantize_h263_intra =
s->dct_unquantize_h263_inter = dct_unquantize_h263_mmi;
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_MJPEG,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
};
for (i = 0; i < 64; i++)
s2->dsp.idct_permutation[i]=i;
- MPV_decode_defaults(s2);
+ ff_MPV_decode_defaults(s2);
s->mpeg_enc_ctx.avctx = avctx;
s->mpeg_enc_ctx.flags = avctx->flags;
if (s1->mpeg_enc_ctx_allocated) {
ParseContext pc = s->parse_context;
s->parse_context.buffer = 0;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
s->parse_context = pc;
}
* if DCT permutation is changed. */
memcpy(old_permutation, s->dsp.idct_permutation, 64 * sizeof(uint8_t));
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -2;
quant_matrix_rebuild(s->intra_matrix, old_permutation, s->dsp.idct_permutation);
/* start frame decoding */
if (s->first_field || s->picture_structure == PICT_FRAME) {
- if (MPV_frame_start(s, avctx) < 0)
+ if (ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
if (++s->mb_x >= s->mb_width) {
const int mb_size = 16 >> s->avctx->lowres;
ff_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size);
- MPV_report_decode_progress(s);
+ ff_MPV_report_decode_progress(s);
s->mb_x = 0;
s->mb_y += 1 << field_pic;
ff_er_frame_end(s);
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict = *(AVFrame*)s->current_picture_ptr;
/* start new MPEG-1 context decoding */
s->out_format = FMT_MPEG1;
if (s1->mpeg_enc_ctx_allocated) {
- MPV_common_end(s);
+ ff_MPV_common_end(s);
}
s->width = avctx->coded_width;
s->height = avctx->coded_height;
if (avctx->idct_algo == FF_IDCT_AUTO)
avctx->idct_algo = FF_IDCT_SIMPLE;
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -1;
exchange_uv(s); // common init resets pblocks, so we swap them here
s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
Mpeg1Context *s = avctx->priv_data;
if (s->mpeg_enc_ctx_allocated)
- MPV_common_end(&s->mpeg_enc_ctx);
+ ff_MPV_common_end(&s->mpeg_enc_ctx);
return 0;
}
{
MpegEncContext *s = avctx->priv_data;
- if(MPV_encode_init(avctx) < 0)
+ if(ff_MPV_encode_init(avctx) < 0)
return -1;
if(find_frame_rate_index(s) < 0){
.id = CODEC_ID_MPEG1VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.id = CODEC_ID_MPEG2VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
int ret;
static int done = 0;
- if((ret=MPV_encode_init(avctx)) < 0)
+ if((ret=ff_MPV_encode_init(avctx)) < 0)
return ret;
if (!done) {
.id = CODEC_ID_MPEG4,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
#if HAVE_MMX
- MPV_common_init_mmx(s);
+ ff_MPV_common_init_mmx(s);
#elif ARCH_ALPHA
- MPV_common_init_axp(s);
+ ff_MPV_common_init_axp(s);
#elif HAVE_MMI
- MPV_common_init_mmi(s);
+ ff_MPV_common_init_mmi(s);
#elif ARCH_ARM
- MPV_common_init_arm(s);
+ ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
- MPV_common_init_altivec(s);
+ ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
- MPV_common_init_bfin(s);
+ ff_MPV_common_init_bfin(s);
#endif
/* load & permutate scantables
return 0;
fail:
- return -1; // free() through MPV_common_end()
+ return -1; // free() through ff_MPV_common_end()
}
static void free_duplicate_context(MpegEncContext *s)
s->bitstream_buffer = NULL;
s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
- MPV_common_init(s);
+ ff_MPV_common_init(s);
}
s->avctx->coded_height = s1->avctx->coded_height;
* The changed fields will not depend upon the
* prior state of the MpegEncContext.
*/
-void MPV_common_defaults(MpegEncContext *s)
+void ff_MPV_common_defaults(MpegEncContext *s)
{
s->y_dc_scale_table =
s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
* the changed fields will not depend upon
* the prior state of the MpegEncContext.
*/
-void MPV_decode_defaults(MpegEncContext *s)
+void ff_MPV_decode_defaults(MpegEncContext *s)
{
- MPV_common_defaults(s);
+ ff_MPV_common_defaults(s);
}
/**
* init common structure for both encoder and decoder.
* this assumes that some variables like width/height are already set
*/
-av_cold int MPV_common_init(MpegEncContext *s)
+av_cold int ff_MPV_common_init(MpegEncContext *s)
{
int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
int nb_slices = (HAVE_THREADS &&
return 0;
fail:
- MPV_common_end(s);
+ ff_MPV_common_end(s);
return -1;
}
/* init common structure for both encoder and decoder */
-void MPV_common_end(MpegEncContext *s)
+void ff_MPV_common_end(MpegEncContext *s)
{
int i, j, k;
* generic function for encode/decode called after coding/decoding
* the header and before a frame is coded/decoded.
*/
-int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
int i;
Picture *pic;
/* generic function for encode/decode called after a
* frame has been coded/decoded. */
-void MPV_frame_end(MpegEncContext *s)
+void ff_MPV_frame_end(MpegEncContext *s)
{
int i;
/* redraw edges for the frame if decoding didn't complete */
/**
* find the lowest MB row referenced in the MVs
*/
-int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
+int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
{
int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
int my, off, i, mvs;
if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
if (s->mv_dir & MV_DIR_FORWARD) {
- ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
+ ff_thread_await_progress((AVFrame*)s->last_picture_ptr, ff_MPV_lowest_referenced_row(s, 0), 0);
}
if (s->mv_dir & MV_DIR_BACKWARD) {
- ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
+ ff_thread_await_progress((AVFrame*)s->next_picture_ptr, ff_MPV_lowest_referenced_row(s, 1), 0);
}
}
}
}
-void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
+void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
if(s->out_format == FMT_MPEG1) {
if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
}
-void MPV_report_decode_progress(MpegEncContext *s)
+void ff_MPV_report_decode_progress(MpegEncContext *s)
{
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
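
Together, the two renamed progress helpers form the frame-threading handshake
visible in the hunks above; a minimal sketch with the arguments copied from
this patch:

    /* consumer side, before motion compensation from a reference frame: */
    ff_thread_await_progress((AVFrame*)s->last_picture_ptr,
                             ff_MPV_lowest_referenced_row(s, 0), 0);
    /* producer side, after a macroblock row is fully decoded: */
    ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
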
&new_ctx->picture[pic - old_ctx->picture] : pic - (Picture*)old_ctx + (Picture*)new_ctx)\
: NULL)
-void MPV_decode_defaults(MpegEncContext *s);
-int MPV_common_init(MpegEncContext *s);
-void MPV_common_end(MpegEncContext *s);
-void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
-int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
-void MPV_frame_end(MpegEncContext *s);
-int MPV_encode_init(AVCodecContext *avctx);
-int MPV_encode_end(AVCodecContext *avctx);
-int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
-void MPV_common_init_mmx(MpegEncContext *s);
-void MPV_common_init_axp(MpegEncContext *s);
-void MPV_common_init_mmi(MpegEncContext *s);
-void MPV_common_init_arm(MpegEncContext *s);
-void MPV_common_init_altivec(MpegEncContext *s);
-void MPV_common_init_bfin(MpegEncContext *s);
+void ff_MPV_decode_defaults(MpegEncContext *s);
+int ff_MPV_common_init(MpegEncContext *s);
+void ff_MPV_common_end(MpegEncContext *s);
+void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
+int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
+void ff_MPV_frame_end(MpegEncContext *s);
+int ff_MPV_encode_init(AVCodecContext *avctx);
+int ff_MPV_encode_end(AVCodecContext *avctx);
+int ff_MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
+void ff_MPV_common_init_mmx(MpegEncContext *s);
+void ff_MPV_common_init_axp(MpegEncContext *s);
+void ff_MPV_common_init_mmi(MpegEncContext *s);
+void ff_MPV_common_init_arm(MpegEncContext *s);
+void ff_MPV_common_init_altivec(MpegEncContext *s);
+void ff_MPV_common_init_bfin(MpegEncContext *s);
void ff_clean_intra_table_entries(MpegEncContext *s);
void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
void ff_mpeg_flush(AVCodecContext *avctx);
int ff_find_unused_picture(MpegEncContext *s, int shared);
void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
-int MPV_lowest_referenced_row(MpegEncContext *s, int dir);
-void MPV_report_decode_progress(MpegEncContext *s);
+int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir);
+void ff_MPV_report_decode_progress(MpegEncContext *s);
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
const uint8_t *avpriv_mpv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state);
void ff_set_qscale(MpegEncContext * s, int qscale);
* Set the given MpegEncContext to common defaults (same for encoding and decoding).
* The changed fields will not depend upon the prior state of the MpegEncContext.
*/
-void MPV_common_defaults(MpegEncContext *s);
+void ff_MPV_common_defaults(MpegEncContext *s);
static inline void gmc1_motion(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
static void MPV_encode_defaults(MpegEncContext *s)
{
int i;
- MPV_common_defaults(s);
+ ff_MPV_common_defaults(s);
for (i = -16; i < 16; i++) {
default_fcode_tab[i + MAX_MV] = 1;
}
/* init video encoder */
-av_cold int MPV_encode_init(AVCodecContext *avctx)
+av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
int i;
s->alternate_scan);
/* init */
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -1;
if (!s->dct_quantize)
return 0;
}
-av_cold int MPV_encode_end(AVCodecContext *avctx)
+av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
ff_rate_control_uninit(s);
- MPV_common_end(s);
+ ff_MPV_common_end(s);
if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
s->out_format == FMT_MJPEG)
ff_mjpeg_encode_close(s);
return 0;
}
-int MPV_encode_picture(AVCodecContext *avctx,
- unsigned char *buf, int buf_size, void *data)
+int ff_MPV_encode_picture(AVCodecContext *avctx,
+ unsigned char *buf, int buf_size, void *data)
{
MpegEncContext *s = avctx->priv_data;
AVFrame *pic_arg = data;
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality,
// s->current_picture.quality, s->qscale);
- MPV_frame_start(s, avctx);
+ ff_MPV_frame_start(s, avctx);
vbv_retry:
if (encode_picture(s, s->picture_number) < 0)
return -1;
avctx->p_count = s->mb_num - s->i_count - s->skip_count;
avctx->skip_count = s->skip_count;
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
ff_mjpeg_encode_picture_trailer(s);
}
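
The encoder hunks mirror the decoder lifecycle; a hedged sketch of the renamed
encode path (call order from the hunks above; the vbv_retry loop and error
handling are elided):

    ff_MPV_frame_start(s, avctx);
    if (encode_picture(s, s->picture_number) < 0)
        return -1;
    ff_MPV_frame_end(s);
    if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
        ff_mjpeg_encode_picture_trailer(s);
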
if(s->avctx->mb_decision == FF_MB_DECISION_RD){
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
score *= s->lambda2;
score += sse_mb(s) << FF_LAMBDA_SHIFT;
}
if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
} else {
int motion_x = 0, motion_y = 0;
s->mv_type=MV_TYPE_16X16;
s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
}
/* clean the MV table in IPS frames for direct mode in B frames */
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_H263,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
.priv_class = &h263_class,
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_H263P,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_MSMPEG4V2,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
};
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_MSMPEG4V3,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
};
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_WMV1,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
};
}
-void MPV_common_init_altivec(MpegEncContext *s)
+void ff_MPV_common_init_altivec(MpegEncContext *s)
{
if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)) return;
av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
if (av_image_check_size(new_w, new_h, 0, s->avctx) < 0)
return -1;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
avcodec_set_dimensions(s->avctx, new_w, new_h);
s->width = new_w;
s->height = new_h;
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -1;
}
return -1;
}
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
s->avctx= avctx;
s->out_format = FMT_H263;
avctx->pix_fmt = PIX_FMT_YUV420P;
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -1;
ff_h263_decode_init_vlc(s);
{
MpegEncContext *s = avctx->priv_data;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
return 0;
}
if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
ff_er_frame_end(s);
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
}
- if(MPV_frame_start(s, avctx) < 0)
+ if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
} else {
}
if(s->pict_type != AV_PICTURE_TYPE_B)
ff_h263_update_motion_val(s);
- MPV_decode_mb(s, s->block);
+ ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
if(s->current_picture_ptr != NULL && s->mb_y>=s->mb_height){
ff_er_frame_end(s);
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict= *(AVFrame*)s->current_picture_ptr;
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_RV10,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
};
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_RV20,
.priv_data_size = sizeof(MpegEncContext),
- .init = MPV_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .init = ff_MPV_encode_init,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
};
av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
r->si.width, r->si.height);
- MPV_common_end(s);
+ ff_MPV_common_end(s);
s->width = r->si.width;
s->height = r->si.height;
avcodec_set_dimensions(s->avctx, s->width, s->height);
- if ((err = MPV_common_init(s)) < 0)
+ if ((err = ff_MPV_common_init(s)) < 0)
return err;
if ((err = rv34_decoder_realloc(r)) < 0)
return err;
}
s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
- if(MPV_frame_start(s, s->avctx) < 0)
+ if(ff_MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
if (!r->tmp_b_block_base) {
MpegEncContext *s = &r->s;
int ret;
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
s->avctx = avctx;
s->out_format = FMT_H263;
s->codec_id = avctx->codec_id;
avctx->has_b_frames = 1;
s->low_delay = 0;
- if ((ret = MPV_common_init(s)) < 0)
+ if ((ret = ff_MPV_common_init(s)) < 0)
return ret;
ff_h264_pred_init(&r->h, CODEC_ID_RV40, 8, 1);
if (avctx->internal->is_copy) {
r->tmp_b_block_base = NULL;
- if ((err = MPV_common_init(&r->s)) < 0)
+ if ((err = ff_MPV_common_init(&r->s)) < 0)
return err;
if ((err = rv34_decoder_alloc(r)) < 0)
return err;
return 0;
if (s->height != s1->height || s->width != s1->width) {
- MPV_common_end(s);
+ ff_MPV_common_end(s);
s->height = s1->height;
s->width = s1->width;
- if ((err = MPV_common_init(s)) < 0)
+ if ((err = ff_MPV_common_init(s)) < 0)
return err;
if ((err = rv34_decoder_realloc(r)) < 0)
return err;
memset(&r->si, 0, sizeof(r->si));
/* necessary since it is the condition checked for in decode_slice
- * to call MPV_frame_start. cmp. comment at the end of decode_frame */
+ * to call ff_MPV_frame_start; cf. the comment at the end of decode_frame */
s->current_picture_ptr = NULL;
return 0;
if(r->loop_filter)
r->loop_filter(r, s->mb_height - 1);
ff_er_frame_end(s);
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
{
RV34DecContext *r = avctx->priv_data;
- MPV_common_end(&r->s);
+ ff_MPV_common_end(&r->s);
rv34_decoder_free(r);
return 0;
|| avctx->skip_frame >= AVDISCARD_ALL)
return buf_size;
- if(MPV_frame_start(s, avctx) < 0)
+ if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
pmv = av_malloc((FFALIGN(s->width, 16)/8 + 3) * sizeof(*pmv));
*pict = *(AVFrame*)&s->current_picture;
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
*data_size=sizeof(AVFrame);
result = buf_size;
int i;
int offset = 0;
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
s->avctx = avctx;
s->width = (avctx->width+3)&~3;
avctx->pix_fmt = PIX_FMT_YUV410P;
avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames
s->flags= avctx->flags;
- if (MPV_common_init(s) < 0) return -1;
+ if (ff_MPV_common_init(s) < 0) return -1;
INIT_VLC_STATIC(&svq1_block_type, 2, 4,
&ff_svq1_block_type_vlc[0][1], 2, 1,
{
MpegEncContext *s = avctx->priv_data;
- MPV_common_end(s);
+ ff_MPV_common_end(s);
return 0;
}
s->width = avctx->width;
s->height = avctx->height;
- if (MPV_common_init(s) < 0)
+ if (ff_MPV_common_init(s) < 0)
return -1;
h->b_stride = 4*s->mb_width;
ff_draw_horiz_band(s, 16*s->mb_y, 16);
}
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*(AVFrame *) data = *(AVFrame *) &s->current_picture;
ff_h264_free_context(h);
- MPV_common_end(s);
+ ff_MPV_common_end(s);
return 0;
}
av_freep(&v->sr_rows[i >> 1][i & 1]);
av_freep(&v->hrd_rate);
av_freep(&v->hrd_buffer);
- MPV_common_end(&v->s);
+ ff_MPV_common_end(&v->s);
av_freep(&v->mv_type_mb_plane);
av_freep(&v->direct_mb_plane);
av_freep(&v->forward_mb_plane);
s->next_p_frame_damaged = 0;
}
- if (MPV_frame_start(s, avctx) < 0) {
+ if (ff_MPV_frame_start(s, avctx) < 0) {
goto err;
}
ff_er_frame_end(s);
}
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
image:
static av_cold int wmv2_encode_init(AVCodecContext *avctx){
Wmv2Context * const w= avctx->priv_data;
- if(MPV_encode_init(avctx) < 0)
+ if(ff_MPV_encode_init(avctx) < 0)
return -1;
ff_wmv2_common_init(w);
.id = CODEC_ID_WMV2,
.priv_data_size = sizeof(Wmv2Context),
.init = wmv2_encode_init,
- .encode = MPV_encode_picture,
- .close = MPV_encode_end,
+ .encode = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
};
#include "mpegvideo_mmx_template.c"
#endif
-void MPV_common_init_mmx(MpegEncContext *s)
+void ff_MPV_common_init_mmx(MpegEncContext *s)
{
int mm_flags = av_get_cpu_flags();