X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fmpegvideo_enc.c;h=cca2f7fe05ee520d74bad7b31e75c0f2a28294c1;hb=5e12701c5c881722f28379a15a60d5a20698b10f;hp=36be455f21f2cb4f3df84a10536a60ce318f495d;hpb=b2a9177017b426e782198a7b3f8ffd478bbfa323;p=ffmpeg diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c index 36be455f21f..cca2f7fe05e 100644 --- a/libavcodec/mpegvideo_enc.c +++ b/libavcodec/mpegvideo_enc.c @@ -1,6 +1,6 @@ /* * The simplest mpeg encoder (well, it was the simplest!) - * Copyright (c) 2000,2001 Fabrice Bellard. + * Copyright (c) 2000,2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer * * 4MV & hq & B-frame encoding stuff by Michael Niedermayer @@ -23,7 +23,7 @@ */ /** - * @file mpegvideo_enc.c + * @file libavcodec/mpegvideo_enc.c * The simplest mpeg encoder (well, it was the simplest!). */ @@ -35,6 +35,7 @@ #include "msmpeg4.h" #include "h263.h" #include "faandct.h" +#include "aandcttab.h" #include //#undef NDEBUG @@ -49,18 +50,6 @@ static int sse_mb(MpegEncContext *s); //#define DEBUG -static const uint16_t aanscales[64] = { - /* precomputed values scaled up by 14 bits */ - 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, - 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270, - 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906, - 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, - 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, - 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, - 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446, - 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247 -}; - static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1]; static uint8_t default_fcode_tab[MAX_MV*2+1]; @@ -80,9 +69,9 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6 for(i=0;i<64;i++) { const int j= dsp->idct_permutation[i]; /* 16 <= qscale * quant_matrix[i] <= 7905 */ - /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ - /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ - /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ + /* 19952 <= ff_aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ + /* (1 << 36) / 19952 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= (1 << 36) / 249205026 */ + /* 3444240 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= 275 */ qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j])); @@ -95,12 +84,12 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6 for(i=0;i<64;i++) { const int j= dsp->idct_permutation[i]; /* 16 <= qscale * quant_matrix[i] <= 7905 */ - /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ - /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ - /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */ + /* 19952 <= ff_aanscales[i] * qscale * quant_matrix[i] <= 249205026 */ + /* (1 << 36) / 19952 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */ + /* 3444240 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= 275 */ qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / - (aanscales[i] * qscale * quant_matrix[j])); + (ff_aanscales[i] * qscale * quant_matrix[j])); } } else { for(i=0;i<64;i++) { @@ -126,7 +115,7 @@ void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][6 || dsp->fdct == ff_faandct #endif ) { - max= 
(8191LL*aanscales[i]) >> 14; + max = (8191LL*ff_aanscales[i]) >> 14; } while(((max * qmat[qscale][i]) >> shift) > INT_MAX){ shift++; @@ -229,7 +218,7 @@ static void MPV_encode_defaults(MpegEncContext *s){ } /* init video encoder */ -int MPV_encode_init(AVCodecContext *avctx) +av_cold int MPV_encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int i; @@ -246,7 +235,7 @@ int MPV_encode_init(AVCodecContext *avctx) break; case CODEC_ID_LJPEG: case CODEC_ID_MJPEG: - if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P && + if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P && avctx->pix_fmt != PIX_FMT_RGB32 && ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) || avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL)){ av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n"); return -1; @@ -403,7 +392,7 @@ int MPV_encode_init(AVCodecContext *avctx) return -1; } - if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){ + if((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis){ av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n"); return -1; } @@ -414,7 +403,7 @@ int MPV_encode_init(AVCodecContext *avctx) } if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){ - av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet, set threshold to 1000000000\n"); + av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection are not supported yet, set threshold to 1000000000\n"); return -1; } @@ -424,8 +413,8 @@ int MPV_encode_init(AVCodecContext *avctx) } if(s->flags & CODEC_FLAG_LOW_DELAY){ - if (s->codec_id != CODEC_ID_MPEG2VIDEO && s->codec_id != CODEC_ID_MPEG1VIDEO){ - av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg1/2\n"); + if (s->codec_id != CODEC_ID_MPEG2VIDEO){ + av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n"); return -1; } if (s->max_b_frames != 0){ @@ -475,7 +464,7 @@ int MPV_encode_init(AVCodecContext *avctx) avctx->b_frame_strategy = 0; } - i= ff_gcd(avctx->time_base.den, avctx->time_base.num); + i= av_gcd(avctx->time_base.den, avctx->time_base.num); if(i > 1){ av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n"); avctx->time_base.den /= i; @@ -529,14 +518,14 @@ int MPV_encode_init(AVCodecContext *avctx) s->mjpeg_hsample[0] = 2; s->mjpeg_hsample[1] = 2>>chroma_h_shift; s->mjpeg_hsample[2] = 2>>chroma_h_shift; - if (!(ENABLE_MJPEG_ENCODER || ENABLE_LJPEG_ENCODER) + if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) || ff_mjpeg_encode_init(s) < 0) return -1; avctx->delay=0; s->low_delay=1; break; case CODEC_ID_H261: - if (!ENABLE_H261_ENCODER) return -1; + if (!CONFIG_H261_ENCODER) return -1; if (ff_h261_get_picture_format(s->width, s->height) < 0) { av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height); return -1; @@ -546,7 +535,7 @@ int MPV_encode_init(AVCodecContext *avctx) s->low_delay=1; break; case CODEC_ID_H263: - if (!ENABLE_H263_ENCODER) return -1; + if (!CONFIG_H263_ENCODER) return -1; if (h263_get_picture_format(s->width, s->height) == 7) { av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. 
Try H.263+.\n", s->width, s->height); return -1; @@ -669,10 +658,10 @@ int MPV_encode_init(AVCodecContext *avctx) if(!s->denoise_dct) s->denoise_dct = denoise_dct_c; s->fast_dct_quantize = s->dct_quantize; - if(s->flags & CODEC_FLAG_TRELLIS_QUANT) + if(avctx->trellis) s->dct_quantize = dct_quantize_trellis_c; - if((ENABLE_H263P_ENCODER || ENABLE_RV20_ENCODER) && s->modified_quant) + if((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant) s->chroma_qscale_table= ff_h263_chroma_qscale_table; s->progressive_frame= s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN)); @@ -681,20 +670,20 @@ int MPV_encode_init(AVCodecContext *avctx) ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp); ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp); - if (ENABLE_H261_ENCODER && s->out_format == FMT_H261) + if (CONFIG_H261_ENCODER && s->out_format == FMT_H261) ff_h261_encode_init(s); - if (ENABLE_ANY_H263_ENCODER && s->out_format == FMT_H263) + if (CONFIG_ANY_H263_ENCODER && s->out_format == FMT_H263) h263_encode_init(s); - if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version) + if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version) ff_msmpeg4_encode_init(s); - if ((ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) + if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) && s->out_format == FMT_MPEG1) ff_mpeg1_encode_init(s); /* init q matrix */ for(i=0;i<64;i++) { int j= s->dsp.idct_permutation[i]; - if(ENABLE_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){ + if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){ s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i]; s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i]; }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){ @@ -726,14 +715,14 @@ int MPV_encode_init(AVCodecContext *avctx) return 0; } -int MPV_encode_end(AVCodecContext *avctx) +av_cold int MPV_encode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; ff_rate_control_uninit(s); MPV_common_end(s); - if ((ENABLE_MJPEG_ENCODER || ENABLE_LJPEG_ENCODER) && s->out_format == FMT_MJPEG) + if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) && s->out_format == FMT_MJPEG) ff_mjpeg_encode_close(s); av_freep(&avctx->extradata); @@ -928,8 +917,8 @@ static int estimate_best_b_count(MpegEncContext *s){ assert(scale>=0 && scale <=3); // emms_c(); - p_lambda= s->last_lambda_for[P_TYPE]; //s->next_picture_ptr->quality; - b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset; + p_lambda= s->last_lambda_for[FF_P_TYPE]; //s->next_picture_ptr->quality; + b_lambda= s->last_lambda_for[FF_B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset; if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else lambda2= (b_lambda*b_lambda + (1<> FF_LAMBDA_SHIFT; @@ -984,7 +973,7 @@ static int estimate_best_b_count(MpegEncContext *s){ c->error[0]= c->error[1]= c->error[2]= 0; - input[0].pict_type= I_TYPE; + input[0].pict_type= FF_I_TYPE; input[0].quality= 1 * FF_QP2LAMBDA; out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]); // rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT; @@ -992,7 +981,7 @@ static int estimate_best_b_count(MpegEncContext *s){ for(i=0; imax_b_frames+1; i++){ int is_p= i % (j+1) == j || i==s->max_b_frames; - input[i+1].pict_type= is_p ? P_TYPE : B_TYPE; + input[i+1].pict_type= is_p ? FF_P_TYPE : FF_B_TYPE; input[i+1].quality= is_p ? 
p_lambda : b_lambda; out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]); rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); @@ -1034,7 +1023,7 @@ static void select_input_picture(MpegEncContext *s){ if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){ s->reordered_input_picture[0]= s->input_picture[0]; - s->reordered_input_picture[0]->pict_type= I_TYPE; + s->reordered_input_picture[0]->pict_type= FF_I_TYPE; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; }else{ int b_frames; @@ -1069,7 +1058,7 @@ static void select_input_picture(MpegEncContext *s){ if(pict_num >= s->rc_context.num_entries) break; if(!s->input_picture[i]){ - s->rc_context.entry[pict_num-1].new_pict_type = P_TYPE; + s->rc_context.entry[pict_num-1].new_pict_type = FF_P_TYPE; break; } @@ -1113,10 +1102,10 @@ static void select_input_picture(MpegEncContext *s){ for(i= b_frames - 1; i>=0; i--){ int type= s->input_picture[i]->pict_type; - if(type && type != B_TYPE) + if(type && type != FF_B_TYPE) b_frames= i; } - if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){ + if(s->input_picture[b_frames]->pict_type == FF_B_TYPE && b_frames == s->max_b_frames){ av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n"); } @@ -1126,31 +1115,31 @@ static void select_input_picture(MpegEncContext *s){ }else{ if(s->flags & CODEC_FLAG_CLOSED_GOP) b_frames=0; - s->input_picture[b_frames]->pict_type= I_TYPE; + s->input_picture[b_frames]->pict_type= FF_I_TYPE; } } if( (s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames - && s->input_picture[b_frames]->pict_type== I_TYPE) + && s->input_picture[b_frames]->pict_type== FF_I_TYPE) b_frames--; s->reordered_input_picture[0]= s->input_picture[b_frames]; - if(s->reordered_input_picture[0]->pict_type != I_TYPE) - s->reordered_input_picture[0]->pict_type= P_TYPE; + if(s->reordered_input_picture[0]->pict_type != FF_I_TYPE) + s->reordered_input_picture[0]->pict_type= FF_P_TYPE; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; for(i=0; ireordered_input_picture[i+1]= s->input_picture[i]; - s->reordered_input_picture[i+1]->pict_type= B_TYPE; + s->reordered_input_picture[i+1]->pict_type= FF_B_TYPE; s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++; } } } no_output_pic: if(s->reordered_input_picture[0]){ - s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0; + s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=FF_B_TYPE ? 
3 : 0; - copy_picture(&s->new_picture, s->reordered_input_picture[0]); + ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]); if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){ // input is a shared pix, so we can't modifiy it -> alloc a new one & ensure that the shared one is reuseable @@ -1182,7 +1171,7 @@ no_output_pic: s->new_picture.data[i]+= INPLACE_OFFSET; } } - copy_picture(&s->current_picture, s->current_picture_ptr); + ff_copy_picture(&s->current_picture, s->current_picture_ptr); s->picture_number= s->new_picture.display_picture_number; //printf("dpn:%d\n", s->picture_number); @@ -1237,12 +1226,12 @@ vbv_retry: MPV_frame_end(s); - if (ENABLE_MJPEG_ENCODER && s->out_format == FMT_MJPEG) + if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG) ff_mjpeg_encode_picture_trailer(s); if(avctx->rc_buffer_size){ RateControlContext *rcc= &s->rc_context; - int max_size= rcc->buffer_index/3; + int max_size= rcc->buffer_index * avctx->rc_max_available_vbv_use; if(put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax){ s->next_lambda= FFMAX(s->lambda+1, s->lambda*(s->qscale+1) / s->qscale); @@ -1252,11 +1241,11 @@ vbv_retry: s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale); } s->mb_skipped = 0; //done in MPV_frame_start() - if(s->pict_type==P_TYPE){ //done in encode_picture() so we must undo it + if(s->pict_type==FF_P_TYPE){ //done in encode_picture() so we must undo it if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4) s->no_rounding ^= 1; } - if(s->pict_type!=B_TYPE){ + if(s->pict_type!=FF_B_TYPE){ s->time_base= s->last_time_base; s->last_non_b_time= s->time - s->pp_time; } @@ -1332,7 +1321,7 @@ vbv_retry: s->total_bits += s->frame_bits; avctx->frame_bits = s->frame_bits; }else{ - assert((pbBufPtr(&s->pb) == s->pb.buf)); + assert((put_bits_ptr(&s->pb) == s->pb.buf)); s->frame_bits=0; } assert((s->frame_bits&7)==0); @@ -1364,7 +1353,7 @@ static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int th }else skip_dc=1; - /* are all which we could set to zero are allready zero? */ + /* Are all we could set to zero already zero? 
*/ if(last_index<=skip_dc - 1) return; for(i=0; i<=last_index; i++){ @@ -1473,7 +1462,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, if(s->codec_id==CODEC_ID_MPEG4){ if(!s->mb_intra){ - if(s->pict_type == B_TYPE){ + if(s->pict_type == FF_B_TYPE){ if(s->dquant&1 || s->mv_dir&MV_DIRECT) s->dquant= 0; } @@ -1550,7 +1539,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, dest_cb = s->dest[1]; dest_cr = s->dest[2]; - if ((!s->no_rounding) || s->pict_type==B_TYPE){ + if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){ op_pix = s->dsp.put_pixels_tab; op_qpix= s->dsp.put_qpel_pixels_tab; }else{ @@ -1698,25 +1687,25 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, switch(s->codec_id){ //FIXME funct ptr could be slightly faster case CODEC_ID_MPEG1VIDEO: case CODEC_ID_MPEG2VIDEO: - if (ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) + if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) mpeg1_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_MPEG4: - if (ENABLE_MPEG4_ENCODER) + if (CONFIG_MPEG4_ENCODER) mpeg4_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_MSMPEG4V2: case CODEC_ID_MSMPEG4V3: case CODEC_ID_WMV1: - if (ENABLE_MSMPEG4_ENCODER) + if (CONFIG_MSMPEG4_ENCODER) msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_WMV2: - if (ENABLE_WMV2_ENCODER) + if (CONFIG_WMV2_ENCODER) ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_H261: - if (ENABLE_H261_ENCODER) + if (CONFIG_H261_ENCODER) ff_h261_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_H263: @@ -1724,12 +1713,12 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, case CODEC_ID_FLV1: case CODEC_ID_RV10: case CODEC_ID_RV20: - if (ENABLE_H263_ENCODER || ENABLE_H263P_ENCODER || - ENABLE_FLV_ENCODER || ENABLE_RV10_ENCODER || ENABLE_RV20_ENCODER) + if (CONFIG_H263_ENCODER || CONFIG_H263P_ENCODER || + CONFIG_FLV_ENCODER || CONFIG_RV10_ENCODER || CONFIG_RV20_ENCODER) h263_encode_mb(s, s->block, motion_x, motion_y); break; case CODEC_ID_MJPEG: - if (ENABLE_MJPEG_ENCODER) + if (CONFIG_MJPEG_ENCODER) ff_mjpeg_encode_mb(s, s->block); break; default: @@ -1906,7 +1895,7 @@ static int sse_mb(MpegEncContext *s){ } static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){ - MpegEncContext *s= arg; + MpegEncContext *s= *(void**)arg; s->me.pre_pass=1; @@ -1925,7 +1914,7 @@ static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){ } static int estimate_motion_thread(AVCodecContext *c, void *arg){ - MpegEncContext *s= arg; + MpegEncContext *s= *(void**)arg; ff_check_alignment(); @@ -1941,7 +1930,7 @@ static int estimate_motion_thread(AVCodecContext *c, void *arg){ s->block_index[3]+=2; /* compute motion vector & mb_type and store in context */ - if(s->pict_type==B_TYPE) + if(s->pict_type==FF_B_TYPE) ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y); else ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y); @@ -1952,7 +1941,7 @@ static int estimate_motion_thread(AVCodecContext *c, void *arg){ } static int mb_var_thread(AVCodecContext *c, void *arg){ - MpegEncContext *s= arg; + MpegEncContext *s= *(void**)arg; int mb_x, mb_y; ff_check_alignment(); @@ -1976,13 +1965,13 @@ static int mb_var_thread(AVCodecContext *c, void *arg){ } static void write_slice_end(MpegEncContext *s){ - if(ENABLE_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){ + if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){ 
if(s->partitioned_frame){ ff_mpeg4_merge_partitions(s); } ff_mpeg4_stuffing(&s->pb); - }else if(ENABLE_MJPEG_ENCODER && s->out_format == FMT_MJPEG){ + }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){ ff_mjpeg_encode_stuffing(&s->pb); } @@ -1994,8 +1983,9 @@ static void write_slice_end(MpegEncContext *s){ } static int encode_thread(AVCodecContext *c, void *arg){ - MpegEncContext *s= arg; + MpegEncContext *s= *(void**)arg; int mb_x, mb_y, pdif = 0; + int chr_h= 16>>s->chroma_y_shift; int i, j; MpegEncContext best_s, backup_s; uint8_t bit_buf[2][MAX_MB_BYTES]; @@ -2038,11 +2028,11 @@ static int encode_thread(AVCodecContext *c, void *arg){ case CODEC_ID_H263: case CODEC_ID_H263P: case CODEC_ID_FLV1: - if (ENABLE_H263_ENCODER || ENABLE_H263P_ENCODER || ENABLE_FLV_ENCODER) + if (CONFIG_H263_ENCODER || CONFIG_H263P_ENCODER || CONFIG_FLV_ENCODER) s->gob_index = ff_h263_get_gob_height(s); break; case CODEC_ID_MPEG4: - if(ENABLE_MPEG4_ENCODER && s->partitioned_frame) + if(CONFIG_MPEG4_ENCODER && s->partitioned_frame) ff_mpeg4_init_partitions(s); break; } @@ -2082,7 +2072,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ s->mb_y = mb_y; // moved into loop, can get changed by H.261 ff_update_block_index(s); - if(ENABLE_H261_ENCODER && s->codec_id == CODEC_ID_H261){ + if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){ ff_h261_reorder_mb_index(s); xy= s->mb_y*s->mb_stride + s->mb_x; mb_type= s->mb_type[xy]; @@ -2115,13 +2105,13 @@ static int encode_thread(AVCodecContext *c, void *arg){ if(s->start_mb_y != mb_y || mb_x!=0){ write_slice_end(s); - if(ENABLE_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){ + if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){ ff_mpeg4_init_partitions(s); } } assert((put_bits_count(&s->pb)&7) == 0); - current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; + current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob; if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){ int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y; @@ -2131,7 +2121,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ #ifndef ALT_BITSTREAM_WRITER s->pb.buf_ptr= s->ptr_lastgob; #endif - assert(pbBufPtr(&s->pb) == s->ptr_lastgob); + assert(put_bits_ptr(&s->pb) == s->ptr_lastgob); } } @@ -2142,21 +2132,21 @@ static int encode_thread(AVCodecContext *c, void *arg){ switch(s->codec_id){ case CODEC_ID_MPEG4: - if (ENABLE_MPEG4_ENCODER) { + if (CONFIG_MPEG4_ENCODER) { ff_mpeg4_encode_video_packet_header(s); ff_mpeg4_clean_buffers(s); } break; case CODEC_ID_MPEG1VIDEO: case CODEC_ID_MPEG2VIDEO: - if (ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) { + if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) { ff_mpeg1_encode_slice_header(s); ff_mpeg1_clean_buffers(s); } break; case CODEC_ID_H263: case CODEC_ID_H263P: - if (ENABLE_H263_ENCODER || ENABLE_H263P_ENCODER) + if (CONFIG_H263_ENCODER || CONFIG_H263P_ENCODER) h263_encode_gob_header(s, mb_y); break; } @@ -2338,7 +2328,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ s->mv[1][0][0] = best_s.mv[1][0][0]; s->mv[1][0][1] = best_s.mv[1][0][1]; - qpi = s->pict_type == B_TYPE ? 2 : 0; + qpi = s->pict_type == FF_B_TYPE ? 
2 : 0; for(; qpi<4; qpi++){ int dquant= dquant_tab[qpi]; qp= last_qp + dquant; @@ -2365,7 +2355,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ } } } - if(ENABLE_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){ + if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){ int mx= s->b_direct_mv_table[xy][0]; int my= s->b_direct_mv_table[xy][1]; @@ -2376,7 +2366,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb, &dmin, &next_block, mx, my); } - if(ENABLE_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){ + if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){ backup_s.dquant = 0; s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; s->mb_intra= 0; @@ -2391,7 +2381,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ if(coded){ int mx,my; memcpy(s->mv, best_s.mv, sizeof(s->mv)); - if(ENABLE_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){ + if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){ mx=my=0; //FIXME find the one we actually used ff_mpeg4_set_direct_mv(s, mx, my); }else if(best_s.mv_dir&MV_DIR_BACKWARD){ @@ -2439,8 +2429,8 @@ static int encode_thread(AVCodecContext *c, void *arg){ } s->last_bits= put_bits_count(&s->pb); - if (ENABLE_ANY_H263_ENCODER && - s->out_format == FMT_H263 && s->pict_type!=B_TYPE) + if (CONFIG_ANY_H263_ENCODER && + s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE) ff_h263_update_motion_val(s); if(next_block==0){ //FIXME 16 vs linesize16 @@ -2489,7 +2479,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ } break; case CANDIDATE_MB_TYPE_DIRECT: - if (ENABLE_MPEG4_ENCODER) { + if (CONFIG_MPEG4_ENCODER) { s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT; s->mb_intra= 0; motion_x=s->b_direct_mv_table[xy][0]; @@ -2498,7 +2488,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ } break; case CANDIDATE_MB_TYPE_DIRECT0: - if (ENABLE_MPEG4_ENCODER) { + if (CONFIG_MPEG4_ENCODER) { s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT; s->mb_intra= 0; ff_mpeg4_set_direct_mv(s, 0, 0); @@ -2566,8 +2556,8 @@ static int encode_thread(AVCodecContext *c, void *arg){ // RAL: Update last macroblock type s->last_mv_dir = s->mv_dir; - if (ENABLE_ANY_H263_ENCODER && - s->out_format == FMT_H263 && s->pict_type!=B_TYPE) + if (CONFIG_ANY_H263_ENCODER && + s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE) ff_h263_update_motion_val(s); MPV_decode_mb(s, s->block); @@ -2590,14 +2580,14 @@ static int encode_thread(AVCodecContext *c, void *arg){ s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize); s->current_picture.error[1] += sse( - s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, - s->dest[1], w>>1, h>>1, s->uvlinesize); + s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize); s->current_picture.error[2] += sse( - s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, - s->dest[2], w>>1, h>>1, s->uvlinesize); + s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, + s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize); } if(s->loop_filter){ - if(ENABLE_ANY_H263_ENCODER && s->out_format == FMT_H263) + if(CONFIG_ANY_H263_ENCODER && s->out_format == FMT_H263) ff_h263_loop_filter(s); } //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb)); @@ -2605,7 +2595,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ } //not beautiful here but we must 
write it before flushing so it has to be here - if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE) + if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == FF_I_TYPE) msmpeg4_encode_ext_header(s); write_slice_end(s); @@ -2613,7 +2603,7 @@ static int encode_thread(AVCodecContext *c, void *arg){ /* Send the last GOB if RTP */ if (s->avctx->rtp_callback) { int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x; - pdif = pbBufPtr(&s->pb) - s->ptr_lastgob; + pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob; /* Call the RTP callback to send the last GOB */ emms_c(); s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb); @@ -2676,13 +2666,13 @@ static int estimate_qp(MpegEncContext *s, int dry_run){ if(s->adaptive_quant){ switch(s->codec_id){ case CODEC_ID_MPEG4: - if (ENABLE_MPEG4_ENCODER) + if (CONFIG_MPEG4_ENCODER) ff_clean_mpeg4_qscales(s); break; case CODEC_ID_H263: case CODEC_ID_H263P: case CODEC_ID_FLV1: - if (ENABLE_H263_ENCODER||ENABLE_H263P_ENCODER||ENABLE_FLV_ENCODER) + if (CONFIG_H263_ENCODER||CONFIG_H263P_ENCODER||CONFIG_FLV_ENCODER) ff_clean_h263_qscales(s); break; } @@ -2701,7 +2691,7 @@ static void set_frame_distances(MpegEncContext * s){ assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE); s->time= s->current_picture_ptr->pts*s->avctx->time_base.num; - if(s->pict_type==B_TYPE){ + if(s->pict_type==FF_B_TYPE){ s->pb_time= s->pp_time - (s->last_non_b_time - s->time); assert(s->pb_time > 0 && s->pb_time < s->pp_time); }else{ @@ -2726,17 +2716,17 @@ static int encode_picture(MpegEncContext *s, int picture_number) // RAL: Condition added for MPEG1VIDEO if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4)) set_frame_distances(s); - if(ENABLE_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4) + if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4) ff_set_mpeg4_time(s); s->me.scene_change_score=0; -// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration +// s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... 
stuff for ME rate distortion - if(s->pict_type==I_TYPE){ + if(s->pict_type==FF_I_TYPE){ if(s->msmpeg4_version >= 3) s->no_rounding=1; else s->no_rounding=0; - }else if(s->pict_type!=B_TYPE){ + }else if(s->pict_type!=FF_B_TYPE){ if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4) s->no_rounding ^= 1; } @@ -2746,7 +2736,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) return -1; ff_get_2pass_fcode(s); }else if(!(s->flags & CODEC_FLAG_QSCALE)){ - if(s->pict_type==B_TYPE) + if(s->pict_type==FF_B_TYPE) s->lambda= s->last_lambda_for[s->pict_type]; else s->lambda= s->last_lambda_for[s->last_non_b_pict_type]; @@ -2758,27 +2748,28 @@ static int encode_picture(MpegEncContext *s, int picture_number) ff_update_duplicate_context(s->thread_context[i], s); } - ff_init_me(s); + if(ff_init_me(s)<0) + return -1; /* Estimate motion for every MB */ - if(s->pict_type != I_TYPE){ + if(s->pict_type != FF_I_TYPE){ s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8; s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8; - if(s->pict_type != B_TYPE && s->avctx->me_threshold==0){ - if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){ - s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); + if(s->pict_type != FF_B_TYPE && s->avctx->me_threshold==0){ + if((s->avctx->pre_me && s->last_non_b_pict_type==FF_I_TYPE) || s->avctx->pre_me==2){ + s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*)); } } - s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); - }else /* if(s->pict_type == I_TYPE) */{ + s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*)); + }else /* if(s->pict_type == FF_I_TYPE) */{ /* I-Frame */ for(i=0; imb_stride*s->mb_height; i++) s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; if(!s->fixed_qscale){ /* finding spatial complexity for I-frame rate control */ - s->avctx->execute(s->avctx, mb_var_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); + s->avctx->execute(s->avctx, mb_var_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*)); } } for(i=1; iavctx->thread_count; i++){ @@ -2788,22 +2779,22 @@ static int encode_picture(MpegEncContext *s, int picture_number) s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. 
mb_var_sum_temp; emms_c(); - if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){ - s->pict_type= I_TYPE; + if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == FF_P_TYPE){ + s->pict_type= FF_I_TYPE; for(i=0; imb_stride*s->mb_height; i++) s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); } if(!s->umvplus){ - if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) { + if(s->pict_type==FF_P_TYPE || s->pict_type==FF_S_TYPE) { s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); if(s->flags & CODEC_FLAG_INTERLACED_ME){ int a,b; a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I); - s->f_code= FFMAX(s->f_code, FFMAX(a,b)); + s->f_code= FFMAX3(s->f_code, a, b); } ff_fix_long_p_mvs(s); @@ -2818,7 +2809,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) } } - if(s->pict_type==B_TYPE){ + if(s->pict_type==FF_B_TYPE){ int a, b; a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD); @@ -2852,7 +2843,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) if (estimate_qp(s, 0) < 0) return -1; - if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE)) + if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==FF_I_TYPE && !(s->flags & CODEC_FLAG_QSCALE)) s->qscale= 3; //reduce clipping problems if (s->out_format == FMT_MJPEG) { @@ -2870,7 +2861,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) //FIXME var duplication s->current_picture_ptr->key_frame= - s->current_picture.key_frame= s->pict_type == I_TYPE; //FIXME pic_ptr + s->current_picture.key_frame= s->pict_type == FF_I_TYPE; //FIXME pic_ptr s->current_picture_ptr->pict_type= s->current_picture.pict_type= s->pict_type; @@ -2880,31 +2871,31 @@ static int encode_picture(MpegEncContext *s, int picture_number) s->last_bits= put_bits_count(&s->pb); switch(s->out_format) { case FMT_MJPEG: - if (ENABLE_MJPEG_ENCODER) + if (CONFIG_MJPEG_ENCODER) ff_mjpeg_encode_picture_header(s); break; case FMT_H261: - if (ENABLE_H261_ENCODER) + if (CONFIG_H261_ENCODER) ff_h261_encode_picture_header(s, picture_number); break; case FMT_H263: - if (ENABLE_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2) + if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2) ff_wmv2_encode_picture_header(s, picture_number); - else if (ENABLE_MSMPEG4_ENCODER && s->h263_msmpeg4) + else if (CONFIG_MSMPEG4_ENCODER && s->h263_msmpeg4) msmpeg4_encode_picture_header(s, picture_number); - else if (ENABLE_MPEG4_ENCODER && s->h263_pred) + else if (CONFIG_MPEG4_ENCODER && s->h263_pred) mpeg4_encode_picture_header(s, picture_number); - else if (ENABLE_RV10_ENCODER && s->codec_id == CODEC_ID_RV10) + else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10) rv10_encode_picture_header(s, picture_number); - else if (ENABLE_RV20_ENCODER && s->codec_id == CODEC_ID_RV20) + else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20) rv20_encode_picture_header(s, picture_number); - else if (ENABLE_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1) + else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1) ff_flv_encode_picture_header(s, picture_number); - else if (ENABLE_ANY_H263_ENCODER) + else if (CONFIG_ANY_H263_ENCODER) h263_encode_picture_header(s, picture_number); break; case 
FMT_MPEG1: - if (ENABLE_MPEG1VIDEO_ENCODER || ENABLE_MPEG2VIDEO_ENCODER) + if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) mpeg1_encode_picture_header(s, picture_number); break; case FMT_H264: @@ -2918,7 +2909,7 @@ static int encode_picture(MpegEncContext *s, int picture_number) for(i=1; iavctx->thread_count; i++){ update_duplicate_context_after_me(s->thread_context[i], s); } - s->avctx->execute(s->avctx, encode_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count); + s->avctx->execute(s->avctx, encode_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count, sizeof(void*)); for(i=1; iavctx->thread_count; i++){ merge_context_after_encode(s, s->thread_context[i]); } @@ -3067,12 +3058,20 @@ int dct_quantize_trellis_c(MpegEncContext *s, survivor_count= 1; for(i=start_i; i<=last_non_zero; i++){ - int level_index, j; - const int dct_coeff= FFABS(block[ scantable[i] ]); - const int zero_distoration= dct_coeff*dct_coeff; + int level_index, j, zero_distortion; + int dct_coeff= FFABS(block[ scantable[i] ]); int best_score=256*256*256*120; + + if ( s->dsp.fdct == fdct_ifast +#ifndef FAAN_POSTSCALE + || s->dsp.fdct == ff_faandct +#endif + ) + dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12; + zero_distortion= dct_coeff*dct_coeff; + for(level_index=0; level_index < coeff_count[i]; level_index++){ - int distoration; + int distortion; int level= coeff[level_index][i]; const int alevel= FFABS(level); int unquant_coeff; @@ -3093,12 +3092,12 @@ int dct_quantize_trellis_c(MpegEncContext *s, unquant_coeff<<= 3; } - distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distoration; + distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion; level+=64; if((level&(~127)) == 0){ for(j=survivor_count-1; j>=0; j--){ int run= i - survivor[j]; - int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda; + int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda; score += score_tab[i-run]; if(score < best_score){ @@ -3111,7 +3110,7 @@ int dct_quantize_trellis_c(MpegEncContext *s, if(s->out_format == FMT_H263){ for(j=survivor_count-1; j>=0; j--){ int run= i - survivor[j]; - int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda; + int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda; score += score_tab[i-run]; if(score < last_score){ last_score= score; @@ -3122,10 +3121,10 @@ int dct_quantize_trellis_c(MpegEncContext *s, } } }else{ - distoration += esc_length*lambda; + distortion += esc_length*lambda; for(j=survivor_count-1; j>=0; j--){ int run= i - survivor[j]; - int score= distoration + score_tab[i-run]; + int score= distortion + score_tab[i-run]; if(score < best_score){ best_score= score; @@ -3137,7 +3136,7 @@ int dct_quantize_trellis_c(MpegEncContext *s, if(s->out_format == FMT_H263){ for(j=survivor_count-1; j>=0; j--){ int run= i - survivor[j]; - int score= distoration + score_tab[i-run]; + int score= distortion + score_tab[i-run]; if(score < last_score){ last_score= score; last_run= run; @@ -3724,7 +3723,8 @@ AVCodec h263_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"), }; AVCodec h263p_encoder = { @@ -3735,7 +3735,8 @@ AVCodec h263p_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, 
-1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"), }; AVCodec flv_encoder = { @@ -3746,7 +3747,8 @@ AVCodec flv_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV)"), }; AVCodec rv10_encoder = { @@ -3757,7 +3759,8 @@ AVCodec rv10_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"), }; AVCodec rv20_encoder = { @@ -3768,7 +3771,8 @@ AVCodec rv20_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"), }; AVCodec mpeg4_encoder = { @@ -3779,8 +3783,9 @@ AVCodec mpeg4_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, .capabilities= CODEC_CAP_DELAY, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), }; AVCodec msmpeg4v1_encoder = { @@ -3791,7 +3796,8 @@ AVCodec msmpeg4v1_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"), }; AVCodec msmpeg4v2_encoder = { @@ -3802,7 +3808,8 @@ AVCodec msmpeg4v2_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"), }; AVCodec msmpeg4v3_encoder = { @@ -3813,7 +3820,8 @@ AVCodec msmpeg4v3_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"), }; AVCodec wmv1_encoder = { @@ -3824,5 +3832,6 @@ AVCodec wmv1_encoder = { MPV_encode_init, MPV_encode_picture, MPV_encode_end, - .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1}, + .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE}, + .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"), };
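
Note on the threading convention adopted in this patch: the per-slice worker functions (pre_estimate_motion_thread, estimate_motion_thread, mb_var_thread, encode_thread) now receive a pointer into an argument array and dereference it with "*(void**)arg", and the matching avctx->execute() calls gain a trailing element size of sizeof(void*). Below is a minimal standalone sketch of that calling convention, with hypothetical names (Ctx, worker, execute_like); it is not the FFmpeg execute() API itself, only an illustration of why the extra dereference is needed when the argument array holds pointers such as s->thread_context[]. The sketch is serial; the real avctx->execute() distributes the calls across thread_count threads.

/* Standalone sketch, hypothetical names -- not the FFmpeg execute() API. */
#include <stdio.h>
#include <stddef.h>

typedef struct Ctx { int id; } Ctx;

/* Worker: gets the address of its slot in the argument array.  The array
 * stores Ctx* elements, so the slot must be dereferenced, analogous to
 * "MpegEncContext *s= *(void**)arg;" in the patch. */
static int worker(void *arg)
{
    Ctx *c = *(void **)arg;
    printf("worker got context %d\n", c->id);
    return 0;
}

/* Dispatcher: walks 'count' elements of 'size' bytes each and hands the
 * address of every element to func(); serial here for clarity. */
static int execute_like(int (*func)(void *), void *arg, int count, int size)
{
    char *p = arg;
    int i;
    for (i = 0; i < count; i++)
        if (func(p + (size_t)i * size))
            return -1;
    return 0;
}

int main(void)
{
    Ctx a = { 0 }, b = { 1 }, c = { 2 };
    Ctx *contexts[3] = { &a, &b, &c };   /* analogous to s->thread_context[] */
    return execute_like(worker, &contexts[0], 3, sizeof(void *));
}

Passing sizeof(void*) as the element size lets the same dispatcher iterate over arrays of structures as well; with a pointer array, each worker simply loads its own context from the slot it is handed.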