cosmetics in dsp init
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 1f630d48c8d4375c252b7facc8ccdddc2e772749..f7600beb3ccd9fd09da0ea09198f6bcc9cb36961 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -61,12 +61,21 @@ static const uint16_t aanscales[64] = {
     4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
 };
 
+static const uint16_t inv_aanscales[64] = {
+  4096,  2953,  3135,  3483,  4096,  5213,  7568, 14846,
+  2953,  2129,  2260,  2511,  2953,  3759,  5457, 10703,
+  3135,  2260,  2399,  2666,  3135,  3990,  5793, 11363,
+  3483,  2511,  2666,  2962,  3483,  4433,  6436, 12625,
+  4096,  2953,  3135,  3483,  4096,  5213,  7568, 14846,
+  5213,  3759,  3990,  4433,  5213,  6635,  9633, 18895,
+  7568,  5457,  5793,  6436,  7568,  9633, 13985, 27432,
+ 14846, 10703, 11363, 12625, 14846, 18895, 27432, 53809,
+};
+
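The new inv_aanscales[] table is the fixed-point inverse of the aanscales[] table above: aanscales[] holds the 2-D AAN post-scale factors in Q14, inv_aanscales[] their reciprocals in Q12, so matching entries multiply to roughly 1<<26. A standalone sketch of how such a table can be regenerated from the 1-D factors (assuming the usual AAN scale s[0]=1, s[k]=sqrt(2)*cos(k*pi/16) and plain lrint rounding):

    /* not part of the patch: regenerate the Q12 inverse AAN post-scale table */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double s[8];
        int i, j;

        for (i = 0; i < 8; i++)
            s[i] = i ? sqrt(2.0) * cos(i * M_PI / 16.0) : 1.0;

        for (i = 0; i < 8; i++) {
            for (j = 0; j < 8; j++)
                printf("%6ld,", lrint(4096.0 / (s[i] * s[j])));  /* Q12 = 1<<12 */
            printf("\n");
        }
        return 0;
    }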
 static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
 static uint8_t default_fcode_tab[MAX_MV*2+1];
 
-enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
-
-static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
+void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                            const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
 {
     int qscale;
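convert_matrix() loses its static qualifier and becomes ff_convert_matrix(), following the ff_ convention for symbols with external linkage so it can be called from outside this file (it is still used further down for the MJPEG qscale-folded matrices). For orientation, the qmat tables it fills are what the quantizers consume; very roughly, with shift standing in for the QMAT_SHIFT constant the real code uses:

    /* sketch only: matrix-based quantization of one block, ignoring the DC
     * special case, overflow clipping and the 16-bit qmat16 variants */
    static int quantize_block_sketch(const short block[64], const int qmat[64],
                                     int bias, int shift, short out[64])
    {
        int i, last_nonzero = -1;
        for (i = 0; i < 64; i++) {
            int level = block[i];
            if (level >= 0)
                level =  (( level * qmat[i]) + bias) >> shift;
            else
                level = -(((-level * qmat[i]) + bias) >> shift);
            out[i] = level;
            if (level)
                last_nonzero = i;
        }
        return last_nonzero;
    }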
@@ -231,7 +240,7 @@ static void MPV_encode_defaults(MpegEncContext *s){
 }
 
 /* init video encoder */
-int MPV_encode_init(AVCodecContext *avctx)
+av_cold int MPV_encode_init(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
     int i;
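MPV_encode_init() and MPV_encode_end() are marked av_cold, a libavutil attribute macro hinting that these init/close paths run rarely, so the compiler may place them away from hot code. Its rough shape (the exact compiler-version guards live in libavutil and may differ):

    #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
    #    define av_cold __attribute__((cold))
    #else
    #    define av_cold
    #endif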
@@ -323,6 +332,11 @@ int MPV_encode_init(AVCodecContext *avctx)
     s->intra_vlc_format= !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
     s->q_scale_type= !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);
 
+#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
+    if (s->flags & CODEC_FLAG_TRELLIS_QUANT)
+        avctx->trellis = 1;
+#endif
+
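This compatibility shim maps the old CODEC_FLAG_TRELLIS_QUANT flag onto the newer avctx->trellis field for code built against pre-52 libavcodec; everything below now tests avctx->trellis only. Caller-side, requesting trellis quantization becomes (sketch):

    /* new style: a per-context integer instead of a codec flag */
    avctx->trellis = 1;   /* was: avctx->flags |= CODEC_FLAG_TRELLIS_QUANT */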
     if(avctx->rc_max_rate && !avctx->rc_buffer_size){
         av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
         return -1;
@@ -342,6 +356,10 @@ int MPV_encode_init(AVCodecContext *avctx)
         return -1;
     }
 
+    if(avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate && avctx->rc_max_rate != avctx->rc_min_rate){
+        av_log(avctx, AV_LOG_INFO, "impossible bitrate constraints, this will fail\n");
+    }
+
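The new diagnostic flags a setup where rc_max_rate is pinned exactly to the nominal bit_rate while rc_min_rate differs: with no headroom above the average, the rate controller cannot make up for any undershoot, so the constraints cannot all be met. A consistent CBR-style configuration that avoids the warning might look like this (values purely illustrative):

    avctx->bit_rate       = 1000 * 1000;            /* 1 Mbit/s average     */
    avctx->rc_min_rate    = avctx->bit_rate;        /* floor == average ... */
    avctx->rc_max_rate    = avctx->bit_rate;        /* ... == ceiling       */
    avctx->rc_buffer_size = 2 * avctx->bit_rate;    /* VBV buffer, required
                                                       by the checks above  */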
     if(avctx->rc_buffer_size && avctx->bit_rate*av_q2d(avctx->time_base) > avctx->rc_buffer_size){
         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
         return -1;
@@ -401,7 +419,7 @@ int MPV_encode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
+    if((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis){
         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
         return -1;
     }
@@ -412,7 +430,7 @@ int MPV_encode_init(AVCodecContext *avctx)
     }
 
     if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
-        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet, set threshold to 1000000000\n");
+        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection are not supported yet, set threshold to 1000000000\n");
         return -1;
     }
 
@@ -422,8 +440,8 @@ int MPV_encode_init(AVCodecContext *avctx)
     }
 
     if(s->flags & CODEC_FLAG_LOW_DELAY){
-        if (s->codec_id != CODEC_ID_MPEG2VIDEO && s->codec_id != CODEC_ID_MPEG1VIDEO){
-            av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg1/2\n");
+        if (s->codec_id != CODEC_ID_MPEG2VIDEO){
+            av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n");
             return -1;
         }
         if (s->max_b_frames != 0){
@@ -667,7 +685,7 @@ int MPV_encode_init(AVCodecContext *avctx)
     if(!s->denoise_dct)
         s->denoise_dct = denoise_dct_c;
     s->fast_dct_quantize = s->dct_quantize;
-    if(s->flags & CODEC_FLAG_TRELLIS_QUANT)
+    if(avctx->trellis)
         s->dct_quantize = dct_quantize_trellis_c;
 
     if((ENABLE_H263P_ENCODER || ENABLE_RV20_ENCODER) && s->modified_quant)
@@ -712,9 +730,9 @@ int MPV_encode_init(AVCodecContext *avctx)
     /* precompute matrix */
     /* for mjpeg, we do include qscale in the matrix */
     if (s->out_format != FMT_MJPEG) {
-        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
+        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                        s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
-        convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
+        ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                        s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
     }
 
@@ -724,7 +742,7 @@ int MPV_encode_init(AVCodecContext *avctx)
     return 0;
 }
 
-int MPV_encode_end(AVCodecContext *avctx)
+av_cold int MPV_encode_end(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
 
@@ -926,8 +944,8 @@ static int estimate_best_b_count(MpegEncContext *s){
     assert(scale>=0 && scale <=3);
 
 //    emms_c();
-    p_lambda= s->last_lambda_for[P_TYPE]; //s->next_picture_ptr->quality;
-    b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
+    p_lambda= s->last_lambda_for[FF_P_TYPE]; //s->next_picture_ptr->quality;
+    b_lambda= s->last_lambda_for[FF_B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
     if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
     lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
 
@@ -982,7 +1000,7 @@ static int estimate_best_b_count(MpegEncContext *s){
 
         c->error[0]= c->error[1]= c->error[2]= 0;
 
-        input[0].pict_type= I_TYPE;
+        input[0].pict_type= FF_I_TYPE;
         input[0].quality= 1 * FF_QP2LAMBDA;
         out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
 //        rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
@@ -990,7 +1008,7 @@ static int estimate_best_b_count(MpegEncContext *s){
         for(i=0; i<s->max_b_frames+1; i++){
             int is_p= i % (j+1) == j || i==s->max_b_frames;
 
-            input[i+1].pict_type= is_p ? P_TYPE : B_TYPE;
+            input[i+1].pict_type= is_p ? FF_P_TYPE : FF_B_TYPE;
             input[i+1].quality= is_p ? p_lambda : b_lambda;
             out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
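For context, estimate_best_b_count() (which this hunk touches only for the FF_*_TYPE renames) scores each candidate I/P/B pattern with cheap trial encodes, summing a rate term weighted by lambda squared; the pattern with the lowest total decides how many B frames to emit. Schematically, assuming out_size[] held the sizes returned by the trial avcodec_encode_video() calls:

    /* sketch of the accumulation used above; out_size[] is hypothetical */
    int i;
    int64_t rd = 0;
    for (i = 0; i < s->max_b_frames + 1; i++)
        rd += ((int64_t)out_size[i] * lambda2) >> (FF_LAMBDA_SHIFT - 3);
    /* smallest rd across the candidate patterns wins */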
@@ -1032,7 +1050,7 @@ static void select_input_picture(MpegEncContext *s){
     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
             s->reordered_input_picture[0]= s->input_picture[0];
-            s->reordered_input_picture[0]->pict_type= I_TYPE;
+            s->reordered_input_picture[0]->pict_type= FF_I_TYPE;
             s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
         }else{
             int b_frames;
@@ -1067,7 +1085,7 @@ static void select_input_picture(MpegEncContext *s){
                     if(pict_num >= s->rc_context.num_entries)
                         break;
                     if(!s->input_picture[i]){
-                        s->rc_context.entry[pict_num-1].new_pict_type = P_TYPE;
+                        s->rc_context.entry[pict_num-1].new_pict_type = FF_P_TYPE;
                         break;
                     }
 
@@ -1111,10 +1129,10 @@ static void select_input_picture(MpegEncContext *s){
 
             for(i= b_frames - 1; i>=0; i--){
                 int type= s->input_picture[i]->pict_type;
-                if(type && type != B_TYPE)
+                if(type && type != FF_B_TYPE)
                     b_frames= i;
             }
-            if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){
+            if(s->input_picture[b_frames]->pict_type == FF_B_TYPE && b_frames == s->max_b_frames){
                 av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
             }
 
@@ -1124,29 +1142,29 @@ static void select_input_picture(MpegEncContext *s){
               }else{
                 if(s->flags & CODEC_FLAG_CLOSED_GOP)
                     b_frames=0;
-                s->input_picture[b_frames]->pict_type= I_TYPE;
+                s->input_picture[b_frames]->pict_type= FF_I_TYPE;
               }
             }
 
             if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
                && b_frames
-               && s->input_picture[b_frames]->pict_type== I_TYPE)
+               && s->input_picture[b_frames]->pict_type== FF_I_TYPE)
                 b_frames--;
 
             s->reordered_input_picture[0]= s->input_picture[b_frames];
-            if(s->reordered_input_picture[0]->pict_type != I_TYPE)
-                s->reordered_input_picture[0]->pict_type= P_TYPE;
+            if(s->reordered_input_picture[0]->pict_type != FF_I_TYPE)
+                s->reordered_input_picture[0]->pict_type= FF_P_TYPE;
             s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
             for(i=0; i<b_frames; i++){
                 s->reordered_input_picture[i+1]= s->input_picture[i];
-                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
+                s->reordered_input_picture[i+1]->pict_type= FF_B_TYPE;
                 s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
             }
         }
     }
 no_output_pic:
     if(s->reordered_input_picture[0]){
-        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
+        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=FF_B_TYPE ? 3 : 0;
 
         copy_picture(&s->new_picture, s->reordered_input_picture[0]);
 
@@ -1250,11 +1268,11 @@ vbv_retry:
                         s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
                 }
                 s->mb_skipped = 0;        //done in MPV_frame_start()
-                if(s->pict_type==P_TYPE){ //done in encode_picture() so we must undo it
+                if(s->pict_type==FF_P_TYPE){ //done in encode_picture() so we must undo it
                     if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
                         s->no_rounding ^= 1;
                 }
-                if(s->pict_type!=B_TYPE){
+                if(s->pict_type!=FF_B_TYPE){
                     s->time_base= s->last_time_base;
                     s->last_non_b_time= s->time - s->pp_time;
                 }
@@ -1362,7 +1380,7 @@ static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int th
     }else
         skip_dc=1;
 
-    /* are all which we could set to zero are allready zero? */
+    /* Are all we could set to zero already zero? */
     if(last_index<=skip_dc - 1) return;
 
     for(i=0; i<=last_index; i++){
@@ -1418,7 +1436,7 @@ static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index
         av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel);
 }
 
-static void get_vissual_weight(int16_t *weight, uint8_t *ptr, int stride){
+static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride){
     int x, y;
 //FIXME optimize
     for(y=0; y<8; y++){
@@ -1471,7 +1489,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
 
                 if(s->codec_id==CODEC_ID_MPEG4){
                     if(!s->mb_intra){
-                        if(s->pict_type == B_TYPE){
+                        if(s->pict_type == FF_B_TYPE){
                             if(s->dquant&1 || s->mv_dir&MV_DIRECT)
                                 s->dquant= 0;
                         }
@@ -1548,7 +1566,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
         dest_cb = s->dest[1];
         dest_cr = s->dest[2];
 
-        if ((!s->no_rounding) || s->pict_type==B_TYPE){
+        if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
             op_pix = s->dsp.put_pixels_tab;
             op_qpix= s->dsp.put_qpel_pixels_tab;
         }else{
@@ -1622,15 +1640,15 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x,
     }
 
     if(s->avctx->quantizer_noise_shaping){
-        if(!skip_dct[0]) get_vissual_weight(weight[0], ptr_y                 , wrap_y);
-        if(!skip_dct[1]) get_vissual_weight(weight[1], ptr_y              + 8, wrap_y);
-        if(!skip_dct[2]) get_vissual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
-        if(!skip_dct[3]) get_vissual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
-        if(!skip_dct[4]) get_vissual_weight(weight[4], ptr_cb                , wrap_c);
-        if(!skip_dct[5]) get_vissual_weight(weight[5], ptr_cr                , wrap_c);
+        if(!skip_dct[0]) get_visual_weight(weight[0], ptr_y                 , wrap_y);
+        if(!skip_dct[1]) get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
+        if(!skip_dct[2]) get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
+        if(!skip_dct[3]) get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
+        if(!skip_dct[4]) get_visual_weight(weight[4], ptr_cb                , wrap_c);
+        if(!skip_dct[5]) get_visual_weight(weight[5], ptr_cr                , wrap_c);
         if(!s->chroma_y_shift){ /* 422 */
-            if(!skip_dct[6]) get_vissual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c);
-            if(!skip_dct[7]) get_vissual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c);
+            if(!skip_dct[6]) get_visual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c);
+            if(!skip_dct[7]) get_visual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c);
         }
         memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count);
     }
@@ -1765,6 +1783,8 @@ static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext
     d->mb_skipped= 0;
     d->qscale= s->qscale;
     d->dquant= s->dquant;
+
+    d->esc3_level_length= s->esc3_level_length;
 }
 
 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
@@ -1802,6 +1822,8 @@ static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *
         d->block_last_index[i]= s->block_last_index[i];
     d->interlaced_dct= s->interlaced_dct;
     d->qscale= s->qscale;
+
+    d->esc3_level_length= s->esc3_level_length;
 }
 
 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
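copy_context_before_encode()/copy_context_after_encode() snapshot and carry over encoder state around the per-macroblock rate-distortion trials in encode_mb_hq(); esc3_level_length (the adaptive escape-3 level size used by the MPEG-4/MSMPEG4-style VLC escape) is now part of that state, so the trial encodes and the finally kept one stay in sync. Roughly:

    /* rough shape of one RD trial (simplified): start from the snapshot,
     * code one candidate, keep the resulting state if it wins;
     * esc3_level_length now travels along in both directions */
    copy_context_before_encode(s, backup, type);   /* restore snapshot */
    /* ... encode this candidate, measure its bit cost and distortion ... */
    if (score < best_score) {
        best_score = score;
        copy_context_after_encode(best, s, type);  /* remember the winner */
    }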
@@ -1935,7 +1957,7 @@ static int estimate_motion_thread(AVCodecContext *c, void *arg){
             s->block_index[3]+=2;
 
             /* compute motion vector & mb_type and store in context */
-            if(s->pict_type==B_TYPE)
+            if(s->pict_type==FF_B_TYPE)
                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
             else
                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
@@ -1990,6 +2012,7 @@ static void write_slice_end(MpegEncContext *s){
 static int encode_thread(AVCodecContext *c, void *arg){
     MpegEncContext *s= arg;
     int mb_x, mb_y, pdif = 0;
+    int chr_h= 16>>s->chroma_y_shift;
     int i, j;
     MpegEncContext best_s, backup_s;
     uint8_t bit_buf[2][MAX_MB_BYTES];
@@ -2332,7 +2355,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
                         s->mv[1][0][0] = best_s.mv[1][0][0];
                         s->mv[1][0][1] = best_s.mv[1][0][1];
 
-                        qpi = s->pict_type == B_TYPE ? 2 : 0;
+                        qpi = s->pict_type == FF_B_TYPE ? 2 : 0;
                         for(; qpi<4; qpi++){
                             int dquant= dquant_tab[qpi];
                             qp= last_qp + dquant;
@@ -2434,7 +2457,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
                 s->last_bits= put_bits_count(&s->pb);
 
                 if (ENABLE_ANY_H263_ENCODER &&
-                    s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
+                    s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
                     ff_h263_update_motion_val(s);
 
                 if(next_block==0){ //FIXME 16 vs linesize16
@@ -2561,7 +2584,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
                 s->last_mv_dir = s->mv_dir;
 
                 if (ENABLE_ANY_H263_ENCODER &&
-                    s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
+                    s->out_format == FMT_H263 && s->pict_type!=FF_B_TYPE)
                     ff_h263_update_motion_val(s);
 
                 MPV_decode_mb(s, s->block);
@@ -2584,11 +2607,11 @@ static int encode_thread(AVCodecContext *c, void *arg){
                     s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
                     s->dest[0], w, h, s->linesize);
                 s->current_picture.error[1] += sse(
-                    s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
-                    s->dest[1], w>>1, h>>1, s->uvlinesize);
+                    s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
+                    s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
                 s->current_picture.error[2] += sse(
-                    s, s->new_picture    .data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
-                    s->dest[2], w>>1, h>>1, s->uvlinesize);
+                    s, s->new_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
+                    s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
             }
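The PSNR/SSE bookkeeping for the chroma planes previously hard-coded 8-line blocks; with chr_h = 16 >> chroma_y_shift (declared at the top of encode_thread()) the summed region now matches the real chroma macroblock height. In short:

    /* chroma region per 16x16 macroblock in this encoder (width always halved):
     *   4:2:0: chroma_y_shift = 1  ->  chr_h = 8   (8x8 block)
     *   4:2:2: chroma_y_shift = 0  ->  chr_h = 16  (8x16 block) */
    int chr_h = 16 >> s->chroma_y_shift;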
             if(s->loop_filter){
                 if(ENABLE_ANY_H263_ENCODER && s->out_format == FMT_H263)
@@ -2599,7 +2622,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
     }
 
     //not beautiful here but we must write it before flushing so it has to be here
-    if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
+    if (ENABLE_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == FF_I_TYPE)
         msmpeg4_encode_ext_header(s);
 
     write_slice_end(s);
@@ -2695,7 +2718,7 @@ static void set_frame_distances(MpegEncContext * s){
     assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
     s->time= s->current_picture_ptr->pts*s->avctx->time_base.num;
 
-    if(s->pict_type==B_TYPE){
+    if(s->pict_type==FF_B_TYPE){
         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
     }else{
@@ -2725,12 +2748,12 @@ static int encode_picture(MpegEncContext *s, int picture_number)
 
     s->me.scene_change_score=0;
 
-//    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration
+//    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
 
-    if(s->pict_type==I_TYPE){
+    if(s->pict_type==FF_I_TYPE){
         if(s->msmpeg4_version >= 3) s->no_rounding=1;
         else                        s->no_rounding=0;
-    }else if(s->pict_type!=B_TYPE){
+    }else if(s->pict_type!=FF_B_TYPE){
         if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
             s->no_rounding ^= 1;
     }
@@ -2740,7 +2763,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
             return -1;
         ff_get_2pass_fcode(s);
     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
-        if(s->pict_type==B_TYPE)
+        if(s->pict_type==FF_B_TYPE)
             s->lambda= s->last_lambda_for[s->pict_type];
         else
             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
@@ -2755,17 +2778,17 @@ static int encode_picture(MpegEncContext *s, int picture_number)
     ff_init_me(s);
 
     /* Estimate motion for every MB */
-    if(s->pict_type != I_TYPE){
+    if(s->pict_type != FF_I_TYPE){
         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
-        if(s->pict_type != B_TYPE && s->avctx->me_threshold==0){
-            if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
+        if(s->pict_type != FF_B_TYPE && s->avctx->me_threshold==0){
+            if((s->avctx->pre_me && s->last_non_b_pict_type==FF_I_TYPE) || s->avctx->pre_me==2){
                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
             }
         }
 
         s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
-    }else /* if(s->pict_type == I_TYPE) */{
+    }else /* if(s->pict_type == FF_I_TYPE) */{
         /* I-Frame */
         for(i=0; i<s->mb_stride*s->mb_height; i++)
             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
@@ -2782,22 +2805,22 @@ static int encode_picture(MpegEncContext *s, int picture_number)
     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
     emms_c();
 
-    if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
-        s->pict_type= I_TYPE;
+    if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == FF_P_TYPE){
+        s->pict_type= FF_I_TYPE;
         for(i=0; i<s->mb_stride*s->mb_height; i++)
             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
     }
 
     if(!s->umvplus){
-        if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
+        if(s->pict_type==FF_P_TYPE || s->pict_type==FF_S_TYPE) {
             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
 
             if(s->flags & CODEC_FLAG_INTERLACED_ME){
                 int a,b;
                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
-                s->f_code= FFMAX(s->f_code, FFMAX(a,b));
+                s->f_code= FFMAX3(s->f_code, a, b);
             }
 
             ff_fix_long_p_mvs(s);
@@ -2812,7 +2835,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
             }
         }
 
-        if(s->pict_type==B_TYPE){
+        if(s->pict_type==FF_B_TYPE){
             int a, b;
 
             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
@@ -2846,7 +2869,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
     if (estimate_qp(s, 0) < 0)
         return -1;
 
-    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
+    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==FF_I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
         s->qscale= 3; //reduce clipping problems
 
     if (s->out_format == FMT_MJPEG) {
@@ -2857,14 +2880,14 @@ static int encode_picture(MpegEncContext *s, int picture_number)
 
             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
         }
-        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
+        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
         s->qscale= 8;
     }
 
     //FIXME var duplication
     s->current_picture_ptr->key_frame=
-    s->current_picture.key_frame= s->pict_type == I_TYPE; //FIXME pic_ptr
+    s->current_picture.key_frame= s->pict_type == FF_I_TYPE; //FIXME pic_ptr
     s->current_picture_ptr->pict_type=
     s->current_picture.pict_type= s->pict_type;
 
@@ -3061,12 +3084,20 @@ int dct_quantize_trellis_c(MpegEncContext *s,
     survivor_count= 1;
 
     for(i=start_i; i<=last_non_zero; i++){
-        int level_index, j;
-        const int dct_coeff= FFABS(block[ scantable[i] ]);
-        const int zero_distoration= dct_coeff*dct_coeff;
+        int level_index, j, zero_distortion;
+        int dct_coeff= FFABS(block[ scantable[i] ]);
         int best_score=256*256*256*120;
+
+        if (   s->dsp.fdct == fdct_ifast
+#ifndef FAAN_POSTSCALE
+            || s->dsp.fdct == ff_faandct
+#endif
+           )
+            dct_coeff= (dct_coeff*inv_aanscales[ scantable[i] ]) >> 12;
+        zero_distortion= dct_coeff*dct_coeff;
+
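The trellis quantizer compares each dequantized candidate against the original coefficient, so both must live in the same scale. fdct_ifast (and ff_faandct when FAAN_POSTSCALE is not defined) leaves its output multiplied by the AAN post-scale factors, hence the new descaling step through the Q12 inverse table. The arithmetic, roughly, assuming the ifast output carries an extra factor of aanscales[i]/2^14:

    /* sketch:
     *   dct_coeff_ifast  ~= dct_coeff_true * aanscales[i] / (1 << 14)
     *   inv_aanscales[i] ~= (1 << 26) / aanscales[i]
     * therefore
     *   dct_coeff_ifast * inv_aanscales[i] ~= dct_coeff_true * (1 << 12)
     * so the ">> 12" above recovers an (approximately) unscaled coefficient,
     * which zero_distortion and the distortion term below are based on. */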
         for(level_index=0; level_index < coeff_count[i]; level_index++){
-            int distoration;
+            int distortion;
             int level= coeff[level_index][i];
             const int alevel= FFABS(level);
             int unquant_coeff;
@@ -3087,12 +3118,12 @@ int dct_quantize_trellis_c(MpegEncContext *s,
                 unquant_coeff<<= 3;
             }
 
-            distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distoration;
+            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
             level+=64;
             if((level&(~127)) == 0){
                 for(j=survivor_count-1; j>=0; j--){
                     int run= i - survivor[j];
-                    int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
+                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                     score += score_tab[i-run];
 
                     if(score < best_score){
@@ -3105,7 +3136,7 @@ int dct_quantize_trellis_c(MpegEncContext *s,
                 if(s->out_format == FMT_H263){
                     for(j=survivor_count-1; j>=0; j--){
                         int run= i - survivor[j];
-                        int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
+                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                         score += score_tab[i-run];
                         if(score < last_score){
                             last_score= score;
@@ -3116,10 +3147,10 @@ int dct_quantize_trellis_c(MpegEncContext *s,
                     }
                 }
             }else{
-                distoration += esc_length*lambda;
+                distortion += esc_length*lambda;
                 for(j=survivor_count-1; j>=0; j--){
                     int run= i - survivor[j];
-                    int score= distoration + score_tab[i-run];
+                    int score= distortion + score_tab[i-run];
 
                     if(score < best_score){
                         best_score= score;
@@ -3131,7 +3162,7 @@ int dct_quantize_trellis_c(MpegEncContext *s,
                 if(s->out_format == FMT_H263){
                   for(j=survivor_count-1; j>=0; j--){
                         int run= i - survivor[j];
-                        int score= distoration + score_tab[i-run];
+                        int score= distortion + score_tab[i-run];
                         if(score < last_score){
                             last_score= score;
                             last_run= run;
@@ -3718,7 +3749,8 @@ AVCodec h263_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("H.263"),
 };
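All the AVCodec declarations below get two touches: .pix_fmts now ends in the symbolic PIX_FMT_NONE terminator rather than a bare -1 (same value, clearer intent), and a .long_name is added, wrapped in NULL_IF_CONFIG_SMALL() so the string is dropped from CONFIG_SMALL builds. Callers walk the list up to the terminator; a caller-side sketch:

    /* sketch: check whether a (hypothetical) 'codec' supports YUV420P */
    const enum PixelFormat *p;
    int supported = 0;
    for (p = codec->pix_fmts; p && *p != PIX_FMT_NONE; p++)
        if (*p == PIX_FMT_YUV420P) {
            supported = 1;
            break;
        }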
 
 AVCodec h263p_encoder = {
@@ -3729,7 +3761,8 @@ AVCodec h263p_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263 version 2"),
 };
 
 AVCodec flv_encoder = {
@@ -3740,7 +3773,8 @@ AVCodec flv_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("Flash Video"),
 };
 
 AVCodec rv10_encoder = {
@@ -3751,7 +3785,8 @@ AVCodec rv10_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
 };
 
 AVCodec rv20_encoder = {
@@ -3762,7 +3797,8 @@ AVCodec rv20_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
 };
 
 AVCodec mpeg4_encoder = {
@@ -3773,8 +3809,9 @@ AVCodec mpeg4_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .capabilities= CODEC_CAP_DELAY,
+    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
 };
 
 AVCodec msmpeg4v1_encoder = {
@@ -3785,7 +3822,8 @@ AVCodec msmpeg4v1_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
 };
 
 AVCodec msmpeg4v2_encoder = {
@@ -3796,7 +3834,8 @@ AVCodec msmpeg4v2_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
 };
 
 AVCodec msmpeg4v3_encoder = {
@@ -3807,7 +3846,8 @@ AVCodec msmpeg4v3_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
 };
 
 AVCodec wmv1_encoder = {
@@ -3818,5 +3858,6 @@ AVCodec wmv1_encoder = {
     MPV_encode_init,
     MPV_encode_picture,
     MPV_encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
 };