1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "mpegvideo.h"
35 #include "mpegvideo_common.h"
36 #include "h263.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "thread.h"
41 #include "aandcttab.h"
42 #include "flv.h"
43 #include "mpeg4video.h"
44 #include "internal.h"
45 #include <limits.h>
46
47 //#undef NDEBUG
48 //#include <assert.h>
49
50 static int encode_picture(MpegEncContext *s, int picture_number);
51 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
52 static int sse_mb(MpegEncContext *s);
53 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
54 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
55
56 /* enable all paranoid tests for rounding, overflows, etc... */
57 //#define PARANOID
58
59 //#define DEBUG
60
61 static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
62 static uint8_t default_fcode_tab[MAX_MV*2+1];
63
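/*
 * ff_convert_matrix(): build per-qscale fixed-point reciprocal tables so the
 * quantizers below can replace the division by qscale*quant_matrix[] with a
 * multiply and shift, roughly level = (coeff * qmat[qscale][i]) >> QMAT_SHIFT.
 * The fdct-specific branches fold the AAN post-scale factors into the
 * reciprocal; the generic branch also fills the 16-bit qmat16 tables
 * (reciprocal plus rounding bias) used by the 16-bit/MMX quantize path.
 * The final loop only checks whether the largest possible product could
 * overflow 32 bits and warns if so.
 */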
64 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
65                            const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
66 {
67     int qscale;
68     int shift=0;
69
70     for(qscale=qmin; qscale<=qmax; qscale++){
71         int i;
72         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
73             dsp->fdct == ff_jpeg_fdct_islow_10
74 #ifdef FAAN_POSTSCALE
75             || dsp->fdct == ff_faandct
76 #endif
77             ) {
78             for(i=0;i<64;i++) {
79                 const int j= dsp->idct_permutation[i];
80                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
81                 /* 19952             <= ff_aanscales[i] * qscale * quant_matrix[i]               <= 249205026 */
82                 /* (1 << 36) / 19952 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= (1 << 36) / 249205026 */
83                 /* 3444240           >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= 275 */
84
85                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
86                                 (qscale * quant_matrix[j]));
87             }
88         } else if (dsp->fdct == fdct_ifast
89 #ifndef FAAN_POSTSCALE
90                    || dsp->fdct == ff_faandct
91 #endif
92                    ) {
93             for(i=0;i<64;i++) {
94                 const int j= dsp->idct_permutation[i];
95                 /* 16 <= qscale * quant_matrix[i] <= 7905 */
96                 /* 19952             <= ff_aanscales[i] * qscale * quant_matrix[i]               <= 249205026 */
97                 /* (1 << 36) / 19952 >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
98                 /* 3444240           >= (1 << 36) / (ff_aanscales[i] * qscale * quant_matrix[i]) >= 275 */
99
100                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
101                                 (ff_aanscales[i] * qscale * quant_matrix[j]));
102             }
103         } else {
104             for(i=0;i<64;i++) {
105                 const int j= dsp->idct_permutation[i];
106                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
107                    So 16           <= qscale * quant_matrix[i]             <= 7905
108                    so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
109                    so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
110                 */
111                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
112 //                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
113                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
114
115                 if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
116                 qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
117             }
118         }
119
120         for(i=intra; i<64; i++){
121             int64_t max= 8191;
122             if (dsp->fdct == fdct_ifast
123 #ifndef FAAN_POSTSCALE
124                    || dsp->fdct == ff_faandct
125 #endif
126                    ) {
127                 max = (8191LL*ff_aanscales[i]) >> 14;
128             }
129             while(((max * qmat[qscale][i]) >> shift) > INT_MAX){
130                 shift++;
131             }
132         }
133     }
134     if(shift){
135         av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger than %d, overflows possible\n", QMAT_SHIFT - shift);
136     }
137 }
138
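/*
 * update_qscale(): derive qscale from the current lambda.  With the usual
 * FF_LAMBDA_SHIFT == 7 and FF_LAMBDA_SCALE == 128 the expression below is
 * (lambda*139 + 8192) >> 14, i.e. roughly lambda/117.9, so for
 * lambda = qp * FF_QP2LAMBDA (= 118) the result tracks qp; e.g.
 * lambda = 1180 gives (1180*139 + 8192) >> 14 = 10.
 */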
139 static inline void update_qscale(MpegEncContext *s){
140     s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
141     s->qscale= av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
142
143     s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
144 }
145
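/*
 * ff_write_quant_matrix(): write a 1-bit "load matrix" flag and, if a custom
 * matrix is given, its 64 entries as 8-bit values in zigzag order; a NULL
 * matrix just writes the 0 flag so the default matrix stays in effect.
 */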
146 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
147     int i;
148
149     if(matrix){
150         put_bits(pb, 1, 1);
151         for(i=0;i<64;i++) {
152             put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
153         }
154     }else
155         put_bits(pb, 1, 0);
156 }
157
158 /**
159  * init s->current_picture.qscale_table from s->lambda_table
160  */
161 void ff_init_qscale_tab(MpegEncContext *s){
162     int8_t * const qscale_table = s->current_picture.f.qscale_table;
163     int i;
164
165     for(i=0; i<s->mb_num; i++){
166         unsigned int lam= s->lambda_table[ s->mb_index2xy[i] ];
167         int qp= (lam*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
168         qscale_table[ s->mb_index2xy[i] ]= av_clip(qp, s->avctx->qmin, s->avctx->qmax);
169     }
170 }
171
172 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
173     int i;
174
175     dst->pict_type              = src->pict_type;
176     dst->quality                = src->quality;
177     dst->coded_picture_number   = src->coded_picture_number;
178     dst->display_picture_number = src->display_picture_number;
179 //    dst->reference              = src->reference;
180     dst->pts                    = src->pts;
181     dst->interlaced_frame       = src->interlaced_frame;
182     dst->top_field_first        = src->top_field_first;
183
184     if(s->avctx->me_threshold){
185         if(!src->motion_val[0])
186             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
187         if(!src->mb_type)
188             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
189         if(!src->ref_index[0])
190             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
191         if(src->motion_subsample_log2 != dst->motion_subsample_log2)
192             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
193             src->motion_subsample_log2, dst->motion_subsample_log2);
194
195         memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
196
197         for(i=0; i<2; i++){
198             int stride= ((16*s->mb_width )>>src->motion_subsample_log2) + 1;
199             int height= ((16*s->mb_height)>>src->motion_subsample_log2);
200
201             if(src->motion_val[i] && src->motion_val[i] != dst->motion_val[i]){
202                 memcpy(dst->motion_val[i], src->motion_val[i], 2*stride*height*sizeof(int16_t));
203             }
204             if(src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]){
205                 memcpy(dst->ref_index[i], src->ref_index[i], s->mb_stride*4*s->mb_height*sizeof(int8_t));
206             }
207         }
208     }
209 }
210
211 static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
212 #define COPY(a) dst->a= src->a
213     COPY(pict_type);
214     COPY(current_picture);
215     COPY(f_code);
216     COPY(b_code);
217     COPY(qscale);
218     COPY(lambda);
219     COPY(lambda2);
220     COPY(picture_in_gop_number);
221     COPY(gop_picture_number);
222     COPY(frame_pred_frame_dct); //FIXME don't set in encode_header
223     COPY(progressive_frame); //FIXME don't set in encode_header
224     COPY(partitioned_frame); //FIXME don't set in encode_header
225 #undef COPY
226 }
227
228 /**
229  * Set the given MpegEncContext to defaults for encoding.
230  * The changed fields will not depend upon the prior state of the MpegEncContext.
231  */
232 static void MPV_encode_defaults(MpegEncContext *s){
233     int i;
234     MPV_common_defaults(s);
235
236     for(i=-16; i<16; i++){
237         default_fcode_tab[i + MAX_MV]= 1;
238     }
239     s->me.mv_penalty= default_mv_penalty;
240     s->fcode_tab= default_fcode_tab;
241 }
242
243 /* init video encoder */
244 av_cold int MPV_encode_init(AVCodecContext *avctx)
245 {
246     MpegEncContext *s = avctx->priv_data;
247     int i;
248     int chroma_h_shift, chroma_v_shift;
249
250     MPV_encode_defaults(s);
251
252     switch (avctx->codec_id) {
253     case CODEC_ID_MPEG2VIDEO:
254         if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P){
255             av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n");
256             return -1;
257         }
258         break;
259     case CODEC_ID_LJPEG:
260         if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P && avctx->pix_fmt != PIX_FMT_YUVJ444P && avctx->pix_fmt != PIX_FMT_BGRA &&
261            ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P && avctx->pix_fmt != PIX_FMT_YUV444P) || avctx->strict_std_compliance>FF_COMPLIANCE_UNOFFICIAL)){
262             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
263             return -1;
264         }
265         break;
266     case CODEC_ID_MJPEG:
267         if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P &&
268            ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) || avctx->strict_std_compliance>FF_COMPLIANCE_UNOFFICIAL)){
269             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
270             return -1;
271         }
272         break;
273     default:
274         if(avctx->pix_fmt != PIX_FMT_YUV420P){
275             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
276             return -1;
277         }
278     }
279
280     switch (avctx->pix_fmt) {
281     case PIX_FMT_YUVJ422P:
282     case PIX_FMT_YUV422P:
283         s->chroma_format = CHROMA_422;
284         break;
285     case PIX_FMT_YUVJ420P:
286     case PIX_FMT_YUV420P:
287     default:
288         s->chroma_format = CHROMA_420;
289         break;
290     }
291
292     s->bit_rate = avctx->bit_rate;
293     s->width = avctx->width;
294     s->height = avctx->height;
295     if(avctx->gop_size > 600 && avctx->strict_std_compliance>FF_COMPLIANCE_EXPERIMENTAL){
296         av_log(avctx, AV_LOG_ERROR, "Warning, keyframe interval too large, reducing it ...\n");
297         avctx->gop_size=600;
298     }
299     s->gop_size = avctx->gop_size;
300     s->avctx = avctx;
301     s->flags= avctx->flags;
302     s->flags2= avctx->flags2;
303     s->max_b_frames= avctx->max_b_frames;
304     s->codec_id= avctx->codec->id;
305     s->luma_elim_threshold  = avctx->luma_elim_threshold;
306     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
307     s->strict_std_compliance= avctx->strict_std_compliance;
308     s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
309     s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
310     s->mpeg_quant= avctx->mpeg_quant;
311     s->rtp_mode= !!avctx->rtp_payload_size;
312     s->intra_dc_precision= avctx->intra_dc_precision;
313     s->user_specified_pts = AV_NOPTS_VALUE;
314
315     if (s->gop_size <= 1) {
316         s->intra_only = 1;
317         s->gop_size = 12;
318     } else {
319         s->intra_only = 0;
320     }
321
322     s->me_method = avctx->me_method;
323
324     /* Fixed QSCALE */
325     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
326
327     s->adaptive_quant= (   s->avctx->lumi_masking
328                         || s->avctx->dark_masking
329                         || s->avctx->temporal_cplx_masking
330                         || s->avctx->spatial_cplx_masking
331                         || s->avctx->p_masking
332                         || s->avctx->border_masking
333                         || (s->flags&CODEC_FLAG_QP_RD))
334                        && !s->fixed_qscale;
335
336     s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
337     s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
338     s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
339     s->intra_vlc_format= !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
340     s->q_scale_type= !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);
341
342     if(avctx->rc_max_rate && !avctx->rc_buffer_size){
343         av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
344         return -1;
345     }
346
347     if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
348         av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
349     }
350
351     if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){
352         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
353         return -1;
354     }
355
356     if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){
357         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
358         return -1;
359     }
360
361     if(avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate && avctx->rc_max_rate != avctx->rc_min_rate){
362         av_log(avctx, AV_LOG_INFO, "impossible bitrate constraints, this will fail\n");
363     }
364
365     if(avctx->rc_buffer_size && avctx->bit_rate*(int64_t)avctx->time_base.num > avctx->rc_buffer_size * (int64_t)avctx->time_base.den){
366         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
367         return -1;
368     }
369
370     if(!s->fixed_qscale && avctx->bit_rate*av_q2d(avctx->time_base) > avctx->bit_rate_tolerance){
371         av_log(avctx, AV_LOG_ERROR, "bitrate tolerance too small for bitrate\n");
372         return -1;
373     }
374
375     if(   s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
376        && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
377        && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){
378
379         av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
380     }
381
382     if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
383        && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P && s->codec_id != CODEC_ID_FLV1){
384         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
385         return -1;
386     }
387
388     if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
389         av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
390         return -1;
391     }
392
393     if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
394         av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
395         return -1;
396     }
397
398     if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
399         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
400         return -1;
401     }
402
403     if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
404         av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
405         return -1;
406     }
407
408     if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
409         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
410         return -1;
411     }
412
413     if ((s->codec_id == CODEC_ID_MPEG4 || s->codec_id == CODEC_ID_H263 ||
414          s->codec_id == CODEC_ID_H263P) &&
415         (avctx->sample_aspect_ratio.num > 255 || avctx->sample_aspect_ratio.den > 255)) {
416         av_log(avctx, AV_LOG_ERROR, "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
417                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
418         return -1;
419     }
420
421     if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
422        && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
423         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
424         return -1;
425     }
426
427     if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
428         av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
429         return -1;
430     }
431
432     if((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis){
433         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
434         return -1;
435     }
436
437     if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
438         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
439         return -1;
440     }
441
442     if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
443         av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection is not supported yet, set threshold to 1000000000\n");
444         return -1;
445     }
446
447     if((s->flags2 & CODEC_FLAG2_INTRA_VLC) && s->codec_id != CODEC_ID_MPEG2VIDEO){
448         av_log(avctx, AV_LOG_ERROR, "intra vlc table not supported by codec\n");
449         return -1;
450     }
451
452     if(s->flags & CODEC_FLAG_LOW_DELAY){
453         if (s->codec_id != CODEC_ID_MPEG2VIDEO){
454             av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n");
455             return -1;
456         }
457         if (s->max_b_frames != 0){
458             av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n");
459             return -1;
460         }
461     }
462
463     if(s->q_scale_type == 1){
464         if(s->codec_id != CODEC_ID_MPEG2VIDEO){
465             av_log(avctx, AV_LOG_ERROR, "non linear quant is only available for mpeg2\n");
466             return -1;
467         }
468         if(avctx->qmax > 12){
469             av_log(avctx, AV_LOG_ERROR, "non linear quant only supports qmax <= 12 currently\n");
470             return -1;
471         }
472     }
473
474     if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4
475        && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO
476        && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
477         av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n");
478         return -1;
479     }
480
481     if(s->avctx->thread_count < 1){
482         av_log(avctx, AV_LOG_ERROR, "automatic thread number detection not supported by codec, patch welcome\n");
483         return -1;
484     }
485
486     if(s->avctx->thread_count > 1)
487         s->rtp_mode= 1;
488
489     if(!avctx->time_base.den || !avctx->time_base.num){
490         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
491         return -1;
492     }
493
494     i= (INT_MAX/2+128)>>8;
495     if(avctx->me_threshold >= i){
496         av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1);
497         return -1;
498     }
499     if(avctx->mb_threshold >= i){
500         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1);
501         return -1;
502     }
503
504     if(avctx->b_frame_strategy && (avctx->flags&CODEC_FLAG_PASS2)){
505         av_log(avctx, AV_LOG_INFO, "notice: b_frame_strategy only affects the first pass\n");
506         avctx->b_frame_strategy = 0;
507     }
508
509     i= av_gcd(avctx->time_base.den, avctx->time_base.num);
510     if(i > 1){
511         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
512         avctx->time_base.den /= i;
513         avctx->time_base.num /= i;
514 //        return -1;
515     }
516
517     if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO || s->codec_id==CODEC_ID_MJPEG){
518         s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
519         s->inter_quant_bias= 0;
520     }else{
521         s->intra_quant_bias=0;
522         s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
523     }
524
525     if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
526         s->intra_quant_bias= avctx->intra_quant_bias;
527     if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
528         s->inter_quant_bias= avctx->inter_quant_bias;
529
530     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
531
532     if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){
533         av_log(avctx, AV_LOG_ERROR, "timebase %d/%d not supported by MPEG 4 standard, "
534                "the maximum admitted value for the timebase denominator is %d\n",
535                s->avctx->time_base.num, s->avctx->time_base.den, (1<<16)-1);
536         return -1;
537     }
538     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
539
540     switch(avctx->codec->id) {
541     case CODEC_ID_MPEG1VIDEO:
542         s->out_format = FMT_MPEG1;
543         s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
544         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
545         break;
546     case CODEC_ID_MPEG2VIDEO:
547         s->out_format = FMT_MPEG1;
548         s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
549         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
550         s->rtp_mode= 1;
551         break;
552     case CODEC_ID_LJPEG:
553     case CODEC_ID_MJPEG:
554         s->out_format = FMT_MJPEG;
555         s->intra_only = 1; /* force intra only for jpeg */
556         if(avctx->codec->id == CODEC_ID_LJPEG && avctx->pix_fmt == PIX_FMT_BGRA){
557             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
558             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
559             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
560         }else{
561             s->mjpeg_vsample[0] = 2;
562             s->mjpeg_vsample[1] = 2>>chroma_v_shift;
563             s->mjpeg_vsample[2] = 2>>chroma_v_shift;
564             s->mjpeg_hsample[0] = 2;
565             s->mjpeg_hsample[1] = 2>>chroma_h_shift;
566             s->mjpeg_hsample[2] = 2>>chroma_h_shift;
567         }
568         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER)
569             || ff_mjpeg_encode_init(s) < 0)
570             return -1;
571         avctx->delay=0;
572         s->low_delay=1;
573         break;
574     case CODEC_ID_H261:
575         if (!CONFIG_H261_ENCODER)  return -1;
576         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
577             av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height);
578             return -1;
579         }
580         s->out_format = FMT_H261;
581         avctx->delay=0;
582         s->low_delay=1;
583         break;
584     case CODEC_ID_H263:
585         if (!CONFIG_H263_ENCODER)  return -1;
586         if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height) == 8) {
587             av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
588             return -1;
589         }
590         s->out_format = FMT_H263;
591         s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
592         avctx->delay=0;
593         s->low_delay=1;
594         break;
595     case CODEC_ID_H263P:
596         s->out_format = FMT_H263;
597         s->h263_plus = 1;
598         /* Fx */
599         s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
600         s->h263_aic= (avctx->flags & CODEC_FLAG_AC_PRED) ? 1:0;
601         s->modified_quant= s->h263_aic;
602         s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
603         s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
604         s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
605         s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
606         s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
607
608         /* /Fx */
609         /* These are just to be sure */
610         avctx->delay=0;
611         s->low_delay=1;
612         break;
613     case CODEC_ID_FLV1:
614         s->out_format = FMT_H263;
615         s->h263_flv = 2; /* format = 1; 11-bit codes */
616         s->unrestricted_mv = 1;
617         s->rtp_mode=0; /* don't allow GOB */
618         avctx->delay=0;
619         s->low_delay=1;
620         break;
621     case CODEC_ID_RV10:
622         s->out_format = FMT_H263;
623         avctx->delay=0;
624         s->low_delay=1;
625         break;
626     case CODEC_ID_RV20:
627         s->out_format = FMT_H263;
628         avctx->delay=0;
629         s->low_delay=1;
630         s->modified_quant=1;
631         s->h263_aic=1;
632         s->h263_plus=1;
633         s->loop_filter=1;
634         s->unrestricted_mv= 0;
635         break;
636     case CODEC_ID_MPEG4:
637         s->out_format = FMT_H263;
638         s->h263_pred = 1;
639         s->unrestricted_mv = 1;
640         s->low_delay= s->max_b_frames ? 0 : 1;
641         avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
642         break;
643     case CODEC_ID_MSMPEG4V2:
644         s->out_format = FMT_H263;
645         s->h263_pred = 1;
646         s->unrestricted_mv = 1;
647         s->msmpeg4_version= 2;
648         avctx->delay=0;
649         s->low_delay=1;
650         break;
651     case CODEC_ID_MSMPEG4V3:
652         s->out_format = FMT_H263;
653         s->h263_pred = 1;
654         s->unrestricted_mv = 1;
655         s->msmpeg4_version= 3;
656         s->flipflop_rounding=1;
657         avctx->delay=0;
658         s->low_delay=1;
659         break;
660     case CODEC_ID_WMV1:
661         s->out_format = FMT_H263;
662         s->h263_pred = 1;
663         s->unrestricted_mv = 1;
664         s->msmpeg4_version= 4;
665         s->flipflop_rounding=1;
666         avctx->delay=0;
667         s->low_delay=1;
668         break;
669     case CODEC_ID_WMV2:
670         s->out_format = FMT_H263;
671         s->h263_pred = 1;
672         s->unrestricted_mv = 1;
673         s->msmpeg4_version= 5;
674         s->flipflop_rounding=1;
675         avctx->delay=0;
676         s->low_delay=1;
677         break;
678     default:
679         return -1;
680     }
681
682     avctx->has_b_frames= !s->low_delay;
683
684     s->encoding = 1;
685
686     s->progressive_frame=
687     s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN));
688
689     /* init */
690     if (MPV_common_init(s) < 0)
691         return -1;
692
693     if(!s->dct_quantize)
694         s->dct_quantize = dct_quantize_c;
695     if(!s->denoise_dct)
696         s->denoise_dct = denoise_dct_c;
697     s->fast_dct_quantize = s->dct_quantize;
698     if(avctx->trellis)
699         s->dct_quantize = dct_quantize_trellis_c;
700
701     if((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
702         s->chroma_qscale_table= ff_h263_chroma_qscale_table;
703
704     s->quant_precision=5;
705
706     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
707     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
708
709     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
710         ff_h261_encode_init(s);
711     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
712         h263_encode_init(s);
713     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
714         ff_msmpeg4_encode_init(s);
715     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
716         && s->out_format == FMT_MPEG1)
717         ff_mpeg1_encode_init(s);
718
719     /* init q matrix */
720     for(i=0;i<64;i++) {
721         int j= s->dsp.idct_permutation[i];
722         if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
723             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
724             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
725         }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
726             s->intra_matrix[j] =
727             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
728         }else
729         { /* mpeg1/2 */
730             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
731             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
732         }
733         if(s->avctx->intra_matrix)
734             s->intra_matrix[j] = s->avctx->intra_matrix[i];
735         if(s->avctx->inter_matrix)
736             s->inter_matrix[j] = s->avctx->inter_matrix[i];
737     }
738
739     /* precompute matrix */
740     /* for mjpeg, we do include qscale in the matrix */
741     if (s->out_format != FMT_MJPEG) {
742         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
743                        s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
744         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
745                        s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
746     }
747
748     if(ff_rate_control_init(s) < 0)
749         return -1;
750
751     return 0;
752 }
753
754 av_cold int MPV_encode_end(AVCodecContext *avctx)
755 {
756     MpegEncContext *s = avctx->priv_data;
757
758     ff_rate_control_uninit(s);
759
760     MPV_common_end(s);
761     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) && s->out_format == FMT_MJPEG)
762         ff_mjpeg_encode_close(s);
763
764     av_freep(&avctx->extradata);
765
766     return 0;
767 }
768
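/*
 * get_sae(): sum of absolute errors of a 16x16 block against a constant
 * value (the caller passes the block mean), i.e. a crude measure of how well
 * the block could be represented by flat, DC-only intra coding.  It feeds
 * get_intra_count() below, which counts the 16x16 blocks where this intra
 * estimate (sae + 500) beats the inter SAD against the reference frame.
 */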
769 static int get_sae(uint8_t *src, int ref, int stride){
770     int x,y;
771     int acc=0;
772
773     for(y=0; y<16; y++){
774         for(x=0; x<16; x++){
775             acc+= FFABS(src[x+y*stride] - ref);
776         }
777     }
778
779     return acc;
780 }
781
782 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
783     int x, y, w, h;
784     int acc=0;
785
786     w= s->width &~15;
787     h= s->height&~15;
788
789     for(y=0; y<h; y+=16){
790         for(x=0; x<w; x+=16){
791             int offset= x + y*stride;
792             int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
793             int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
794             int sae = get_sae(src + offset, mean, stride);
795
796             acc+= sae + 500 < sad;
797         }
798     }
799     return acc;
800 }
801
802
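/*
 * load_input_picture(): queue the user-supplied frame in s->input_picture[],
 * delayed by max_b_frames slots.  The pts must increase monotonically; if it
 * is missing, one is guessed from the previous pts or the display number.
 * When the strides match and the caller preserves its buffer, the user planes
 * are referenced directly; otherwise they are copied into an internal Picture
 * (shifted by INPLACE_OFFSET when no VBV buffer size is configured).
 */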
803 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
804     AVFrame *pic=NULL;
805     int64_t pts;
806     int i;
807     const int encoding_delay= s->max_b_frames;
808     int direct=1;
809
810     if(pic_arg){
811         pts= pic_arg->pts;
812         pic_arg->display_picture_number= s->input_picture_number++;
813
814         if(pts != AV_NOPTS_VALUE){
815             if(s->user_specified_pts != AV_NOPTS_VALUE){
816                 int64_t time= pts;
817                 int64_t last= s->user_specified_pts;
818
819                 if(time <= last){
820                     av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%"PRId64", last=%"PRId64"\n", pts, s->user_specified_pts);
821                     return -1;
822                 }
823             }
824             s->user_specified_pts= pts;
825         }else{
826             if(s->user_specified_pts != AV_NOPTS_VALUE){
827                 s->user_specified_pts=
828                 pts= s->user_specified_pts + 1;
829                 av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n", pts);
830             }else{
831                 pts= pic_arg->display_picture_number;
832             }
833         }
834     }
835
836   if(pic_arg){
837     if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
838     if(pic_arg->linesize[0] != s->linesize) direct=0;
839     if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
840     if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
841
842 //    av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
843
844     if(direct){
845         i= ff_find_unused_picture(s, 1);
846
847         pic= (AVFrame*)&s->picture[i];
848         pic->reference= 3;
849
850         for(i=0; i<4; i++){
851             pic->data[i]= pic_arg->data[i];
852             pic->linesize[i]= pic_arg->linesize[i];
853         }
854         if(ff_alloc_picture(s, (Picture*)pic, 1) < 0){
855             return -1;
856         }
857     }else{
858         i= ff_find_unused_picture(s, 0);
859
860         pic= (AVFrame*)&s->picture[i];
861         pic->reference= 3;
862
863         if(ff_alloc_picture(s, (Picture*)pic, 0) < 0){
864             return -1;
865         }
866
867         if(   pic->data[0] + INPLACE_OFFSET == pic_arg->data[0]
868            && pic->data[1] + INPLACE_OFFSET == pic_arg->data[1]
869            && pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]){
870        // empty
871         }else{
872             int h_chroma_shift, v_chroma_shift;
873             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
874
875             for(i=0; i<3; i++){
876                 int src_stride= pic_arg->linesize[i];
877                 int dst_stride= i ? s->uvlinesize : s->linesize;
878                 int h_shift= i ? h_chroma_shift : 0;
879                 int v_shift= i ? v_chroma_shift : 0;
880                 int w= s->width >>h_shift;
881                 int h= s->height>>v_shift;
882                 uint8_t *src= pic_arg->data[i];
883                 uint8_t *dst= pic->data[i];
884
885                 if(!s->avctx->rc_buffer_size)
886                     dst +=INPLACE_OFFSET;
887
888                 if(src_stride==dst_stride)
889                     memcpy(dst, src, src_stride*h);
890                 else{
891                     while(h--){
892                         memcpy(dst, src, w);
893                         dst += dst_stride;
894                         src += src_stride;
895                     }
896                 }
897             }
898         }
899     }
900     copy_picture_attributes(s, pic, pic_arg);
901         pic->pts= pts; //we set this here to avoid modifying pic_arg
902   }
903
904     /* shift buffer entries */
905     for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
906         s->input_picture[i-1]= s->input_picture[i];
907
908     s->input_picture[encoding_delay]= (Picture*)pic;
909
910     return 0;
911 }
912
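/*
 * skip_check(): decide whether the candidate picture p is close enough to the
 * last coded picture ref to be dropped.  Every 8x8 block of every plane is
 * compared with frame_skip_cmp, the per-block scores are combined according
 * to frame_skip_exp (max, sum of |v|, v*v, ...), and the frame is skipped
 * when the total stays below frame_skip_threshold or below
 * (frame_skip_factor * lambda) >> 8.
 */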
913 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
914     int x, y, plane;
915     int score=0;
916     int64_t score64=0;
917
918     for(plane=0; plane<3; plane++){
919         const int stride = p->f.linesize[plane];
920         const int bw= plane ? 1 : 2;
921         for(y=0; y<s->mb_height*bw; y++){
922             for(x=0; x<s->mb_width*bw; x++){
923                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0: 16;
924                 int v   = s->dsp.frame_skip_cmp[1](s, p->f.data[plane] + 8*(x + y*stride)+off, ref->f.data[plane] + 8*(x + y*stride), stride, 8);
925
926                 switch(s->avctx->frame_skip_exp){
927                     case 0: score= FFMAX(score, v); break;
928                     case 1: score+= FFABS(v);break;
929                     case 2: score+= v*v;break;
930                     case 3: score64+= FFABS(v*v*(int64_t)v);break;
931                     case 4: score64+= v*v*(int64_t)(v*v);break;
932                 }
933             }
934         }
935     }
936
937     if(score) score64= score;
938
939     if(score64 < s->avctx->frame_skip_threshold)
940         return 1;
941     if(score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda)>>8))
942         return 1;
943     return 0;
944 }
945
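/*
 * estimate_best_b_count(): helper for b_frame_strategy == 2.  A temporary
 * encoder of the same codec is run on copies of the queued input pictures
 * downscaled by brd_scale; for each candidate B-run length j the frames are
 * coded as P every j+1 pictures and as B otherwise, and the run is scored as
 * the spent bits weighted by lambda2 plus the reported SSE (c->error[]).
 * The j with the lowest score is returned.
 */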
946 static int estimate_best_b_count(MpegEncContext *s){
947     AVCodec *codec= avcodec_find_encoder(s->avctx->codec_id);
948     AVCodecContext *c = avcodec_alloc_context3(NULL);
949     AVFrame input[FF_MAX_B_FRAMES+2];
950     const int scale= s->avctx->brd_scale;
951     int i, j, out_size, p_lambda, b_lambda, lambda2;
952     int outbuf_size= s->width * s->height; //FIXME
953     uint8_t *outbuf= av_malloc(outbuf_size);
954     int64_t best_rd= INT64_MAX;
955     int best_b_count= -1;
956
957     assert(scale>=0 && scale <=3);
958
959 //    emms_c();
960     p_lambda= s->last_lambda_for[AV_PICTURE_TYPE_P]; //s->next_picture_ptr->quality;
961     b_lambda= s->last_lambda_for[AV_PICTURE_TYPE_B]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
962     if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
963     lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
964
965     c->width = s->width >> scale;
966     c->height= s->height>> scale;
967     c->flags= CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR | CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
968     c->flags|= s->avctx->flags & CODEC_FLAG_QPEL;
969     c->mb_decision= s->avctx->mb_decision;
970     c->me_cmp= s->avctx->me_cmp;
971     c->mb_cmp= s->avctx->mb_cmp;
972     c->me_sub_cmp= s->avctx->me_sub_cmp;
973     c->pix_fmt = PIX_FMT_YUV420P;
974     c->time_base= s->avctx->time_base;
975     c->max_b_frames= s->max_b_frames;
976
977     if (avcodec_open2(c, codec, NULL) < 0)
978         return -1;
979
980     for(i=0; i<s->max_b_frames+2; i++){
981         int ysize= c->width*c->height;
982         int csize= (c->width/2)*(c->height/2);
983         Picture pre_input, *pre_input_ptr= i ? s->input_picture[i-1] : s->next_picture_ptr;
984
985         avcodec_get_frame_defaults(&input[i]);
986         input[i].data[0]= av_malloc(ysize + 2*csize);
987         input[i].data[1]= input[i].data[0] + ysize;
988         input[i].data[2]= input[i].data[1] + csize;
989         input[i].linesize[0]= c->width;
990         input[i].linesize[1]=
991         input[i].linesize[2]= c->width/2;
992
993         if(pre_input_ptr && (!i || s->input_picture[i-1])) {
994             pre_input= *pre_input_ptr;
995
996             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
997                 pre_input.f.data[0] += INPLACE_OFFSET;
998                 pre_input.f.data[1] += INPLACE_OFFSET;
999                 pre_input.f.data[2] += INPLACE_OFFSET;
1000             }
1001
1002             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.f.data[0], pre_input.f.linesize[0], c->width,      c->height);
1003             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.f.data[1], pre_input.f.linesize[1], c->width >> 1, c->height >> 1);
1004             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.f.data[2], pre_input.f.linesize[2], c->width >> 1, c->height >> 1);
1005         }
1006     }
1007
1008     for(j=0; j<s->max_b_frames+1; j++){
1009         int64_t rd=0;
1010
1011         if(!s->input_picture[j])
1012             break;
1013
1014         c->error[0]= c->error[1]= c->error[2]= 0;
1015
1016         input[0].pict_type= AV_PICTURE_TYPE_I;
1017         input[0].quality= 1 * FF_QP2LAMBDA;
1018         out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[0]);
1019 //        rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1020
1021         for(i=0; i<s->max_b_frames+1; i++){
1022             int is_p= i % (j+1) == j || i==s->max_b_frames;
1023
1024             input[i+1].pict_type= is_p ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1025             input[i+1].quality= is_p ? p_lambda : b_lambda;
1026             out_size = avcodec_encode_video(c, outbuf, outbuf_size, &input[i+1]);
1027             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1028         }
1029
1030         /* get the delayed frames */
1031         while(out_size){
1032             out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
1033             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1034         }
1035
1036         rd += c->error[0] + c->error[1] + c->error[2];
1037
1038         if(rd < best_rd){
1039             best_rd= rd;
1040             best_b_count= j;
1041         }
1042     }
1043
1044     av_freep(&outbuf);
1045     avcodec_close(c);
1046     av_freep(&c);
1047
1048     for(i=0; i<s->max_b_frames+2; i++){
1049         av_freep(&input[i].data[0]);
1050     }
1051
1052     return best_b_count;
1053 }
1054
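/*
 * select_input_picture(): pick the next picture to code and fix its type.
 * The first picture (or intra-only mode) becomes an I frame; otherwise the
 * frame-skip check, the two-pass picture types, the configured
 * b_frame_strategy and the GOP / closed-GOP limits decide how many queued
 * pictures are coded as B frames, and the queue is reordered so the anchor
 * (I/P) frame is coded before the B frames that reference it.
 */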
1055 static int select_input_picture(MpegEncContext *s){
1056     int i;
1057
1058     for(i=1; i<MAX_PICTURE_COUNT; i++)
1059         s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1060     s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1061
1062     /* set next picture type & ordering */
1063     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1064         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1065             s->reordered_input_picture[0]= s->input_picture[0];
1066             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1067             s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
1068         }else{
1069             int b_frames;
1070
1071             if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
1072                 if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
1073                 //FIXME check that the gop check above is +-1 correct
1074 //av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->f.data[0], s->input_picture[0]->pts);
1075
1076                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1077                         for(i=0; i<4; i++)
1078                             s->input_picture[0]->f.data[i] = NULL;
1079                         s->input_picture[0]->f.type = 0;
1080                     }else{
1081                         assert(   s->input_picture[0]->type==FF_BUFFER_TYPE_USER
1082                                || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1083
1084                         s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]);
1085                     }
1086
1087                     emms_c();
1088                     ff_vbv_update(s, 0);
1089
1090                     goto no_output_pic;
1091                 }
1092             }
1093
1094             if(s->flags&CODEC_FLAG_PASS2){
1095                 for(i=0; i<s->max_b_frames+1; i++){
1096                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1097
1098                     if(pict_num >= s->rc_context.num_entries)
1099                         break;
1100                     if(!s->input_picture[i]){
1101                         s->rc_context.entry[pict_num-1].new_pict_type = AV_PICTURE_TYPE_P;
1102                         break;
1103                     }
1104
1105                     s->input_picture[i]->f.pict_type =
1106                         s->rc_context.entry[pict_num].new_pict_type;
1107                 }
1108             }
1109
1110             if(s->avctx->b_frame_strategy==0){
1111                 b_frames= s->max_b_frames;
1112                 while(b_frames && !s->input_picture[b_frames]) b_frames--;
1113             }else if(s->avctx->b_frame_strategy==1){
1114                 for(i=1; i<s->max_b_frames+1; i++){
1115                     if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
1116                         s->input_picture[i]->b_frame_score=
1117                             get_intra_count(s, s->input_picture[i  ]->f.data[0],
1118                                                s->input_picture[i-1]->f.data[0], s->linesize) + 1;
1119                     }
1120                 }
1121                 for(i=0; i<s->max_b_frames+1; i++){
1122                     if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/s->avctx->b_sensitivity) break;
1123                 }
1124
1125                 b_frames= FFMAX(0, i-1);
1126
1127                 /* reset scores */
1128                 for(i=0; i<b_frames+1; i++){
1129                     s->input_picture[i]->b_frame_score=0;
1130                 }
1131             }else if(s->avctx->b_frame_strategy==2){
1132                 b_frames= estimate_best_b_count(s);
1133             }else{
1134                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1135                 b_frames=0;
1136             }
1137
1138             emms_c();
1139 //static int b_count=0;
1140 //b_count+= b_frames;
1141 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
1142
1143             for(i= b_frames - 1; i>=0; i--){
1144                 int type = s->input_picture[i]->f.pict_type;
1145                 if(type && type != AV_PICTURE_TYPE_B)
1146                     b_frames= i;
1147             }
1148             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B && b_frames == s->max_b_frames){
1149                 av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
1150             }
1151
1152             if(s->picture_in_gop_number + b_frames >= s->gop_size){
1153               if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){
1154                     b_frames= s->gop_size - s->picture_in_gop_number - 1;
1155               }else{
1156                 if(s->flags & CODEC_FLAG_CLOSED_GOP)
1157                     b_frames=0;
1158                 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1159               }
1160             }
1161
1162             if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
1163                && b_frames
1164                && s->input_picture[b_frames]->f.pict_type== AV_PICTURE_TYPE_I)
1165                 b_frames--;
1166
1167             s->reordered_input_picture[0]= s->input_picture[b_frames];
1168             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1169                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1170             s->reordered_input_picture[0]->f.coded_picture_number = s->coded_picture_number++;
1171             for(i=0; i<b_frames; i++){
1172                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1173                 s->reordered_input_picture[i + 1]->f.pict_type = AV_PICTURE_TYPE_B;
1174                 s->reordered_input_picture[i + 1]->f.coded_picture_number = s->coded_picture_number++;
1175             }
1176         }
1177     }
1178 no_output_pic:
1179     if(s->reordered_input_picture[0]){
1180         s->reordered_input_picture[0]->f.reference = s->reordered_input_picture[0]->f.pict_type!=AV_PICTURE_TYPE_B ? 3 : 0;
1181
1182         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1183
1184         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size) {
1185             // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
1186
1187             int i= ff_find_unused_picture(s, 0);
1188             Picture *pic= &s->picture[i];
1189
1190             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1191             if(ff_alloc_picture(s, pic, 0) < 0){
1192                 return -1;
1193             }
1194
1195             /* mark us unused / free shared pic */
1196             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1197                 s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]);
1198             for(i=0; i<4; i++)
1199                 s->reordered_input_picture[0]->f.data[i] = NULL;
1200             s->reordered_input_picture[0]->f.type = 0;
1201
1202             copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
1203
1204             s->current_picture_ptr= pic;
1205         }else{
1206             // input is not a shared pix -> reuse buffer for current_pix
1207
1208             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1209                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1210
1211             s->current_picture_ptr= s->reordered_input_picture[0];
1212             for(i=0; i<4; i++){
1213                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1214             }
1215         }
1216         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1217
1218         s->picture_number = s->new_picture.f.display_picture_number;
1219 //printf("dpn:%d\n", s->picture_number);
1220     }else{
1221        memset(&s->new_picture, 0, sizeof(Picture));
1222     }
1223     return 0;
1224 }
1225
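/*
 * MPV_encode_picture(): top-level per-frame entry point.  The output buffer
 * is split between the slice threads, the input picture is queued and
 * reordered, and the frame is encoded; when rate control detects a VBV
 * overflow the lambda tables are raised and the frame is re-encoded
 * (vbv_retry).  Afterwards stuffing bytes are appended as needed and, for
 * CBR MPEG-1/2, the vbv_delay field of the already written picture header is
 * patched in place.  Returns the number of bytes written.
 */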
1226 int MPV_encode_picture(AVCodecContext *avctx,
1227                        unsigned char *buf, int buf_size, void *data)
1228 {
1229     MpegEncContext *s = avctx->priv_data;
1230     AVFrame *pic_arg = data;
1231     int i, stuffing_count, context_count = avctx->thread_count;
1232
1233     for(i=0; i<context_count; i++){
1234         int start_y= s->thread_context[i]->start_mb_y;
1235         int   end_y= s->thread_context[i]->  end_mb_y;
1236         int h= s->mb_height;
1237         uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h);
1238         uint8_t *end  = buf + (size_t)(((int64_t) buf_size)*  end_y/h);
1239
1240         init_put_bits(&s->thread_context[i]->pb, start, end - start);
1241     }
1242
1243     s->picture_in_gop_number++;
1244
1245     if(load_input_picture(s, pic_arg) < 0)
1246         return -1;
1247
1248     if(select_input_picture(s) < 0){
1249         return -1;
1250     }
1251
1252     /* output? */
1253     if (s->new_picture.f.data[0]) {
1254         s->pict_type = s->new_picture.f.pict_type;
1255 //emms_c();
1256 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1257         MPV_frame_start(s, avctx);
1258 vbv_retry:
1259         if (encode_picture(s, s->picture_number) < 0)
1260             return -1;
1261
1262         avctx->header_bits = s->header_bits;
1263         avctx->mv_bits     = s->mv_bits;
1264         avctx->misc_bits   = s->misc_bits;
1265         avctx->i_tex_bits  = s->i_tex_bits;
1266         avctx->p_tex_bits  = s->p_tex_bits;
1267         avctx->i_count     = s->i_count;
1268         avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1269         avctx->skip_count  = s->skip_count;
1270
1271         MPV_frame_end(s);
1272
1273         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1274             ff_mjpeg_encode_picture_trailer(s);
1275
1276         if(avctx->rc_buffer_size){
1277             RateControlContext *rcc= &s->rc_context;
1278             int max_size= rcc->buffer_index * avctx->rc_max_available_vbv_use;
1279
1280             if(put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax){
1281                 s->next_lambda= FFMAX(s->lambda+1, s->lambda*(s->qscale+1) / s->qscale);
1282                 if(s->adaptive_quant){
1283                     int i;
1284                     for(i=0; i<s->mb_height*s->mb_stride; i++)
1285                         s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
1286                 }
1287                 s->mb_skipped = 0;        //done in MPV_frame_start()
1288                 if(s->pict_type==AV_PICTURE_TYPE_P){ //done in encode_picture() so we must undo it
1289                     if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
1290                         s->no_rounding ^= 1;
1291                 }
1292                 if(s->pict_type!=AV_PICTURE_TYPE_B){
1293                     s->time_base= s->last_time_base;
1294                     s->last_non_b_time= s->time - s->pp_time;
1295                 }
1296 //                av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
1297                 for(i=0; i<context_count; i++){
1298                     PutBitContext *pb= &s->thread_context[i]->pb;
1299                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1300                 }
1301                 goto vbv_retry;
1302             }
1303
1304             assert(s->avctx->rc_max_rate);
1305         }
1306
1307         if(s->flags&CODEC_FLAG_PASS1)
1308             ff_write_pass1_stats(s);
1309
1310         for(i=0; i<4; i++){
1311             s->current_picture_ptr->f.error[i]  = s->current_picture.f.error[i];
1312             avctx->error[i]                        += s->current_picture_ptr->f.error[i];
1313         }
1314
1315         if(s->flags&CODEC_FLAG_PASS1)
1316             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb));
1317         flush_put_bits(&s->pb);
1318         s->frame_bits  = put_bits_count(&s->pb);
1319
1320         stuffing_count= ff_vbv_update(s, s->frame_bits);
1321         if(stuffing_count){
1322             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){
1323                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1324                 return -1;
1325             }
1326
1327             switch(s->codec_id){
1328             case CODEC_ID_MPEG1VIDEO:
1329             case CODEC_ID_MPEG2VIDEO:
1330                 while(stuffing_count--){
1331                     put_bits(&s->pb, 8, 0);
1332                 }
1333             break;
1334             case CODEC_ID_MPEG4:
1335                 put_bits(&s->pb, 16, 0);
1336                 put_bits(&s->pb, 16, 0x1C3);
1337                 stuffing_count -= 4;
1338                 while(stuffing_count--){
1339                     put_bits(&s->pb, 8, 0xFF);
1340                 }
1341             break;
1342             default:
1343                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1344             }
1345             flush_put_bits(&s->pb);
1346             s->frame_bits  = put_bits_count(&s->pb);
1347         }
1348
1349         /* update mpeg1/2 vbv_delay for CBR */
1350         if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1
1351            && 90000LL * (avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*0xFFFFLL){
1352             int vbv_delay, min_delay;
1353             double inbits = s->avctx->rc_max_rate*av_q2d(s->avctx->time_base);
1354             int    minbits= s->frame_bits - 8*(s->vbv_delay_ptr - s->pb.buf - 1);
1355             double bits   = s->rc_context.buffer_index + minbits - inbits;
1356
1357             if(bits<0)
1358                 av_log(s->avctx, AV_LOG_ERROR, "Internal error, negative bits\n");
1359
1360             assert(s->repeat_first_field==0);
1361
1362             vbv_delay=     bits * 90000                               / s->avctx->rc_max_rate;
1363             min_delay= (minbits * 90000LL + s->avctx->rc_max_rate - 1)/ s->avctx->rc_max_rate;
1364
1365             vbv_delay= FFMAX(vbv_delay, min_delay);
1366
1367             assert(vbv_delay < 0xFFFF);
1368
1369             s->vbv_delay_ptr[0] &= 0xF8;
1370             s->vbv_delay_ptr[0] |= vbv_delay>>13;
1371             s->vbv_delay_ptr[1]  = vbv_delay>>5;
1372             s->vbv_delay_ptr[2] &= 0x07;
1373             s->vbv_delay_ptr[2] |= vbv_delay<<3;
1374             avctx->vbv_delay = vbv_delay*300;
1375         }
1376         s->total_bits += s->frame_bits;
1377         avctx->frame_bits  = s->frame_bits;
1378     }else{
1379         assert((put_bits_ptr(&s->pb) == s->pb.buf));
1380         s->frame_bits=0;
1381     }
1382     assert((s->frame_bits&7)==0);
1383
1384     return s->frame_bits/8;
1385 }
1386
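     /* Zero out a block that contains only a few isolated +-1 coefficients: each
      * +-1 is scored via tab[] according to the run of zeros preceding it along the
      * scan, and if the total score stays below the threshold the block is cleared
      * (the DC coefficient is kept unless the threshold was passed in negative).
      * Blocks containing any coefficient with |level| > 1 are left untouched. */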
1387 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
1388 {
1389     static const char tab[64]=
1390         {3,2,2,1,1,1,1,1,
1391          1,1,1,1,1,1,1,1,
1392          1,1,1,1,1,1,1,1,
1393          0,0,0,0,0,0,0,0,
1394          0,0,0,0,0,0,0,0,
1395          0,0,0,0,0,0,0,0,
1396          0,0,0,0,0,0,0,0,
1397          0,0,0,0,0,0,0,0};
1398     int score=0;
1399     int run=0;
1400     int i;
1401     DCTELEM *block= s->block[n];
1402     const int last_index= s->block_last_index[n];
1403     int skip_dc;
1404
1405     if(threshold<0){
1406         skip_dc=0;
1407         threshold= -threshold;
1408     }else
1409         skip_dc=1;
1410
1411     /* Are all the coefficients we could set to zero already zero? */
1412     if(last_index<=skip_dc - 1) return;
1413
1414     for(i=0; i<=last_index; i++){
1415         const int j = s->intra_scantable.permutated[i];
1416         const int level = FFABS(block[j]);
1417         if(level==1){
1418             if(skip_dc && i==0) continue;
1419             score+= tab[run];
1420             run=0;
1421         }else if(level>1){
1422             return;
1423         }else{
1424             run++;
1425         }
1426     }
1427     if(score >= threshold) return;
1428     for(i=skip_dc; i<=last_index; i++){
1429         const int j = s->intra_scantable.permutated[i];
1430         block[j]=0;
1431     }
1432     if(block[0]) s->block_last_index[n]= 0;
1433     else         s->block_last_index[n]= -1;
1434 }
1435
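     /* Clamp the quantized coefficients to the range the codec can represent,
      * [min_qcoeff, max_qcoeff], leaving the intra DC coefficient alone, and warn
      * when clipping happened while the simple (non-RD) macroblock decision is in
      * use. */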
1436 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
1437 {
1438     int i;
1439     const int maxlevel= s->max_qcoeff;
1440     const int minlevel= s->min_qcoeff;
1441     int overflow=0;
1442
1443     if(s->mb_intra){
1444         i=1; //skip clipping of intra dc
1445     }else
1446         i=0;
1447
1448     for(;i<=last_index; i++){
1449         const int j= s->intra_scantable.permutated[i];
1450         int level = block[j];
1451
1452         if     (level>maxlevel){
1453             level=maxlevel;
1454             overflow++;
1455         }else if(level<minlevel){
1456             level=minlevel;
1457             overflow++;
1458         }
1459
1460         block[j]= level;
1461     }
1462
1463     if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1464         av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel);
1465 }
1466
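     /* Per-pixel weights for the noise-shaping pass (dct_quantize_refine): each
      * entry is 36 times the standard deviation of the pixel's 3x3 neighbourhood,
      * clipped at the block border, so the refinement can take local activity into
      * account. */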
1467 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride){
1468     int x, y;
1469 //FIXME optimize
1470     for(y=0; y<8; y++){
1471         for(x=0; x<8; x++){
1472             int x2, y2;
1473             int sum=0;
1474             int sqr=0;
1475             int count=0;
1476
1477             for(y2= FFMAX(y-1, 0); y2 < FFMIN(8, y+2); y2++){
1478                 for(x2= FFMAX(x-1, 0); x2 < FFMIN(8, x+2); x2++){
1479                     int v= ptr[x2 + y2*stride];
1480                     sum += v;
1481                     sqr += v*v;
1482                     count++;
1483                 }
1484             }
1485             weight[x + 8*y]= (36*ff_sqrt(count*sqr - sum*sum)) / count;
1486         }
1487     }
1488 }
1489
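     /* Encode one macroblock: fetch the source pixels (and, for inter MBs, the
      * motion-compensated prediction), decide between frame and field DCT,
      * transform and quantize the blocks, optionally apply noise shaping and
      * coefficient elimination, and finally write the macroblock with the
      * codec-specific bitstream writer. */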
1490 static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_count)
1491 {
1492     int16_t weight[8][64];
1493     DCTELEM orig[8][64];
1494     const int mb_x= s->mb_x;
1495     const int mb_y= s->mb_y;
1496     int i;
1497     int skip_dct[8];
1498     int dct_offset   = s->linesize*8; //default for progressive frames
1499     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1500     int wrap_y, wrap_c;
1501
1502     for(i=0; i<mb_block_count; i++) skip_dct[i]=s->skipdct;
1503
1504     if(s->adaptive_quant){
1505         const int last_qp= s->qscale;
1506         const int mb_xy= mb_x + mb_y*s->mb_stride;
1507
1508         s->lambda= s->lambda_table[mb_xy];
1509         update_qscale(s);
1510
1511         if(!(s->flags&CODEC_FLAG_QP_RD)){
1512             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1513             s->dquant= s->qscale - last_qp;
1514
1515             if(s->out_format==FMT_H263){
1516                 s->dquant= av_clip(s->dquant, -2, 2);
1517
1518                 if(s->codec_id==CODEC_ID_MPEG4){
1519                     if(!s->mb_intra){
1520                         if(s->pict_type == AV_PICTURE_TYPE_B){
1521                             if(s->dquant&1 || s->mv_dir&MV_DIRECT)
1522                                 s->dquant= 0;
1523                         }
1524                         if(s->mv_type==MV_TYPE_8X8)
1525                             s->dquant=0;
1526                     }
1527                 }
1528             }
1529         }
1530         ff_set_qscale(s, last_qp + s->dquant);
1531     }else if(s->flags&CODEC_FLAG_QP_RD)
1532         ff_set_qscale(s, s->qscale + s->dquant);
1533
1534     wrap_y = s->linesize;
1535     wrap_c = s->uvlinesize;
1536     ptr_y  = s->new_picture.f.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
1537     ptr_cb = s->new_picture.f.data[1] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1538     ptr_cr = s->new_picture.f.data[2] + (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1539
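         /* If the macroblock sticks out of the picture (dimensions that are not a
          * multiple of 16), read it through emulated_edge_mc(), which replicates the
          * border pixels into edge_emu_buffer. */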
1540     if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
1541         uint8_t *ebuf= s->edge_emu_buffer + 32;
1542         s->dsp.emulated_edge_mc(ebuf            , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width   , s->height);
1543         ptr_y= ebuf;
1544         s->dsp.emulated_edge_mc(ebuf+18*wrap_y  , ptr_cb, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
1545         ptr_cb= ebuf+18*wrap_y;
1546         s->dsp.emulated_edge_mc(ebuf+18*wrap_y+8, ptr_cr, wrap_c, 8, mb_block_height, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
1547         ptr_cr= ebuf+18*wrap_y+8;
1548     }
1549
1550     if (s->mb_intra) {
1551         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
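                 /* Frame vs. field DCT for the luma blocks: compare the interlace
                  * cost of the two frame halves against that of the two fields (with
                  * a small bias towards progressive) and, if field DCT wins, double
                  * the luma stride so each 8x8 block covers a single field. */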
1552             int progressive_score, interlaced_score;
1553
1554             s->interlaced_dct=0;
1555             progressive_score= s->dsp.ildct_cmp[4](s, ptr_y           , NULL, wrap_y, 8)
1556                               +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y*8, NULL, wrap_y, 8) - 400;
1557
1558             if(progressive_score > 0){
1559                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y           , NULL, wrap_y*2, 8)
1560                                   +s->dsp.ildct_cmp[4](s, ptr_y + wrap_y  , NULL, wrap_y*2, 8);
1561                 if(progressive_score > interlaced_score){
1562                     s->interlaced_dct=1;
1563
1564                     dct_offset= wrap_y;
1565                     wrap_y<<=1;
1566                     if (s->chroma_format == CHROMA_422)
1567                         wrap_c<<=1;
1568                 }
1569             }
1570         }
1571
1572         s->dsp.get_pixels(s->block[0], ptr_y                 , wrap_y);
1573         s->dsp.get_pixels(s->block[1], ptr_y              + 8, wrap_y);
1574         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset    , wrap_y);
1575         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
1576
1577         if(s->flags&CODEC_FLAG_GRAY){
1578             skip_dct[4]= 1;
1579             skip_dct[5]= 1;
1580         }else{
1581             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1582             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1583             if(!s->chroma_y_shift){ /* 422 */
1584                 s->dsp.get_pixels(s->block[6], ptr_cb + (dct_offset>>1), wrap_c);
1585                 s->dsp.get_pixels(s->block[7], ptr_cr + (dct_offset>>1), wrap_c);
1586             }
1587         }
1588     }else{
1589         op_pixels_func (*op_pix)[4];
1590         qpel_mc_func (*op_qpix)[16];
1591         uint8_t *dest_y, *dest_cb, *dest_cr;
1592
1593         dest_y  = s->dest[0];
1594         dest_cb = s->dest[1];
1595         dest_cr = s->dest[2];
1596
1597         if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1598             op_pix = s->dsp.put_pixels_tab;
1599             op_qpix= s->dsp.put_qpel_pixels_tab;
1600         }else{
1601             op_pix = s->dsp.put_no_rnd_pixels_tab;
1602             op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
1603         }
1604
1605         if (s->mv_dir & MV_DIR_FORWARD) {
1606             MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
1607             op_pix = s->dsp.avg_pixels_tab;
1608             op_qpix= s->dsp.avg_qpel_pixels_tab;
1609         }
1610         if (s->mv_dir & MV_DIR_BACKWARD) {
1611             MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
1612         }
1613
1614         if(s->flags&CODEC_FLAG_INTERLACED_DCT){
1615             int progressive_score, interlaced_score;
1616
1617             s->interlaced_dct=0;
1618             progressive_score= s->dsp.ildct_cmp[0](s, dest_y           , ptr_y           , wrap_y, 8)
1619                               +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400;
1620
1621             if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400;
1622
1623             if(progressive_score>0){
1624                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y           , ptr_y           , wrap_y*2, 8)
1625                                   +s->dsp.ildct_cmp[0](s, dest_y + wrap_y  , ptr_y + wrap_y  , wrap_y*2, 8);
1626
1627                 if(progressive_score > interlaced_score){
1628                     s->interlaced_dct=1;
1629
1630                     dct_offset= wrap_y;
1631                     wrap_y<<=1;
1632                     if (s->chroma_format == CHROMA_422)
1633                         wrap_c<<=1;
1634                 }
1635             }
1636         }
1637
1638         s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
1639         s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
1640         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
1641         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
1642
1643         if(s->flags&CODEC_FLAG_GRAY){
1644             skip_dct[4]= 1;
1645             skip_dct[5]= 1;
1646         }else{
1647             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1648             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1649             if(!s->chroma_y_shift){ /* 422 */
1650                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset>>1), dest_cb + (dct_offset>>1), wrap_c);
1651                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset>>1), dest_cr + (dct_offset>>1), wrap_c);
1652             }
1653         }
1654         /* pre quantization */
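             /* Heuristic DCT skip: when the motion-compensated variance of this MB
              * is small, any block whose SAD against the prediction is below
              * 20*qscale is assumed to quantize to nothing and its DCT is skipped. */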
1655         if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
1656             //FIXME optimize
1657             if(s->dsp.sad[1](NULL, ptr_y               , dest_y               , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1;
1658             if(s->dsp.sad[1](NULL, ptr_y            + 8, dest_y            + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1;
1659             if(s->dsp.sad[1](NULL, ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1;
1660             if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1;
1661             if(s->dsp.sad[1](NULL, ptr_cb              , dest_cb              , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1;
1662             if(s->dsp.sad[1](NULL, ptr_cr              , dest_cr              , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1;
1663             if(!s->chroma_y_shift){ /* 422 */
1664                 if(s->dsp.sad[1](NULL, ptr_cb +(dct_offset>>1), dest_cb +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[6]= 1;
1665                 if(s->dsp.sad[1](NULL, ptr_cr +(dct_offset>>1), dest_cr +(dct_offset>>1), wrap_c, 8) < 20*s->qscale) skip_dct[7]= 1;
1666             }
1667         }
1668     }
1669
1670     if(s->avctx->quantizer_noise_shaping){
1671         if(!skip_dct[0]) get_visual_weight(weight[0], ptr_y                 , wrap_y);
1672         if(!skip_dct[1]) get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1673         if(!skip_dct[2]) get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1674         if(!skip_dct[3]) get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1675         if(!skip_dct[4]) get_visual_weight(weight[4], ptr_cb                , wrap_c);
1676         if(!skip_dct[5]) get_visual_weight(weight[5], ptr_cr                , wrap_c);
1677         if(!s->chroma_y_shift){ /* 422 */
1678             if(!skip_dct[6]) get_visual_weight(weight[6], ptr_cb + (dct_offset>>1), wrap_c);
1679             if(!skip_dct[7]) get_visual_weight(weight[7], ptr_cr + (dct_offset>>1), wrap_c);
1680         }
1681         memcpy(orig[0], s->block[0], sizeof(DCTELEM)*64*mb_block_count);
1682     }
1683
1684     /* DCT & quantize */
1685     assert(s->out_format!=FMT_MJPEG || s->qscale==8);
1686     {
1687         for(i=0;i<mb_block_count;i++) {
1688             if(!skip_dct[i]){
1689                 int overflow;
1690                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1691             // FIXME: we could decide to change the quantizer instead of clipping
1692             // JS: I don't think that would be a good idea; it could lower quality instead
1693             //     of improving it. Only intra DC clipping deserves changes in the quantizer.
1694                 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
1695             }else
1696                 s->block_last_index[i]= -1;
1697         }
1698         if(s->avctx->quantizer_noise_shaping){
1699             for(i=0;i<mb_block_count;i++) {
1700                 if(!skip_dct[i]){
1701                     s->block_last_index[i] = dct_quantize_refine(s, s->block[i], weight[i], orig[i], i, s->qscale);
1702                 }
1703             }
1704         }
1705
1706         if(s->luma_elim_threshold && !s->mb_intra)
1707             for(i=0; i<4; i++)
1708                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1709         if(s->chroma_elim_threshold && !s->mb_intra)
1710             for(i=4; i<mb_block_count; i++)
1711                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1712
1713         if(s->flags & CODEC_FLAG_CBP_RD){
1714             for(i=0;i<mb_block_count;i++) {
1715                 if(s->block_last_index[i] == -1)
1716                     s->coded_score[i]= INT_MAX/256;
1717             }
1718         }
1719     }
1720
1721     if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
1722         s->block_last_index[4]=
1723         s->block_last_index[5]= 0;
1724         s->block[4][0]=
1725         s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
1726     }
1727
1728     // FIXME: the non-C quantize code returns an incorrect block_last_index
1729     if(s->alternate_scan && s->dct_quantize != dct_quantize_c){
1730         for(i=0; i<mb_block_count; i++){
1731             int j;
1732             if(s->block_last_index[i]>0){
1733                 for(j=63; j>0; j--){
1734                     if(s->block[i][ s->intra_scantable.permutated[j] ]) break;
1735                 }
1736                 s->block_last_index[i]= j;
1737             }
1738         }
1739     }
1740
1741     /* huffman encode */
1742     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
1743     case CODEC_ID_MPEG1VIDEO:
1744     case CODEC_ID_MPEG2VIDEO:
1745         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1746             mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1747         break;
1748     case CODEC_ID_MPEG4:
1749         if (CONFIG_MPEG4_ENCODER)
1750             mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1751         break;
1752     case CODEC_ID_MSMPEG4V2:
1753     case CODEC_ID_MSMPEG4V3:
1754     case CODEC_ID_WMV1:
1755         if (CONFIG_MSMPEG4_ENCODER)
1756             msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1757         break;
1758     case CODEC_ID_WMV2:
1759         if (CONFIG_WMV2_ENCODER)
1760             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1761         break;
1762     case CODEC_ID_H261:
1763         if (CONFIG_H261_ENCODER)
1764             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1765         break;
1766     case CODEC_ID_H263:
1767     case CODEC_ID_H263P:
1768     case CODEC_ID_FLV1:
1769     case CODEC_ID_RV10:
1770     case CODEC_ID_RV20:
1771         if (CONFIG_H263_ENCODER)
1772             h263_encode_mb(s, s->block, motion_x, motion_y);
1773         break;
1774     case CODEC_ID_MJPEG:
1775         if (CONFIG_MJPEG_ENCODER)
1776             ff_mjpeg_encode_mb(s, s->block);
1777         break;
1778     default:
1779         assert(0);
1780     }
1781 }
1782
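     /* 4:2:0 uses 6 blocks per macroblock with 8 chroma lines; other chroma
      * formats use 8 blocks with 16 chroma lines. */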
1783 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
1784 {
1785     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
1786     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
1787 }
1788
1789 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
1790     int i;
1791
1792     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
1793
1794     /* mpeg1 */
1795     d->mb_skip_run= s->mb_skip_run;
1796     for(i=0; i<3; i++)
1797         d->last_dc[i]= s->last_dc[i];
1798
1799     /* statistics */
1800     d->mv_bits= s->mv_bits;
1801     d->i_tex_bits= s->i_tex_bits;
1802     d->p_tex_bits= s->p_tex_bits;
1803     d->i_count= s->i_count;
1804     d->f_count= s->f_count;
1805     d->b_count= s->b_count;
1806     d->skip_count= s->skip_count;
1807     d->misc_bits= s->misc_bits;
1808     d->last_bits= 0;
1809
1810     d->mb_skipped= 0;
1811     d->qscale= s->qscale;
1812     d->dquant= s->dquant;
1813
1814     d->esc3_level_length= s->esc3_level_length;
1815 }
1816
1817 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
1818     int i;
1819
1820     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
1821     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
1822
1823     /* mpeg1 */
1824     d->mb_skip_run= s->mb_skip_run;
1825     for(i=0; i<3; i++)
1826         d->last_dc[i]= s->last_dc[i];
1827
1828     /* statistics */
1829     d->mv_bits= s->mv_bits;
1830     d->i_tex_bits= s->i_tex_bits;
1831     d->p_tex_bits= s->p_tex_bits;
1832     d->i_count= s->i_count;
1833     d->f_count= s->f_count;
1834     d->b_count= s->b_count;
1835     d->skip_count= s->skip_count;
1836     d->misc_bits= s->misc_bits;
1837
1838     d->mb_intra= s->mb_intra;
1839     d->mb_skipped= s->mb_skipped;
1840     d->mv_type= s->mv_type;
1841     d->mv_dir= s->mv_dir;
1842     d->pb= s->pb;
1843     if(s->data_partitioning){
1844         d->pb2= s->pb2;
1845         d->tex_pb= s->tex_pb;
1846     }
1847     d->block= s->block;
1848     for(i=0; i<8; i++)
1849         d->block_last_index[i]= s->block_last_index[i];
1850     d->interlaced_dct= s->interlaced_dct;
1851     d->qscale= s->qscale;
1852
1853     d->esc3_level_length= s->esc3_level_length;
1854 }
1855
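     /* Try one candidate coding mode for the current macroblock: encode it into
      * one of two scratch bitstream buffers (and, when RD macroblock decision is
      * enabled, reconstruct it and add the lambda-weighted SSE to the bit count).
      * If the result beats the best score so far, remember it and flip next_block
      * so the winning buffer is kept. */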
1856 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
1857                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
1858                            int *dmin, int *next_block, int motion_x, int motion_y)
1859 {
1860     int score;
1861     uint8_t *dest_backup[3];
1862
1863     copy_context_before_encode(s, backup, type);
1864
1865     s->block= s->blocks[*next_block];
1866     s->pb= pb[*next_block];
1867     if(s->data_partitioning){
1868         s->pb2   = pb2   [*next_block];
1869         s->tex_pb= tex_pb[*next_block];
1870     }
1871
1872     if(*next_block){
1873         memcpy(dest_backup, s->dest, sizeof(s->dest));
1874         s->dest[0] = s->rd_scratchpad;
1875         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
1876         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
1877         assert(s->linesize >= 32); //FIXME
1878     }
1879
1880     encode_mb(s, motion_x, motion_y);
1881
1882     score= put_bits_count(&s->pb);
1883     if(s->data_partitioning){
1884         score+= put_bits_count(&s->pb2);
1885         score+= put_bits_count(&s->tex_pb);
1886     }
1887
1888     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
1889         MPV_decode_mb(s, s->block);
1890
1891         score *= s->lambda2;
1892         score += sse_mb(s) << FF_LAMBDA_SHIFT;
1893     }
1894
1895     if(*next_block){
1896         memcpy(s->dest, dest_backup, sizeof(s->dest));
1897     }
1898
1899     if(score<*dmin){
1900         *dmin= score;
1901         *next_block^=1;
1902
1903         copy_context_after_encode(best, s, type);
1904     }
1905 }
1906
1907 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
1908     uint32_t *sq = ff_squareTbl + 256;
1909     int acc=0;
1910     int x,y;
1911
1912     if(w==16 && h==16)
1913         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
1914     else if(w==8 && h==8)
1915         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
1916
1917     for(y=0; y<h; y++){
1918         for(x=0; x<w; x++){
1919             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
1920         }
1921     }
1922
1923     assert(acc>=0);
1924
1925     return acc;
1926 }
1927
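     /* Sum of squared errors between the source macroblock and its reconstruction
      * over luma and both chroma planes; NSSE is used instead when mb_cmp asks for
      * it and the MB is not clipped at the picture border. */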
1928 static int sse_mb(MpegEncContext *s){
1929     int w= 16;
1930     int h= 16;
1931
1932     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
1933     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
1934
1935     if(w==16 && h==16)
1936       if(s->avctx->mb_cmp == FF_CMP_NSSE){
1937         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
1938                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
1939                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
1940       }else{
1941         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
1942                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
1943                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
1944       }
1945     else
1946         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
1947                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
1948                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
1949 }
1950
1951 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
1952     MpegEncContext *s= *(void**)arg;
1953
1954
1955     s->me.pre_pass=1;
1956     s->me.dia_size= s->avctx->pre_dia_size;
1957     s->first_slice_line=1;
1958     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
1959         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
1960             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
1961         }
1962         s->first_slice_line=0;
1963     }
1964
1965     s->me.pre_pass=0;
1966
1967     return 0;
1968 }
1969
1970 static int estimate_motion_thread(AVCodecContext *c, void *arg){
1971     MpegEncContext *s= *(void**)arg;
1972
1973     ff_check_alignment();
1974
1975     s->me.dia_size= s->avctx->dia_size;
1976     s->first_slice_line=1;
1977     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
1978         s->mb_x=0; //for block init below
1979         ff_init_block_index(s);
1980         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
1981             s->block_index[0]+=2;
1982             s->block_index[1]+=2;
1983             s->block_index[2]+=2;
1984             s->block_index[3]+=2;
1985
1986             /* compute motion vector & mb_type and store in context */
1987             if(s->pict_type==AV_PICTURE_TYPE_B)
1988                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
1989             else
1990                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
1991         }
1992         s->first_slice_line=0;
1993     }
1994     return 0;
1995 }
1996
1997 static int mb_var_thread(AVCodecContext *c, void *arg){
1998     MpegEncContext *s= *(void**)arg;
1999     int mb_x, mb_y;
2000
2001     ff_check_alignment();
2002
2003     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2004         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2005             int xx = mb_x * 16;
2006             int yy = mb_y * 16;
2007             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2008             int varc;
2009             int sum = s->dsp.pix_sum(pix, s->linesize);
2010
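                 /* variance of the 16x16 luma block, (sum(x^2) - sum(x)^2/256) / 256,
                  * with rounding and a small constant bias */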
2011             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
2012
2013             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2014             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2015             s->me.mb_var_sum_temp    += varc;
2016         }
2017     }
2018     return 0;
2019 }
2020
2021 static void write_slice_end(MpegEncContext *s){
2022     if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4){
2023         if(s->partitioned_frame){
2024             ff_mpeg4_merge_partitions(s);
2025         }
2026
2027         ff_mpeg4_stuffing(&s->pb);
2028     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2029         ff_mjpeg_encode_stuffing(&s->pb);
2030     }
2031
2032     align_put_bits(&s->pb);
2033     flush_put_bits(&s->pb);
2034
2035     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2036         s->misc_bits+= get_bits_diff(s);
2037 }
2038
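     /* Encode this thread's strip of macroblock rows: write resync / GOB / slice
      * headers when rtp_mode asks for them, pick the macroblock coding mode
      * (directly when motion estimation left a single candidate type, otherwise by
      * trying every candidate through encode_mb_hq()), and optionally accumulate
      * the SSE for PSNR reporting and run the H.263 loop filter. */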
2039 static int encode_thread(AVCodecContext *c, void *arg){
2040     MpegEncContext *s= *(void**)arg;
2041     int mb_x, mb_y, pdif = 0;
2042     int chr_h= 16>>s->chroma_y_shift;
2043     int i, j;
2044     MpegEncContext best_s, backup_s;
2045     uint8_t bit_buf[2][MAX_MB_BYTES];
2046     uint8_t bit_buf2[2][MAX_MB_BYTES];
2047     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2048     PutBitContext pb[2], pb2[2], tex_pb[2];
2049 //printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);
2050
2051     ff_check_alignment();
2052
2053     for(i=0; i<2; i++){
2054         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2055         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2056         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2057     }
2058
2059     s->last_bits= put_bits_count(&s->pb);
2060     s->mv_bits=0;
2061     s->misc_bits=0;
2062     s->i_tex_bits=0;
2063     s->p_tex_bits=0;
2064     s->i_count=0;
2065     s->f_count=0;
2066     s->b_count=0;
2067     s->skip_count=0;
2068
2069     for(i=0; i<3; i++){
2070         /* init last dc values */
2071         /* note: quant matrix value (8) is implied here */
2072         s->last_dc[i] = 128 << s->intra_dc_precision;
2073
2074         s->current_picture.f.error[i] = 0;
2075     }
2076     s->mb_skip_run = 0;
2077     memset(s->last_mv, 0, sizeof(s->last_mv));
2078
2079     s->last_mv_dir = 0;
2080
2081     switch(s->codec_id){
2082     case CODEC_ID_H263:
2083     case CODEC_ID_H263P:
2084     case CODEC_ID_FLV1:
2085         if (CONFIG_H263_ENCODER)
2086             s->gob_index = ff_h263_get_gob_height(s);
2087         break;
2088     case CODEC_ID_MPEG4:
2089         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2090             ff_mpeg4_init_partitions(s);
2091         break;
2092     }
2093
2094     s->resync_mb_x=0;
2095     s->resync_mb_y=0;
2096     s->first_slice_line = 1;
2097     s->ptr_lastgob = s->pb.buf;
2098     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2099 //    printf("row %d at %X\n", s->mb_y, (int)s);
2100         s->mb_x=0;
2101         s->mb_y= mb_y;
2102
2103         ff_set_qscale(s, s->qscale);
2104         ff_init_block_index(s);
2105
2106         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2107             int xy= mb_y*s->mb_stride + mb_x; // not const, as H.261 needs to adjust this
2108             int mb_type= s->mb_type[xy];
2109 //            int d;
2110             int dmin= INT_MAX;
2111             int dir;
2112
2113             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2114                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2115                 return -1;
2116             }
2117             if(s->data_partitioning){
2118                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2119                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2120                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2121                     return -1;
2122                 }
2123             }
2124
2125             s->mb_x = mb_x;
2126             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2127             ff_update_block_index(s);
2128
2129             if(CONFIG_H261_ENCODER && s->codec_id == CODEC_ID_H261){
2130                 ff_h261_reorder_mb_index(s);
2131                 xy= s->mb_y*s->mb_stride + s->mb_x;
2132                 mb_type= s->mb_type[xy];
2133             }
2134
2135             /* write gob / video packet header  */
2136             if(s->rtp_mode){
2137                 int current_packet_size, is_gob_start;
2138
2139                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2140
2141                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2142
2143                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2144
2145                 switch(s->codec_id){
2146                 case CODEC_ID_H263:
2147                 case CODEC_ID_H263P:
2148                     if(!s->h263_slice_structured)
2149                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2150                     break;
2151                 case CODEC_ID_MPEG2VIDEO:
2152                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
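                         /* fall through: the mb_skip_run check below applies to
                          * both MPEG-1 and MPEG-2 */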
2153                 case CODEC_ID_MPEG1VIDEO:
2154                     if(s->mb_skip_run) is_gob_start=0;
2155                     break;
2156                 }
2157
2158                 if(is_gob_start){
2159                     if(s->start_mb_y != mb_y || mb_x!=0){
2160                         write_slice_end(s);
2161
2162                         if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
2163                             ff_mpeg4_init_partitions(s);
2164                         }
2165                     }
2166
2167                     assert((put_bits_count(&s->pb)&7) == 0);
2168                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2169
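                         /* error_rate simulates a lossy channel: roughly error_rate
                          * percent of the video packets are dropped by rewinding the
                          * bitstream to the start of the last GOB. */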
2170                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2171                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2172                         int d= 100 / s->avctx->error_rate;
2173                         if(r % d == 0){
2174                             current_packet_size=0;
2175                             s->pb.buf_ptr= s->ptr_lastgob;
2176                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2177                         }
2178                     }
2179
2180                     if (s->avctx->rtp_callback){
2181                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2182                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2183                     }
2184
2185                     switch(s->codec_id){
2186                     case CODEC_ID_MPEG4:
2187                         if (CONFIG_MPEG4_ENCODER) {
2188                             ff_mpeg4_encode_video_packet_header(s);
2189                             ff_mpeg4_clean_buffers(s);
2190                         }
2191                     break;
2192                     case CODEC_ID_MPEG1VIDEO:
2193                     case CODEC_ID_MPEG2VIDEO:
2194                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2195                             ff_mpeg1_encode_slice_header(s);
2196                             ff_mpeg1_clean_buffers(s);
2197                         }
2198                     break;
2199                     case CODEC_ID_H263:
2200                     case CODEC_ID_H263P:
2201                         if (CONFIG_H263_ENCODER)
2202                             h263_encode_gob_header(s, mb_y);
2203                     break;
2204                     }
2205
2206                     if(s->flags&CODEC_FLAG_PASS1){
2207                         int bits= put_bits_count(&s->pb);
2208                         s->misc_bits+= bits - s->last_bits;
2209                         s->last_bits= bits;
2210                     }
2211
2212                     s->ptr_lastgob += current_packet_size;
2213                     s->first_slice_line=1;
2214                     s->resync_mb_x=mb_x;
2215                     s->resync_mb_y=mb_y;
2216                 }
2217             }
2218
2219             if(  (s->resync_mb_x   == s->mb_x)
2220                && s->resync_mb_y+1 == s->mb_y){
2221                 s->first_slice_line=0;
2222             }
2223
2224             s->mb_skipped=0;
2225             s->dquant=0; //only for QP_RD
2226
2227             if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible or CODEC_FLAG_QP_RD
2228                 int next_block=0;
2229                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2230
2231                 copy_context_before_encode(&backup_s, s, -1);
2232                 backup_s.pb= s->pb;
2233                 best_s.data_partitioning= s->data_partitioning;
2234                 best_s.partitioned_frame= s->partitioned_frame;
2235                 if(s->data_partitioning){
2236                     backup_s.pb2= s->pb2;
2237                     backup_s.tex_pb= s->tex_pb;
2238                 }
2239
2240                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2241                     s->mv_dir = MV_DIR_FORWARD;
2242                     s->mv_type = MV_TYPE_16X16;
2243                     s->mb_intra= 0;
2244                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2245                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2246                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2247                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2248                 }
2249                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2250                     s->mv_dir = MV_DIR_FORWARD;
2251                     s->mv_type = MV_TYPE_FIELD;
2252                     s->mb_intra= 0;
2253                     for(i=0; i<2; i++){
2254                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2255                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2256                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2257                     }
2258                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2259                                  &dmin, &next_block, 0, 0);
2260                 }
2261                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2262                     s->mv_dir = MV_DIR_FORWARD;
2263                     s->mv_type = MV_TYPE_16X16;
2264                     s->mb_intra= 0;
2265                     s->mv[0][0][0] = 0;
2266                     s->mv[0][0][1] = 0;
2267                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2268                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2269                 }
2270                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2271                     s->mv_dir = MV_DIR_FORWARD;
2272                     s->mv_type = MV_TYPE_8X8;
2273                     s->mb_intra= 0;
2274                     for(i=0; i<4; i++){
2275                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2276                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2277                     }
2278                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2279                                  &dmin, &next_block, 0, 0);
2280                 }
2281                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2282                     s->mv_dir = MV_DIR_FORWARD;
2283                     s->mv_type = MV_TYPE_16X16;
2284                     s->mb_intra= 0;
2285                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2286                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2287                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2288                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2289                 }
2290                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2291                     s->mv_dir = MV_DIR_BACKWARD;
2292                     s->mv_type = MV_TYPE_16X16;
2293                     s->mb_intra= 0;
2294                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2295                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2296                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2297                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2298                 }
2299                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2300                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2301                     s->mv_type = MV_TYPE_16X16;
2302                     s->mb_intra= 0;
2303                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2304                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2305                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2306                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2307                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2308                                  &dmin, &next_block, 0, 0);
2309                 }
2310                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2311                     s->mv_dir = MV_DIR_FORWARD;
2312                     s->mv_type = MV_TYPE_FIELD;
2313                     s->mb_intra= 0;
2314                     for(i=0; i<2; i++){
2315                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2316                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2317                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2318                     }
2319                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2320                                  &dmin, &next_block, 0, 0);
2321                 }
2322                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2323                     s->mv_dir = MV_DIR_BACKWARD;
2324                     s->mv_type = MV_TYPE_FIELD;
2325                     s->mb_intra= 0;
2326                     for(i=0; i<2; i++){
2327                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2328                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2329                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2330                     }
2331                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2332                                  &dmin, &next_block, 0, 0);
2333                 }
2334                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2335                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2336                     s->mv_type = MV_TYPE_FIELD;
2337                     s->mb_intra= 0;
2338                     for(dir=0; dir<2; dir++){
2339                         for(i=0; i<2; i++){
2340                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2341                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2342                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2343                         }
2344                     }
2345                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2346                                  &dmin, &next_block, 0, 0);
2347                 }
2348                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2349                     s->mv_dir = 0;
2350                     s->mv_type = MV_TYPE_16X16;
2351                     s->mb_intra= 1;
2352                     s->mv[0][0][0] = 0;
2353                     s->mv[0][0][1] = 0;
2354                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2355                                  &dmin, &next_block, 0, 0);
2356                     if(s->h263_pred || s->h263_aic){
2357                         if(best_s.mb_intra)
2358                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2359                         else
2360                             ff_clean_intra_table_entries(s); //old mode?
2361                     }
2362                 }
2363
2364                 if((s->flags & CODEC_FLAG_QP_RD) && dmin < INT_MAX){
2365                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2366                         const int last_qp= backup_s.qscale;
2367                         int qpi, qp, dc[6];
2368                         DCTELEM ac[6][16];
2369                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2370                         static const int dquant_tab[4]={-1,1,-2,2};
2371
2372                         assert(backup_s.dquant == 0);
2373
2374                         //FIXME intra
2375                         s->mv_dir= best_s.mv_dir;
2376                         s->mv_type = MV_TYPE_16X16;
2377                         s->mb_intra= best_s.mb_intra;
2378                         s->mv[0][0][0] = best_s.mv[0][0][0];
2379                         s->mv[0][0][1] = best_s.mv[0][0][1];
2380                         s->mv[1][0][0] = best_s.mv[1][0][0];
2381                         s->mv[1][0][1] = best_s.mv[1][0][1];
2382
2383                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2384                         for(; qpi<4; qpi++){
2385                             int dquant= dquant_tab[qpi];
2386                             qp= last_qp + dquant;
2387                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2388                                 continue;
2389                             backup_s.dquant= dquant;
2390                             if(s->mb_intra && s->dc_val[0]){
2391                                 for(i=0; i<6; i++){
2392                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2393                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2394                                 }
2395                             }
2396
2397                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2398                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2399                             if(best_s.qscale != qp){
2400                                 if(s->mb_intra && s->dc_val[0]){
2401                                     for(i=0; i<6; i++){
2402                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2403                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2404                                     }
2405                                 }
2406                             }
2407                         }
2408                     }
2409                 }
2410                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2411                     int mx= s->b_direct_mv_table[xy][0];
2412                     int my= s->b_direct_mv_table[xy][1];
2413
2414                     backup_s.dquant = 0;
2415                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2416                     s->mb_intra= 0;
2417                     ff_mpeg4_set_direct_mv(s, mx, my);
2418                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2419                                  &dmin, &next_block, mx, my);
2420                 }
2421                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2422                     backup_s.dquant = 0;
2423                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2424                     s->mb_intra= 0;
2425                     ff_mpeg4_set_direct_mv(s, 0, 0);
2426                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2427                                  &dmin, &next_block, 0, 0);
2428                 }
2429                 if(!best_s.mb_intra && s->flags2&CODEC_FLAG2_SKIP_RD){
2430                     int coded=0;
2431                     for(i=0; i<6; i++)
2432                         coded |= s->block_last_index[i];
2433                     if(coded){
2434                         int mx,my;
2435                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2436                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2437                             mx=my=0; //FIXME find the one we actually used
2438                             ff_mpeg4_set_direct_mv(s, mx, my);
2439                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2440                             mx= s->mv[1][0][0];
2441                             my= s->mv[1][0][1];
2442                         }else{
2443                             mx= s->mv[0][0][0];
2444                             my= s->mv[0][0][1];
2445                         }
2446
2447                         s->mv_dir= best_s.mv_dir;
2448                         s->mv_type = best_s.mv_type;
2449                         s->mb_intra= 0;
2450 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2451                         s->mv[0][0][1] = best_s.mv[0][0][1];
2452                         s->mv[1][0][0] = best_s.mv[1][0][0];
2453                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2454                         backup_s.dquant= 0;
2455                         s->skipdct=1;
2456                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2457                                         &dmin, &next_block, mx, my);
2458                         s->skipdct=0;
2459                     }
2460                 }
2461
2462                 s->current_picture.f.qscale_table[xy] = best_s.qscale;
2463
2464                 copy_context_after_encode(s, &best_s, -1);
2465
2466                 pb_bits_count= put_bits_count(&s->pb);
2467                 flush_put_bits(&s->pb);
2468                 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2469                 s->pb= backup_s.pb;
2470
2471                 if(s->data_partitioning){
2472                     pb2_bits_count= put_bits_count(&s->pb2);
2473                     flush_put_bits(&s->pb2);
2474                     ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2475                     s->pb2= backup_s.pb2;
2476
2477                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2478                     flush_put_bits(&s->tex_pb);
2479                     ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2480                     s->tex_pb= backup_s.tex_pb;
2481                 }
2482                 s->last_bits= put_bits_count(&s->pb);
2483
2484                 if (CONFIG_H263_ENCODER &&
2485                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2486                     ff_h263_update_motion_val(s);
2487
2488                 if(next_block==0){ //FIXME 16 vs linesize16
2489                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2490                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2491                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2492                 }
2493
2494                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2495                     MPV_decode_mb(s, s->block);
2496             } else {
2497                 int motion_x = 0, motion_y = 0;
2498                 s->mv_type=MV_TYPE_16X16;
2499                 // only one MB-Type possible
2500
2501                 switch(mb_type){
2502                 case CANDIDATE_MB_TYPE_INTRA:
2503                     s->mv_dir = 0;
2504                     s->mb_intra= 1;
2505                     motion_x= s->mv[0][0][0] = 0;
2506                     motion_y= s->mv[0][0][1] = 0;
2507                     break;
2508                 case CANDIDATE_MB_TYPE_INTER:
2509                     s->mv_dir = MV_DIR_FORWARD;
2510                     s->mb_intra= 0;
2511                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2512                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2513                     break;
2514                 case CANDIDATE_MB_TYPE_INTER_I:
2515                     s->mv_dir = MV_DIR_FORWARD;
2516                     s->mv_type = MV_TYPE_FIELD;
2517                     s->mb_intra= 0;
2518                     for(i=0; i<2; i++){
2519                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2520                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2521                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2522                     }
2523                     break;
2524                 case CANDIDATE_MB_TYPE_INTER4V:
2525                     s->mv_dir = MV_DIR_FORWARD;
2526                     s->mv_type = MV_TYPE_8X8;
2527                     s->mb_intra= 0;
2528                     for(i=0; i<4; i++){
2529                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2530                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2531                     }
2532                     break;
2533                 case CANDIDATE_MB_TYPE_DIRECT:
2534                     if (CONFIG_MPEG4_ENCODER) {
2535                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2536                         s->mb_intra= 0;
2537                         motion_x=s->b_direct_mv_table[xy][0];
2538                         motion_y=s->b_direct_mv_table[xy][1];
2539                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2540                     }
2541                     break;
2542                 case CANDIDATE_MB_TYPE_DIRECT0:
2543                     if (CONFIG_MPEG4_ENCODER) {
2544                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2545                         s->mb_intra= 0;
2546                         ff_mpeg4_set_direct_mv(s, 0, 0);
2547                     }
2548                     break;
2549                 case CANDIDATE_MB_TYPE_BIDIR:
2550                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2551                     s->mb_intra= 0;
2552                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2553                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2554                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2555                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2556                     break;
2557                 case CANDIDATE_MB_TYPE_BACKWARD:
2558                     s->mv_dir = MV_DIR_BACKWARD;
2559                     s->mb_intra= 0;
2560                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2561                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2562                     break;
2563                 case CANDIDATE_MB_TYPE_FORWARD:
2564                     s->mv_dir = MV_DIR_FORWARD;
2565                     s->mb_intra= 0;
2566                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2567                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2568 //                    printf(" %d %d ", motion_x, motion_y);
2569                     break;
2570                 case CANDIDATE_MB_TYPE_FORWARD_I:
2571                     s->mv_dir = MV_DIR_FORWARD;
2572                     s->mv_type = MV_TYPE_FIELD;
2573                     s->mb_intra= 0;
2574                     for(i=0; i<2; i++){
2575                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2576                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2577                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2578                     }
2579                     break;
2580                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2581                     s->mv_dir = MV_DIR_BACKWARD;
2582                     s->mv_type = MV_TYPE_FIELD;
2583                     s->mb_intra= 0;
2584                     for(i=0; i<2; i++){
2585                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2586                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2587                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2588                     }
2589                     break;
2590                 case CANDIDATE_MB_TYPE_BIDIR_I:
2591                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2592                     s->mv_type = MV_TYPE_FIELD;
2593                     s->mb_intra= 0;
2594                     for(dir=0; dir<2; dir++){
2595                         for(i=0; i<2; i++){
2596                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2597                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2598                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2599                         }
2600                     }
2601                     break;
2602                 default:
2603                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2604                 }
2605
2606                 encode_mb(s, motion_x, motion_y);
2607
2608                 // RAL: Update last macroblock type
2609                 s->last_mv_dir = s->mv_dir;
2610
2611                 if (CONFIG_H263_ENCODER &&
2612                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2613                     ff_h263_update_motion_val(s);
2614
2615                 MPV_decode_mb(s, s->block);
2616             }
2617
2618             /* clean the MV table in I-, P- and S-frames for direct mode in B-frames */
2619             if(s->mb_intra /* && I,P,S_TYPE */){
2620                 s->p_mv_table[xy][0]=0;
2621                 s->p_mv_table[xy][1]=0;
2622             }
2623
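            /* If PSNR reporting is enabled, accumulate the per-plane squared
             * error of the just-encoded macroblock, clipping the block size at
             * the right/bottom picture border so partial MBs are measured
             * correctly. */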
2624             if(s->flags&CODEC_FLAG_PSNR){
2625                 int w= 16;
2626                 int h= 16;
2627
2628                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2629                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2630
2631                 s->current_picture.f.error[0] += sse(
2632                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2633                     s->dest[0], w, h, s->linesize);
2634                 s->current_picture.f.error[1] += sse(
2635                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2636                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2637                 s->current_picture.f.error[2] += sse(
2638                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2639                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2640             }
2641             if(s->loop_filter){
2642                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2643                     ff_h263_loop_filter(s);
2644             }
2645 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb));
2646         }
2647     }
2648
2649     // Not pretty, but the extension header must be written before flushing, so it has to go here
2650     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2651         msmpeg4_encode_ext_header(s);
2652
2653     write_slice_end(s);
2654
2655     /* Send the last GOB if RTP */
2656     if (s->avctx->rtp_callback) {
2657         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2658         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2659         /* Call the RTP callback to send the last GOB */
2660         emms_c();
2661         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2662     }
2663
2664     return 0;
2665 }
2666
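/* MERGE(field) accumulates a per-slice-thread statistic into the destination
 * context and clears it in the source so a later merge cannot count it twice;
 * e.g. MERGE(mv_bits) expands to: dst->mv_bits += src->mv_bits; src->mv_bits = 0; */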
2667 #define MERGE(field) dst->field += src->field; src->field=0
2668 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2669     MERGE(me.scene_change_score);
2670     MERGE(me.mc_mb_var_sum_temp);
2671     MERGE(me.mb_var_sum_temp);
2672 }
2673
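/* Fold the statistics gathered by one slice-encoding thread back into the main
 * context and append its (byte-aligned, as the asserts below check) bitstream
 * to the main PutBitContext via ff_copy_bits(). */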
2674 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2675     int i;
2676
2677     MERGE(dct_count[0]); // note: the other DCT vars are not part of the context
2678     MERGE(dct_count[1]);
2679     MERGE(mv_bits);
2680     MERGE(i_tex_bits);
2681     MERGE(p_tex_bits);
2682     MERGE(i_count);
2683     MERGE(f_count);
2684     MERGE(b_count);
2685     MERGE(skip_count);
2686     MERGE(misc_bits);
2687     MERGE(error_count);
2688     MERGE(padding_bug_score);
2689     MERGE(current_picture.f.error[0]);
2690     MERGE(current_picture.f.error[1]);
2691     MERGE(current_picture.f.error[2]);
2692
2693     if(dst->avctx->noise_reduction){
2694         for(i=0; i<64; i++){
2695             MERGE(dct_error_sum[0][i]);
2696             MERGE(dct_error_sum[1][i]);
2697         }
2698     }
2699
2700     assert(put_bits_count(&src->pb) % 8 ==0);
2701     assert(put_bits_count(&dst->pb) % 8 ==0);
2702     ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2703     flush_put_bits(&dst->pb);
2704 }
2705
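/* Pick the quality (lambda/qscale) for the current picture: an explicitly
 * queued next_lambda wins, otherwise the rate controller is asked unless a
 * fixed qscale is in use.  With adaptive quantization the per-MB qscale table
 * is cleaned up by the codec-specific helper before lambda is taken from
 * lambda_table[0].  dry_run avoids consuming next_lambda, so the routine can
 * also be used for the dry-run call in the CODEC_FLAG_PASS2 path of
 * encode_picture(). */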
2706 static int estimate_qp(MpegEncContext *s, int dry_run){
2707     if (s->next_lambda){
2708         s->current_picture_ptr->f.quality =
2709         s->current_picture.f.quality = s->next_lambda;
2710         if(!dry_run) s->next_lambda= 0;
2711     } else if (!s->fixed_qscale) {
2712         s->current_picture_ptr->f.quality =
2713         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2714         if (s->current_picture.f.quality < 0)
2715             return -1;
2716     }
2717
2718     if(s->adaptive_quant){
2719         switch(s->codec_id){
2720         case CODEC_ID_MPEG4:
2721             if (CONFIG_MPEG4_ENCODER)
2722                 ff_clean_mpeg4_qscales(s);
2723             break;
2724         case CODEC_ID_H263:
2725         case CODEC_ID_H263P:
2726         case CODEC_ID_FLV1:
2727             if (CONFIG_H263_ENCODER)
2728                 ff_clean_h263_qscales(s);
2729             break;
2730         default:
2731             ff_init_qscale_tab(s);
2732         }
2733
2734         s->lambda= s->lambda_table[0];
2735         //FIXME broken
2736     }else
2737         s->lambda = s->current_picture.f.quality;
2738 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
2739     update_qscale(s);
2740     return 0;
2741 }
2742
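/* Compute the temporal distances used for B-frame handling: for a reference
 * (non-B) picture, pp_time becomes the distance to the previous reference and
 * last_non_b_time is updated; for a B-picture, pb_time becomes the distance
 * from the previous reference to this B-frame.  Illustrative example (not from
 * the code): display order I0 B1 B2 P3, one time unit apart -- encoding P3
 * gives pp_time = 3, encoding B1 then gives pb_time = 3 - (3 - 1) = 1.
 * These distances feed, among other things, direct-mode MV scaling. */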
2743 /* must be called before writing the header */
2744 static void set_frame_distances(MpegEncContext * s){
2745     assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE);
2746     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
2747
2748     if(s->pict_type==AV_PICTURE_TYPE_B){
2749         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
2750         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
2751     }else{
2752         s->pp_time= s->time - s->last_non_b_time;
2753         s->last_non_b_time= s->time;
2754         assert(s->picture_number==0 || s->pp_time > 0);
2755     }
2756 }
2757
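/* Encode one picture.  Roughly: set frame distances and the rounding mode,
 * pick an initial lambda (2-pass or from the last picture of the same type),
 * duplicate the context for each slice thread, run motion estimation (or the
 * intra complexity pass for I-frames), merge the per-thread ME statistics,
 * possibly promote the picture to an I-frame on a scene change, choose
 * f_code/b_code and clamp over-long motion vectors, run rate control
 * (estimate_qp), write the format-specific picture header, and finally run
 * encode_thread() over all slice threads and merge their contexts/bitstreams. */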
2758 static int encode_picture(MpegEncContext *s, int picture_number)
2759 {
2760     int i;
2761     int bits;
2762     int context_count = s->avctx->thread_count;
2763
2764     s->picture_number = picture_number;
2765
2766     /* Reset the average MB variance */
2767     s->me.mb_var_sum_temp    =
2768     s->me.mc_mb_var_sum_temp = 0;
2769
2770     /* we need to initialize some time vars before we can encode b-frames */
2771     // RAL: Condition added for MPEG1VIDEO
2772     if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
2773         set_frame_distances(s);
2774     if(CONFIG_MPEG4_ENCODER && s->codec_id == CODEC_ID_MPEG4)
2775         ff_set_mpeg4_time(s);
2776
2777     s->me.scene_change_score=0;
2778
2779 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
2780
2781     if(s->pict_type==AV_PICTURE_TYPE_I){
2782         if(s->msmpeg4_version >= 3) s->no_rounding=1;
2783         else                        s->no_rounding=0;
2784     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
2785         if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
2786             s->no_rounding ^= 1;
2787     }
2788
2789     if(s->flags & CODEC_FLAG_PASS2){
2790         if (estimate_qp(s,1) < 0)
2791             return -1;
2792         ff_get_2pass_fcode(s);
2793     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
2794         if(s->pict_type==AV_PICTURE_TYPE_B)
2795             s->lambda= s->last_lambda_for[s->pict_type];
2796         else
2797             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
2798         update_qscale(s);
2799     }
2800
2801     s->mb_intra=0; //for the rate distortion & bit compare functions
2802     for(i=1; i<context_count; i++){
2803         ff_update_duplicate_context(s->thread_context[i], s);
2804     }
2805
2806     if(ff_init_me(s)<0)
2807         return -1;
2808
2809     /* Estimate motion for every MB */
2810     if(s->pict_type != AV_PICTURE_TYPE_I){
2811         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
2812         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
2813         if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
2814             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
2815                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
2816             }
2817         }
2818
2819         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
2820     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
2821         /* I-Frame */
2822         for(i=0; i<s->mb_stride*s->mb_height; i++)
2823             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
2824
2825         if(!s->fixed_qscale){
2826             /* finding spatial complexity for I-frame rate control */
2827             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
2828         }
2829     }
2830     for(i=1; i<context_count; i++){
2831         merge_context_after_me(s, s->thread_context[i]);
2832     }
2833     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
2834     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
2835     emms_c();
2836
2837     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
2838         s->pict_type= AV_PICTURE_TYPE_I;
2839         for(i=0; i<s->mb_stride*s->mb_height; i++)
2840             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
2841 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
2842     }
2843
2844     if(!s->umvplus){
2845         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
2846             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
2847
2848             if(s->flags & CODEC_FLAG_INTERLACED_ME){
2849                 int a,b;
2850                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
2851                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
2852                 s->f_code= FFMAX3(s->f_code, a, b);
2853             }
2854
2855             ff_fix_long_p_mvs(s);
2856             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
2857             if(s->flags & CODEC_FLAG_INTERLACED_ME){
2858                 int j;
2859                 for(i=0; i<2; i++){
2860                     for(j=0; j<2; j++)
2861                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
2862                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
2863                 }
2864             }
2865         }
2866
2867         if(s->pict_type==AV_PICTURE_TYPE_B){
2868             int a, b;
2869
2870             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
2871             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
2872             s->f_code = FFMAX(a, b);
2873
2874             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
2875             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
2876             s->b_code = FFMAX(a, b);
2877
2878             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
2879             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
2880             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
2881             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
2882             if(s->flags & CODEC_FLAG_INTERLACED_ME){
2883                 int dir, j;
2884                 for(dir=0; dir<2; dir++){
2885                     for(i=0; i<2; i++){
2886                         for(j=0; j<2; j++){
2887                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
2888                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
2889                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
2890                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
2891                         }
2892                     }
2893                 }
2894             }
2895         }
2896     }
2897
2898     if (estimate_qp(s, 0) < 0)
2899         return -1;
2900
2901     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
2902         s->qscale= 3; //reduce clipping problems
2903
2904     if (s->out_format == FMT_MJPEG) {
2905         /* for mjpeg, we do include qscale in the matrix */
2906         for(i=1;i<64;i++){
2907             int j= s->dsp.idct_permutation[i];
2908
2909             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
2910         }
2911         s->y_dc_scale_table=
2912         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
2913         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
2914         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
2915                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
2916         s->qscale= 8;
2917     }
2918
2919     //FIXME var duplication
2920     s->current_picture_ptr->f.key_frame =
2921     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
2922     s->current_picture_ptr->f.pict_type =
2923     s->current_picture.f.pict_type = s->pict_type;
2924
2925     if (s->current_picture.f.key_frame)
2926         s->picture_in_gop_number=0;
2927
2928     s->last_bits= put_bits_count(&s->pb);
2929     switch(s->out_format) {
2930     case FMT_MJPEG:
2931         if (CONFIG_MJPEG_ENCODER)
2932             ff_mjpeg_encode_picture_header(s);
2933         break;
2934     case FMT_H261:
2935         if (CONFIG_H261_ENCODER)
2936             ff_h261_encode_picture_header(s, picture_number);
2937         break;
2938     case FMT_H263:
2939         if (CONFIG_WMV2_ENCODER && s->codec_id == CODEC_ID_WMV2)
2940             ff_wmv2_encode_picture_header(s, picture_number);
2941         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
2942             msmpeg4_encode_picture_header(s, picture_number);
2943         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
2944             mpeg4_encode_picture_header(s, picture_number);
2945         else if (CONFIG_RV10_ENCODER && s->codec_id == CODEC_ID_RV10)
2946             rv10_encode_picture_header(s, picture_number);
2947         else if (CONFIG_RV20_ENCODER && s->codec_id == CODEC_ID_RV20)
2948             rv20_encode_picture_header(s, picture_number);
2949         else if (CONFIG_FLV_ENCODER && s->codec_id == CODEC_ID_FLV1)
2950             ff_flv_encode_picture_header(s, picture_number);
2951         else if (CONFIG_H263_ENCODER)
2952             h263_encode_picture_header(s, picture_number);
2953         break;
2954     case FMT_MPEG1:
2955         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2956             mpeg1_encode_picture_header(s, picture_number);
2957         break;
2958     case FMT_H264:
2959         break;
2960     default:
2961         assert(0);
2962     }
2963     bits= put_bits_count(&s->pb);
2964     s->header_bits= bits - s->last_bits;
2965
2966     for(i=1; i<context_count; i++){
2967         update_duplicate_context_after_me(s->thread_context[i], s);
2968     }
2969     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
2970     for(i=1; i<context_count; i++){
2971         merge_context_after_encode(s, s->thread_context[i]);
2972     }
2973     emms_c();
2974     return 0;
2975 }
2976
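/* DCT-domain noise reduction: for each coefficient position, accumulate its
 * magnitude in dct_error_sum and pull the coefficient towards zero by the
 * per-position dct_offset (derived elsewhere from these sums and the
 * noise_reduction strength), clamping at zero so the sign never flips. */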
2977 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
2978     const int intra= s->mb_intra;
2979     int i;
2980
2981     s->dct_count[intra]++;
2982
2983     for(i=0; i<64; i++){
2984         int level= block[i];
2985
2986         if(level){
2987             if(level>0){
2988                 s->dct_error_sum[intra][i] += level;
2989                 level -= s->dct_offset[intra][i];
2990                 if(level<0) level=0;
2991             }else{
2992                 s->dct_error_sum[intra][i] -= level;
2993                 level += s->dct_offset[intra][i];
2994                 if(level>0) level=0;
2995             }
2996             block[i]= level;
2997         }
2998     }
2999 }
3000
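/* Trellis (rate-distortion optimized) quantization.  After the forward DCT,
 * each coefficient gets up to two candidate levels (the quantized value and
 * the value one below).  A dynamic-programming pass over the scan positions
 * keeps a short list of "survivor" positions; for every candidate level and
 * survivor it scores distortion (squared error against the unquantized
 * coefficient) plus lambda * VLC bits for the resulting run/level pair,
 * remembering the best predecessor in run_tab[]/level_tab[].  For H.263-style
 * VLCs the cost of being the last coefficient is tracked separately.  The
 * block is finally rebuilt by walking the chained run/level decisions back
 * from the best last position. */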
3001 static int dct_quantize_trellis_c(MpegEncContext *s,
3002                                   DCTELEM *block, int n,
3003                                   int qscale, int *overflow){
3004     const int *qmat;
3005     const uint8_t *scantable= s->intra_scantable.scantable;
3006     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3007     int max=0;
3008     unsigned int threshold1, threshold2;
3009     int bias=0;
3010     int run_tab[65];
3011     int level_tab[65];
3012     int score_tab[65];
3013     int survivor[65];
3014     int survivor_count;
3015     int last_run=0;
3016     int last_level=0;
3017     int last_score= 0;
3018     int last_i;
3019     int coeff[2][64];
3020     int coeff_count[64];
3021     int qmul, qadd, start_i, last_non_zero, i, dc;
3022     const int esc_length= s->ac_esc_length;
3023     uint8_t * length;
3024     uint8_t * last_length;
3025     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3026
3027     s->dsp.fdct (block);
3028
3029     if(s->dct_error_sum)
3030         s->denoise_dct(s, block);
3031     qmul= qscale*16;
3032     qadd= ((qscale-1)|1)*8;
3033
3034     if (s->mb_intra) {
3035         int q;
3036         if (!s->h263_aic) {
3037             if (n < 4)
3038                 q = s->y_dc_scale;
3039             else
3040                 q = s->c_dc_scale;
3041             q = q << 3;
3042         } else{
3043             /* For AIC we skip quant/dequant of INTRADC */
3044             q = 1 << 3;
3045             qadd=0;
3046         }
3047
3048         /* note: block[0] is assumed to be positive */
3049         block[0] = (block[0] + (q >> 1)) / q;
3050         start_i = 1;
3051         last_non_zero = 0;
3052         qmat = s->q_intra_matrix[qscale];
3053         if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3054             bias= 1<<(QMAT_SHIFT-1);
3055         length     = s->intra_ac_vlc_length;
3056         last_length= s->intra_ac_vlc_last_length;
3057     } else {
3058         start_i = 0;
3059         last_non_zero = -1;
3060         qmat = s->q_inter_matrix[qscale];
3061         length     = s->inter_ac_vlc_length;
3062         last_length= s->inter_ac_vlc_last_length;
3063     }
3064     last_i= start_i;
3065
3066     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3067     threshold2= (threshold1<<1);
3068
3069     for(i=63; i>=start_i; i--) {
3070         const int j = scantable[i];
3071         int level = block[j] * qmat[j];
3072
3073         if(((unsigned)(level+threshold1))>threshold2){
3074             last_non_zero = i;
3075             break;
3076         }
3077     }
3078
3079     for(i=start_i; i<=last_non_zero; i++) {
3080         const int j = scantable[i];
3081         int level = block[j] * qmat[j];
3082
3083 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3084 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3085         if(((unsigned)(level+threshold1))>threshold2){
3086             if(level>0){
3087                 level= (bias + level)>>QMAT_SHIFT;
3088                 coeff[0][i]= level;
3089                 coeff[1][i]= level-1;
3090 //                coeff[2][k]= level-2;
3091             }else{
3092                 level= (bias - level)>>QMAT_SHIFT;
3093                 coeff[0][i]= -level;
3094                 coeff[1][i]= -level+1;
3095 //                coeff[2][k]= -level+2;
3096             }
3097             coeff_count[i]= FFMIN(level, 2);
3098             assert(coeff_count[i]);
3099             max |=level;
3100         }else{
3101             coeff[0][i]= (level>>31)|1;
3102             coeff_count[i]= 1;
3103         }
3104     }
3105
3106     *overflow= s->max_qcoeff < max; //overflow might have happened
3107
3108     if(last_non_zero < start_i){
3109         memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3110         return last_non_zero;
3111     }
3112
3113     score_tab[start_i]= 0;
3114     survivor[0]= start_i;
3115     survivor_count= 1;
3116
3117     for(i=start_i; i<=last_non_zero; i++){
3118         int level_index, j, zero_distortion;
3119         int dct_coeff= FFABS(block[ scantable[i] ]);
3120         int best_score=256*256*256*120;
3121
3122         if (   s->dsp.fdct == fdct_ifast
3123 #ifndef FAAN_POSTSCALE
3124             || s->dsp.fdct == ff_faandct
3125 #endif
3126            )
3127             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3128         zero_distortion= dct_coeff*dct_coeff;
3129
3130         for(level_index=0; level_index < coeff_count[i]; level_index++){
3131             int distortion;
3132             int level= coeff[level_index][i];
3133             const int alevel= FFABS(level);
3134             int unquant_coeff;
3135
3136             assert(level);
3137
3138             if(s->out_format == FMT_H263){
3139                 unquant_coeff= alevel*qmul + qadd;
3140             }else{ //MPEG1
3141                 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3142                 if(s->mb_intra){
3143                         unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
3144                         unquant_coeff =   (unquant_coeff - 1) | 1;
3145                 }else{
3146                         unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3147                         unquant_coeff =   (unquant_coeff - 1) | 1;
3148                 }
3149                 unquant_coeff<<= 3;
3150             }
3151
3152             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3153             level+=64;
3154             if((level&(~127)) == 0){
3155                 for(j=survivor_count-1; j>=0; j--){
3156                     int run= i - survivor[j];
3157                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3158                     score += score_tab[i-run];
3159
3160                     if(score < best_score){
3161                         best_score= score;
3162                         run_tab[i+1]= run;
3163                         level_tab[i+1]= level-64;
3164                     }
3165                 }
3166
3167                 if(s->out_format == FMT_H263){
3168                     for(j=survivor_count-1; j>=0; j--){
3169                         int run= i - survivor[j];
3170                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3171                         score += score_tab[i-run];
3172                         if(score < last_score){
3173                             last_score= score;
3174                             last_run= run;
3175                             last_level= level-64;
3176                             last_i= i+1;
3177                         }
3178                     }
3179                 }
3180             }else{
3181                 distortion += esc_length*lambda;
3182                 for(j=survivor_count-1; j>=0; j--){
3183                     int run= i - survivor[j];
3184                     int score= distortion + score_tab[i-run];
3185
3186                     if(score < best_score){
3187                         best_score= score;
3188                         run_tab[i+1]= run;
3189                         level_tab[i+1]= level-64;
3190                     }
3191                 }
3192
3193                 if(s->out_format == FMT_H263){
3194                   for(j=survivor_count-1; j>=0; j--){
3195                         int run= i - survivor[j];
3196                         int score= distortion + score_tab[i-run];
3197                         if(score < last_score){
3198                             last_score= score;
3199                             last_run= run;
3200                             last_level= level-64;
3201                             last_i= i+1;
3202                         }
3203                     }
3204                 }
3205             }
3206         }
3207
3208         score_tab[i+1]= best_score;
3209
3210                 // Note: there is a VLC code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
3211         if(last_non_zero <= 27){
3212             for(; survivor_count; survivor_count--){
3213                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3214                     break;
3215             }
3216         }else{
3217             for(; survivor_count; survivor_count--){
3218                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3219                     break;
3220             }
3221         }
3222
3223         survivor[ survivor_count++ ]= i+1;
3224     }
3225
3226     if(s->out_format != FMT_H263){
3227         last_score= 256*256*256*120;
3228         for(i= survivor[0]; i<=last_non_zero + 1; i++){
3229             int score= score_tab[i];
3230             if(i) score += lambda*2; //FIXME be more exact?
3231
3232             if(score < last_score){
3233                 last_score= score;
3234                 last_i= i;
3235                 last_level= level_tab[i];
3236                 last_run= run_tab[i];
3237             }
3238         }
3239     }
3240
3241     s->coded_score[n] = last_score;
3242
3243     dc= FFABS(block[0]);
3244     last_non_zero= last_i - 1;
3245     memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3246
3247     if(last_non_zero < start_i)
3248         return last_non_zero;
3249
3250     if(last_non_zero == 0 && start_i == 0){
3251         int best_level= 0;
3252         int best_score= dc * dc;
3253
3254         for(i=0; i<coeff_count[0]; i++){
3255             int level= coeff[i][0];
3256             int alevel= FFABS(level);
3257             int unquant_coeff, score, distortion;
3258
3259             if(s->out_format == FMT_H263){
3260                     unquant_coeff= (alevel*qmul + qadd)>>3;
3261             }else{ //MPEG1
3262                     unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3263                     unquant_coeff =   (unquant_coeff - 1) | 1;
3264             }
3265             unquant_coeff = (unquant_coeff + 4) >> 3;
3266             unquant_coeff<<= 3 + 3;
3267
3268             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3269             level+=64;
3270             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3271             else                    score= distortion + esc_length*lambda;
3272
3273             if(score < best_score){
3274                 best_score= score;
3275                 best_level= level - 64;
3276             }
3277         }
3278         block[0]= best_level;
3279         s->coded_score[n] = best_score - dc*dc;
3280         if(best_level == 0) return -1;
3281         else                return last_non_zero;
3282     }
3283
3284     i= last_i;
3285     assert(last_level);
3286
3287     block[ perm_scantable[last_non_zero] ]= last_level;
3288     i -= last_run + 1;
3289
3290     for(; i>start_i; i -= run_tab[i] + 1){
3291         block[ perm_scantable[i-1] ]= level_tab[i];
3292     }
3293
3294     return last_non_zero;
3295 }
3296
3297 //#define REFINE_STATS 1
3298 static int16_t basis[64][64];
3299
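/* basis[] holds the 64 spatial-domain DCT basis patterns, scaled by
 * (1 << BASIS_SHIFT) and stored under the IDCT permutation:
 *   basis[perm(8*i+j)][8*x+y] = round(0.25 * Ci * Cj
 *                                     * cos(M_PI/8 * i * (x+0.5))
 *                                     * cos(M_PI/8 * j * (y+0.5)) * (1<<BASIS_SHIFT))
 * with Ci = sqrt(0.5) for i == 0 and 1 otherwise (likewise for Cj).
 * dct_quantize_refine() uses them via add_8x8basis()/try_8x8basis() to apply
 * and evaluate single-coefficient changes directly on the spatial residual. */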
3300 static void build_basis(uint8_t *perm){
3301     int i, j, x, y;
3302     emms_c();
3303     for(i=0; i<8; i++){
3304         for(j=0; j<8; j++){
3305             for(y=0; y<8; y++){
3306                 for(x=0; x<8; x++){
3307                     double s= 0.25*(1<<BASIS_SHIFT);
3308                     int index= 8*i + j;
3309                     int perm_index= perm[index];
3310                     if(i==0) s*= sqrt(0.5);
3311                     if(j==0) s*= sqrt(0.5);
3312                     basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3313                 }
3314             }
3315         }
3316     }
3317 }
3318
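/* Iterative rate-distortion refinement of an already quantized block: on each
 * pass, try changing every coefficient by +/-1 (which may create or remove
 * coefficients), score the change as the lambda-weighted VLC bit delta plus
 * the spatial-domain distortion delta from try_8x8basis() against the weighted
 * residual rem[], apply the best-scoring change with add_8x8basis(), and stop
 * once no change improves the score.  quantizer_noise_shaping controls how
 * aggressive the search is (whether magnitudes may be raised, whether
 * positions past the last non-zero coefficient are considered, gradient
 * analysis). */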
3319 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3320                         DCTELEM *block, int16_t *weight, DCTELEM *orig,
3321                         int n, int qscale){
3322     int16_t rem[64];
3323     LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
3324     const uint8_t *scantable= s->intra_scantable.scantable;
3325     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3326 //    unsigned int threshold1, threshold2;
3327 //    int bias=0;
3328     int run_tab[65];
3329     int prev_run=0;
3330     int prev_level=0;
3331     int qmul, qadd, start_i, last_non_zero, i, dc;
3332     uint8_t * length;
3333     uint8_t * last_length;
3334     int lambda;
3335     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3336 #ifdef REFINE_STATS
3337 static int count=0;
3338 static int after_last=0;
3339 static int to_zero=0;
3340 static int from_zero=0;
3341 static int raise=0;
3342 static int lower=0;
3343 static int messed_sign=0;
3344 #endif
3345
3346     if(basis[0][0] == 0)
3347         build_basis(s->dsp.idct_permutation);
3348
3349     qmul= qscale*2;
3350     qadd= (qscale-1)|1;
3351     if (s->mb_intra) {
3352         if (!s->h263_aic) {
3353             if (n < 4)
3354                 q = s->y_dc_scale;
3355             else
3356                 q = s->c_dc_scale;
3357         } else{
3358             /* For AIC we skip quant/dequant of INTRADC */
3359             q = 1;
3360             qadd=0;
3361         }
3362         q <<= RECON_SHIFT-3;
3363         /* note: block[0] is assumed to be positive */
3364         dc= block[0]*q;
3365 //        block[0] = (block[0] + (q >> 1)) / q;
3366         start_i = 1;
3367 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3368 //            bias= 1<<(QMAT_SHIFT-1);
3369         length     = s->intra_ac_vlc_length;
3370         last_length= s->intra_ac_vlc_last_length;
3371     } else {
3372         dc= 0;
3373         start_i = 0;
3374         length     = s->inter_ac_vlc_length;
3375         last_length= s->inter_ac_vlc_last_length;
3376     }
3377     last_non_zero = s->block_last_index[n];
3378
3379 #ifdef REFINE_STATS
3380 {START_TIMER
3381 #endif
3382     dc += (1<<(RECON_SHIFT-1));
3383     for(i=0; i<64; i++){
3384         rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
3385     }
3386 #ifdef REFINE_STATS
3387 STOP_TIMER("memset rem[]")}
3388 #endif
3389     sum=0;
3390     for(i=0; i<64; i++){
3391         int one= 36;
3392         int qns=4;
3393         int w;
3394
3395         w= FFABS(weight[i]) + qns*one;
3396         w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3397
3398         weight[i] = w;
3399 //        w=weight[i] = (63*qns + (w/2)) / w;
3400
3401         assert(w>0);
3402         assert(w<(1<<6));
3403         sum += w*w;
3404     }
3405     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
3406 #ifdef REFINE_STATS
3407 {START_TIMER
3408 #endif
3409     run=0;
3410     rle_index=0;
3411     for(i=start_i; i<=last_non_zero; i++){
3412         int j= perm_scantable[i];
3413         const int level= block[j];
3414         int coeff;
3415
3416         if(level){
3417             if(level<0) coeff= qmul*level - qadd;
3418             else        coeff= qmul*level + qadd;
3419             run_tab[rle_index++]=run;
3420             run=0;
3421
3422             s->dsp.add_8x8basis(rem, basis[j], coeff);
3423         }else{
3424             run++;
3425         }
3426     }
3427 #ifdef REFINE_STATS
3428 if(last_non_zero>0){
3429 STOP_TIMER("init rem[]")
3430 }
3431 }
3432
3433 {START_TIMER
3434 #endif
3435     for(;;){
3436         int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3437         int best_coeff=0;
3438         int best_change=0;
3439         int run2, best_unquant_change=0, analyze_gradient;
3440 #ifdef REFINE_STATS
3441 {START_TIMER
3442 #endif
3443         analyze_gradient = last_non_zero > 2 || s->avctx->quantizer_noise_shaping >= 3;
3444
3445         if(analyze_gradient){
3446 #ifdef REFINE_STATS
3447 {START_TIMER
3448 #endif
3449             for(i=0; i<64; i++){
3450                 int w= weight[i];
3451
3452                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3453             }
3454 #ifdef REFINE_STATS
3455 STOP_TIMER("rem*w*w")}
3456 {START_TIMER
3457 #endif
3458             s->dsp.fdct(d1);
3459 #ifdef REFINE_STATS
3460 STOP_TIMER("dct")}
3461 #endif
3462         }
3463
3464         if(start_i){
3465             const int level= block[0];
3466             int change, old_coeff;
3467
3468             assert(s->mb_intra);
3469
3470             old_coeff= q*level;
3471
3472             for(change=-1; change<=1; change+=2){
3473                 int new_level= level + change;
3474                 int score, new_coeff;
3475
3476                 new_coeff= q*new_level;
3477                 if(new_coeff >= 2048 || new_coeff < 0)
3478                     continue;
3479
3480                 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3481                 if(score<best_score){
3482                     best_score= score;
3483                     best_coeff= 0;
3484                     best_change= change;
3485                     best_unquant_change= new_coeff - old_coeff;
3486                 }
3487             }
3488         }
3489
3490         run=0;
3491         rle_index=0;
3492         run2= run_tab[rle_index++];
3493         prev_level=0;
3494         prev_run=0;
3495
3496         for(i=start_i; i<64; i++){
3497             int j= perm_scantable[i];
3498             const int level= block[j];
3499             int change, old_coeff;
3500
3501             if(s->avctx->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3502                 break;
3503
3504             if(level){
3505                 if(level<0) old_coeff= qmul*level - qadd;
3506                 else        old_coeff= qmul*level + qadd;
3507                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3508             }else{
3509                 old_coeff=0;
3510                 run2--;
3511                 assert(run2>=0 || i >= last_non_zero );
3512             }
3513
3514             for(change=-1; change<=1; change+=2){
3515                 int new_level= level + change;
3516                 int score, new_coeff, unquant_change;
3517
3518                 score=0;
3519                 if(s->avctx->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3520                    continue;
3521
3522                 if(new_level){
3523                     if(new_level<0) new_coeff= qmul*new_level - qadd;
3524                     else            new_coeff= qmul*new_level + qadd;
3525                     if(new_coeff >= 2048 || new_coeff <= -2048)
3526                         continue;
3527                     //FIXME check for overflow
3528
3529                     if(level){
3530                         if(level < 63 && level > -63){
3531                             if(i < last_non_zero)
3532                                 score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
3533                                          - length[UNI_AC_ENC_INDEX(run, level+64)];
3534                             else
3535                                 score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3536                                          - last_length[UNI_AC_ENC_INDEX(run, level+64)];
3537                         }
3538                     }else{
3539                         assert(FFABS(new_level)==1);
3540
3541                         if(analyze_gradient){
3542                             int g= d1[ scantable[i] ];
3543                             if(g && (g^new_level) >= 0)
3544                                 continue;
3545                         }
3546
3547                         if(i < last_non_zero){
3548                             int next_i= i + run2 + 1;
3549                             int next_level= block[ perm_scantable[next_i] ] + 64;
3550
3551                             if(next_level&(~127))
3552                                 next_level= 0;
3553
3554                             if(next_i < last_non_zero)
3555                                 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
3556                                          + length[UNI_AC_ENC_INDEX(run2, next_level)]
3557                                          - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3558                             else
3559                                 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
3560                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3561                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3562                         }else{
3563                             score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3564                             if(prev_level){
3565                                 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3566                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3567                             }
3568                         }
3569                     }
3570                 }else{
3571                     new_coeff=0;
3572                     assert(FFABS(level)==1);
3573
3574                     if(i < last_non_zero){
3575                         int next_i= i + run2 + 1;
3576                         int next_level= block[ perm_scantable[next_i] ] + 64;
3577
3578                         if(next_level&(~127))
3579                             next_level= 0;
3580
3581                         if(next_i < last_non_zero)
3582                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3583                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
3584                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3585                         else
3586                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3587                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3588                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3589                     }else{
3590                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3591                         if(prev_level){
3592                             score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3593                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3594                         }
3595                     }
3596                 }
3597
3598                 score *= lambda;
3599
3600                 unquant_change= new_coeff - old_coeff;
3601                 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3602
3603                 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3604                 if(score<best_score){
3605                     best_score= score;
3606                     best_coeff= i;
3607                     best_change= change;
3608                     best_unquant_change= unquant_change;
3609                 }
3610             }
3611             if(level){
3612                 prev_level= level + 64;
3613                 if(prev_level&(~127))
3614                     prev_level= 0;
3615                 prev_run= run;
3616                 run=0;
3617             }else{
3618                 run++;
3619             }
3620         }
3621 #ifdef REFINE_STATS
3622 STOP_TIMER("iterative step")}
3623 #endif
3624
3625         if(best_change){
3626             int j= perm_scantable[ best_coeff ];
3627
3628             block[j] += best_change;
3629
3630             if(best_coeff > last_non_zero){
3631                 last_non_zero= best_coeff;
3632                 assert(block[j]);
3633 #ifdef REFINE_STATS
3634 after_last++;
3635 #endif
3636             }else{
3637 #ifdef REFINE_STATS
3638 if(block[j]){
3639     if(block[j] - best_change){
3640         if(FFABS(block[j]) > FFABS(block[j] - best_change)){
3641             raise++;
3642         }else{
3643             lower++;
3644         }
3645     }else{
3646         from_zero++;
3647     }
3648 }else{
3649     to_zero++;
3650 }
3651 #endif
3652                 for(; last_non_zero>=start_i; last_non_zero--){
3653                     if(block[perm_scantable[last_non_zero]])
3654                         break;
3655                 }
3656             }
3657 #ifdef REFINE_STATS
3658 count++;
3659 if(256*256*256*64 % count == 0){
3660     printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
3661 }
3662 #endif
3663             run=0;
3664             rle_index=0;
3665             for(i=start_i; i<=last_non_zero; i++){
3666                 int j= perm_scantable[i];
3667                 const int level= block[j];
3668
3669                  if(level){
3670                      run_tab[rle_index++]=run;
3671                      run=0;
3672                  }else{
3673                      run++;
3674                  }
3675             }
3676
3677             s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3678         }else{
3679             break;
3680         }
3681     }
3682 #ifdef REFINE_STATS
3683 if(last_non_zero>0){
3684 STOP_TIMER("iterative search")
3685 }
3686 }
3687 #endif
3688
3689     return last_non_zero;
3690 }
3691
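/* Plain (non-trellis) quantizer: forward DCT, optional denoising, separate DC
 * handling for intra blocks, then for each coefficient
 *   level = block[j] * qmat[j];  block[j] = sign(level) * ((bias + |level|) >> QMAT_SHIFT);
 * The test ((unsigned)(level + threshold1)) > threshold2, with
 * threshold1 = (1<<QMAT_SHIFT) - bias - 1 and threshold2 = 2*threshold1, is a
 * branch-free way of checking |level| > threshold1, i.e. whether the
 * coefficient survives quantization at all.  Surviving non-zero coefficients
 * are finally reordered for the IDCT permutation by ff_block_permute(). */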
3692 int dct_quantize_c(MpegEncContext *s,
3693                         DCTELEM *block, int n,
3694                         int qscale, int *overflow)
3695 {
3696     int i, j, level, last_non_zero, q, start_i;
3697     const int *qmat;
3698     const uint8_t *scantable= s->intra_scantable.scantable;
3699     int bias;
3700     int max=0;
3701     unsigned int threshold1, threshold2;
3702
3703     s->dsp.fdct (block);
3704
3705     if(s->dct_error_sum)
3706         s->denoise_dct(s, block);
3707
3708     if (s->mb_intra) {
3709         if (!s->h263_aic) {
3710             if (n < 4)
3711                 q = s->y_dc_scale;
3712             else
3713                 q = s->c_dc_scale;
3714             q = q << 3;
3715         } else
3716             /* For AIC we skip quant/dequant of INTRADC */
3717             q = 1 << 3;
3718
3719         /* note: block[0] is assumed to be positive */
3720         block[0] = (block[0] + (q >> 1)) / q;
3721         start_i = 1;
3722         last_non_zero = 0;
3723         qmat = s->q_intra_matrix[qscale];
3724         bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3725     } else {
3726         start_i = 0;
3727         last_non_zero = -1;
3728         qmat = s->q_inter_matrix[qscale];
3729         bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3730     }
3731     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3732     threshold2= (threshold1<<1);
3733     for(i=63;i>=start_i;i--) {
3734         j = scantable[i];
3735         level = block[j] * qmat[j];
3736
3737         if(((unsigned)(level+threshold1))>threshold2){
3738             last_non_zero = i;
3739             break;
3740         }else{
3741             block[j]=0;
3742         }
3743     }
3744     for(i=start_i; i<=last_non_zero; i++) {
3745         j = scantable[i];
3746         level = block[j] * qmat[j];
3747
3748 //        if(   bias+level >= (1<<QMAT_SHIFT)
3749 //           || bias-level >= (1<<QMAT_SHIFT)){
3750         if(((unsigned)(level+threshold1))>threshold2){
3751             if(level>0){
3752                 level= (bias + level)>>QMAT_SHIFT;
3753                 block[j]= level;
3754             }else{
3755                 level= (bias - level)>>QMAT_SHIFT;
3756                 block[j]= -level;
3757             }
3758             max |=level;
3759         }else{
3760             block[j]=0;
3761         }
3762     }
3763     *overflow= s->max_qcoeff < max; //overflow might have happened
3764
3765     /* we need this permutation so that the IDCT is corrected; only the != 0 elements are permuted */
3766     if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
3767         ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
3768
3769     return last_non_zero;
3770 }
3771
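/* Encoder registrations: all of the entries below share the generic
 * MPV_encode_init/MPV_encode_picture/MPV_encode_end entry points and an
 * MpegEncContext; they differ only in codec id, capabilities, pixel formats
 * and long name. */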
3772 AVCodec ff_h263_encoder = {
3773     .name           = "h263",
3774     .type           = AVMEDIA_TYPE_VIDEO,
3775     .id             = CODEC_ID_H263,
3776     .priv_data_size = sizeof(MpegEncContext),
3777     .init           = MPV_encode_init,
3778     .encode         = MPV_encode_picture,
3779     .close          = MPV_encode_end,
3780     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
3781     .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
3782 };
3783
3784 AVCodec ff_h263p_encoder = {
3785     .name           = "h263p",
3786     .type           = AVMEDIA_TYPE_VIDEO,
3787     .id             = CODEC_ID_H263P,
3788     .priv_data_size = sizeof(MpegEncContext),
3789     .init           = MPV_encode_init,
3790     .encode         = MPV_encode_picture,
3791     .close          = MPV_encode_end,
3792     .capabilities = CODEC_CAP_SLICE_THREADS,
3793     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
3794     .long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
3795 };
3796
3797 AVCodec ff_msmpeg4v2_encoder = {
3798     .name           = "msmpeg4v2",
3799     .type           = AVMEDIA_TYPE_VIDEO,
3800     .id             = CODEC_ID_MSMPEG4V2,
3801     .priv_data_size = sizeof(MpegEncContext),
3802     .init           = MPV_encode_init,
3803     .encode         = MPV_encode_picture,
3804     .close          = MPV_encode_end,
3805     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
3806     .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
3807 };
3808
3809 AVCodec ff_msmpeg4v3_encoder = {
3810     .name           = "msmpeg4",
3811     .type           = AVMEDIA_TYPE_VIDEO,
3812     .id             = CODEC_ID_MSMPEG4V3,
3813     .priv_data_size = sizeof(MpegEncContext),
3814     .init           = MPV_encode_init,
3815     .encode         = MPV_encode_picture,
3816     .close          = MPV_encode_end,
3817     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
3818     .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
3819 };
3820
3821 AVCodec ff_wmv1_encoder = {
3822     .name           = "wmv1",
3823     .type           = AVMEDIA_TYPE_VIDEO,
3824     .id             = CODEC_ID_WMV1,
3825     .priv_data_size = sizeof(MpegEncContext),
3826     .init           = MPV_encode_init,
3827     .encode         = MPV_encode_picture,
3828     .close          = MPV_encode_end,
3829     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
3830     .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
3831 };