]> git.sesse.net Git - ffmpeg/blob - libavcodec/mpegvideo_enc.c
mpegvideo: move encoding-only initialization from common_init() to encode_init()
[ffmpeg] / libavcodec / mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include <stdint.h>
31
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "avcodec.h"
38 #include "dct.h"
39 #include "dsputil.h"
40 #include "mpeg12.h"
41 #include "mpegvideo.h"
42 #include "h261.h"
43 #include "h263.h"
44 #include "mathops.h"
45 #include "mjpegenc.h"
46 #include "msmpeg4.h"
47 #include "faandct.h"
48 #include "thread.h"
49 #include "aandcttab.h"
50 #include "flv.h"
51 #include "mpeg4video.h"
52 #include "internal.h"
53 #include "bytestream.h"
54 #include <limits.h>
55
56 static int encode_picture(MpegEncContext *s, int picture_number);
57 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
58 static int sse_mb(MpegEncContext *s);
59 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
60 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
61
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
64
65 const AVOption ff_mpv_generic_options[] = {
66     FF_MPV_COMMON_OPTS
67     { NULL },
68 };
69
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71                        uint16_t (*qmat16)[2][64],
72                        const uint16_t *quant_matrix,
73                        int bias, int qmin, int qmax, int intra)
74 {
75     int qscale;
76     int shift = 0;
77
78     for (qscale = qmin; qscale <= qmax; qscale++) {
79         int i;
80         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81             dsp->fdct == ff_jpeg_fdct_islow_10 ||
82             dsp->fdct == ff_faandct) {
83             for (i = 0; i < 64; i++) {
84                 const int j = dsp->idct_permutation[i];
85                 /* 16 <= qscale * quant_matrix[i] <= 7905
86                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
87                  *             19952 <=              x  <= 249205026
88                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
89                  *           3444240 >= (1 << 36) / (x) >= 275 */
90
91                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92                                         (qscale * quant_matrix[j]));
93             }
94         } else if (dsp->fdct == ff_fdct_ifast) {
95             for (i = 0; i < 64; i++) {
96                 const int j = dsp->idct_permutation[i];
97                 /* 16 <= qscale * quant_matrix[i] <= 7905
98                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99                  *             19952 <=              x  <= 249205026
100                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101                  *           3444240 >= (1 << 36) / (x) >= 275 */
102
103                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104                                         (ff_aanscales[i] * qscale *
105                                          quant_matrix[j]));
106             }
107         } else {
108             for (i = 0; i < 64; i++) {
109                 const int j = dsp->idct_permutation[i];
110                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
111                  * Assume x = qscale * quant_matrix[i]
112                  * So             16 <=              x  <= 7905
113                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
114                  * so          32768 >= (1 << 19) / (x) >= 67 */
115                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
116                                         (qscale * quant_matrix[j]));
117                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
118                 //                    (qscale * quant_matrix[i]);
119                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
120                                        (qscale * quant_matrix[j]);
121
122                 if (qmat16[qscale][0][i] == 0 ||
123                     qmat16[qscale][0][i] == 128 * 256)
124                     qmat16[qscale][0][i] = 128 * 256 - 1;
125                 qmat16[qscale][1][i] =
126                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
127                                 qmat16[qscale][0][i]);
128             }
129         }
130
131         for (i = intra; i < 64; i++) {
132             int64_t max = 8191;
133             if (dsp->fdct == ff_fdct_ifast) {
134                 max = (8191LL * ff_aanscales[i]) >> 14;
135             }
136             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
137                 shift++;
138             }
139         }
140     }
141     if (shift) {
142         av_log(NULL, AV_LOG_INFO,
143                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
144                QMAT_SHIFT - shift);
145     }
146 }
147
148 static inline void update_qscale(MpegEncContext *s)
149 {
150     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
151                 (FF_LAMBDA_SHIFT + 7);
152     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
153
154     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
155                  FF_LAMBDA_SHIFT;
156 }
157
158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
159 {
160     int i;
161
162     if (matrix) {
163         put_bits(pb, 1, 1);
164         for (i = 0; i < 64; i++) {
165             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
166         }
167     } else
168         put_bits(pb, 1, 0);
169 }
170
171 /**
172  * init s->current_picture.qscale_table from s->lambda_table
173  */
174 void ff_init_qscale_tab(MpegEncContext *s)
175 {
176     int8_t * const qscale_table = s->current_picture.qscale_table;
177     int i;
178
179     for (i = 0; i < s->mb_num; i++) {
180         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
181         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
182         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
183                                                   s->avctx->qmax);
184     }
185 }
186
187 static void update_duplicate_context_after_me(MpegEncContext *dst,
188                                               MpegEncContext *src)
189 {
190 #define COPY(a) dst->a= src->a
191     COPY(pict_type);
192     COPY(current_picture);
193     COPY(f_code);
194     COPY(b_code);
195     COPY(qscale);
196     COPY(lambda);
197     COPY(lambda2);
198     COPY(picture_in_gop_number);
199     COPY(gop_picture_number);
200     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
201     COPY(progressive_frame);    // FIXME don't set in encode_header
202     COPY(partitioned_frame);    // FIXME don't set in encode_header
203 #undef COPY
204 }
205
206 /**
207  * Set the given MpegEncContext to defaults for encoding.
208  * the changed fields will not depend upon the prior state of the MpegEncContext.
209  */
210 static void MPV_encode_defaults(MpegEncContext *s)
211 {
212     int i;
213     ff_MPV_common_defaults(s);
214
215     for (i = -16; i < 16; i++) {
216         default_fcode_tab[i + MAX_MV] = 1;
217     }
218     s->me.mv_penalty = default_mv_penalty;
219     s->fcode_tab     = default_fcode_tab;
220
221     s->input_picture_number  = 0;
222     s->picture_in_gop_number = 0;
223 }
224
225 /* init video encoder */
226 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
227 {
228     MpegEncContext *s = avctx->priv_data;
229     int i, ret;
230     int chroma_h_shift, chroma_v_shift;
231
232     MPV_encode_defaults(s);
233
234     switch (avctx->codec_id) {
235     case AV_CODEC_ID_MPEG2VIDEO:
236         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
237             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
238             av_log(avctx, AV_LOG_ERROR,
239                    "only YUV420 and YUV422 are supported\n");
240             return -1;
241         }
242         break;
243     case AV_CODEC_ID_LJPEG:
244         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
245             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
246             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
247             avctx->pix_fmt != AV_PIX_FMT_BGRA     &&
248             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
249               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
250               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
251              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
252             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
253             return -1;
254         }
255         break;
256     case AV_CODEC_ID_MJPEG:
257         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
258             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
259             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
260               avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
261              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
262             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
263             return -1;
264         }
265         break;
266     default:
267         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
268             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
269             return -1;
270         }
271     }
272
273     switch (avctx->pix_fmt) {
274     case AV_PIX_FMT_YUVJ422P:
275     case AV_PIX_FMT_YUV422P:
276         s->chroma_format = CHROMA_422;
277         break;
278     case AV_PIX_FMT_YUVJ420P:
279     case AV_PIX_FMT_YUV420P:
280     default:
281         s->chroma_format = CHROMA_420;
282         break;
283     }
284
285     s->bit_rate = avctx->bit_rate;
286     s->width    = avctx->width;
287     s->height   = avctx->height;
288     if (avctx->gop_size > 600 &&
289         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
290         av_log(avctx, AV_LOG_ERROR,
291                "Warning keyframe interval too large! reducing it ...\n");
292         avctx->gop_size = 600;
293     }
294     s->gop_size     = avctx->gop_size;
295     s->avctx        = avctx;
296     s->flags        = avctx->flags;
297     s->flags2       = avctx->flags2;
298     if (avctx->max_b_frames > MAX_B_FRAMES) {
299         av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
300                "is %d.\n", MAX_B_FRAMES);
301     }
302     s->max_b_frames = avctx->max_b_frames;
303     s->codec_id     = avctx->codec->id;
304     s->strict_std_compliance = avctx->strict_std_compliance;
305     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
306     s->mpeg_quant         = avctx->mpeg_quant;
307     s->rtp_mode           = !!avctx->rtp_payload_size;
308     s->intra_dc_precision = avctx->intra_dc_precision;
309     s->user_specified_pts = AV_NOPTS_VALUE;
310
311     if (s->gop_size <= 1) {
312         s->intra_only = 1;
313         s->gop_size   = 12;
314     } else {
315         s->intra_only = 0;
316     }
317
318     s->me_method = avctx->me_method;
319
320     /* Fixed QSCALE */
321     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
322
323     s->adaptive_quant = (s->avctx->lumi_masking ||
324                          s->avctx->dark_masking ||
325                          s->avctx->temporal_cplx_masking ||
326                          s->avctx->spatial_cplx_masking  ||
327                          s->avctx->p_masking      ||
328                          s->avctx->border_masking ||
329                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
330                         !s->fixed_qscale;
331
332     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
333
334     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
335         av_log(avctx, AV_LOG_ERROR,
336                "a vbv buffer size is needed, "
337                "for encoding with a maximum bitrate\n");
338         return -1;
339     }
340
341     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
342         av_log(avctx, AV_LOG_INFO,
343                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
344     }
345
346     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
347         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
348         return -1;
349     }
350
351     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
352         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
353         return -1;
354     }
355
356     if (avctx->rc_max_rate &&
357         avctx->rc_max_rate == avctx->bit_rate &&
358         avctx->rc_max_rate != avctx->rc_min_rate) {
359         av_log(avctx, AV_LOG_INFO,
360                "impossible bitrate constraints, this will fail\n");
361     }
362
363     if (avctx->rc_buffer_size &&
364         avctx->bit_rate * (int64_t)avctx->time_base.num >
365             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
366         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
367         return -1;
368     }
369
370     if (!s->fixed_qscale &&
371         avctx->bit_rate * av_q2d(avctx->time_base) >
372             avctx->bit_rate_tolerance) {
373         av_log(avctx, AV_LOG_ERROR,
374                "bitrate tolerance too small for bitrate\n");
375         return -1;
376     }
377
378     if (s->avctx->rc_max_rate &&
379         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
380         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
381          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
382         90000LL * (avctx->rc_buffer_size - 1) >
383             s->avctx->rc_max_rate * 0xFFFFLL) {
384         av_log(avctx, AV_LOG_INFO,
385                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
386                "specified vbv buffer is too large for the given bitrate!\n");
387     }
388
389     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != AV_CODEC_ID_MPEG4 &&
390         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
391         s->codec_id != AV_CODEC_ID_FLV1) {
392         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
393         return -1;
394     }
395
396     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
397         av_log(avctx, AV_LOG_ERROR,
398                "OBMC is only supported with simple mb decision\n");
399         return -1;
400     }
401
402     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
403         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
404         return -1;
405     }
406
407     if (s->max_b_frames                    &&
408         s->codec_id != AV_CODEC_ID_MPEG4      &&
409         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
410         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
411         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
412         return -1;
413     }
414
415     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
416          s->codec_id == AV_CODEC_ID_H263  ||
417          s->codec_id == AV_CODEC_ID_H263P) &&
418         (avctx->sample_aspect_ratio.num > 255 ||
419          avctx->sample_aspect_ratio.den > 255)) {
420         av_log(avctx, AV_LOG_ERROR,
421                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
422                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
423         return -1;
424     }
425
426     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
427         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
428         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
429         return -1;
430     }
431
432     // FIXME mpeg2 uses that too
433     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
434         av_log(avctx, AV_LOG_ERROR,
435                "mpeg2 style quantization not supported by codec\n");
436         return -1;
437     }
438
439     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
440         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
441         return -1;
442     }
443
444     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
445         s->avctx->mb_decision != FF_MB_DECISION_RD) {
446         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
447         return -1;
448     }
449
450     if (s->avctx->scenechange_threshold < 1000000000 &&
451         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
452         av_log(avctx, AV_LOG_ERROR,
453                "closed gop with scene change detection are not supported yet, "
454                "set threshold to 1000000000\n");
455         return -1;
456     }
457
458     if (s->flags & CODEC_FLAG_LOW_DELAY) {
459         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
460             av_log(avctx, AV_LOG_ERROR,
461                   "low delay forcing is only available for mpeg2\n");
462             return -1;
463         }
464         if (s->max_b_frames != 0) {
465             av_log(avctx, AV_LOG_ERROR,
466                    "b frames cannot be used with low delay\n");
467             return -1;
468         }
469     }
470
471     if (s->q_scale_type == 1) {
472         if (avctx->qmax > 12) {
473             av_log(avctx, AV_LOG_ERROR,
474                    "non linear quant only supports qmax <= 12 currently\n");
475             return -1;
476         }
477     }
478
479     if (s->avctx->thread_count > 1         &&
480         s->codec_id != AV_CODEC_ID_MPEG4      &&
481         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
482         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
483         (s->codec_id != AV_CODEC_ID_H263P)) {
484         av_log(avctx, AV_LOG_ERROR,
485                "multi threaded encoding not supported by codec\n");
486         return -1;
487     }
488
489     if (s->avctx->thread_count < 1) {
490         av_log(avctx, AV_LOG_ERROR,
491                "automatic thread number detection not supported by codec,"
492                "patch welcome\n");
493         return -1;
494     }
495
496     if (s->avctx->thread_count > 1)
497         s->rtp_mode = 1;
498
499     if (!avctx->time_base.den || !avctx->time_base.num) {
500         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
501         return -1;
502     }
503
504     i = (INT_MAX / 2 + 128) >> 8;
505     if (avctx->mb_threshold >= i) {
506         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
507                i - 1);
508         return -1;
509     }
510
511     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
512         av_log(avctx, AV_LOG_INFO,
513                "notice: b_frame_strategy only affects the first pass\n");
514         avctx->b_frame_strategy = 0;
515     }
516
517     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
518     if (i > 1) {
519         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
520         avctx->time_base.den /= i;
521         avctx->time_base.num /= i;
522         //return -1;
523     }
524
525     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
526         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
527         // (a + x * 3 / 8) / x
528         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
529         s->inter_quant_bias = 0;
530     } else {
531         s->intra_quant_bias = 0;
532         // (a - x / 4) / x
533         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
534     }
535
536     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
537         s->intra_quant_bias = avctx->intra_quant_bias;
538     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
539         s->inter_quant_bias = avctx->inter_quant_bias;
540
541     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
542                                      &chroma_v_shift);
543
544     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
545         s->avctx->time_base.den > (1 << 16) - 1) {
546         av_log(avctx, AV_LOG_ERROR,
547                "timebase %d/%d not supported by MPEG 4 standard, "
548                "the maximum admitted value for the timebase denominator "
549                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
550                (1 << 16) - 1);
551         return -1;
552     }
553     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
554
555     switch (avctx->codec->id) {
556     case AV_CODEC_ID_MPEG1VIDEO:
557         s->out_format = FMT_MPEG1;
558         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
559         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
560         break;
561     case AV_CODEC_ID_MPEG2VIDEO:
562         s->out_format = FMT_MPEG1;
563         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
564         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
565         s->rtp_mode   = 1;
566         break;
567     case AV_CODEC_ID_LJPEG:
568     case AV_CODEC_ID_MJPEG:
569         s->out_format = FMT_MJPEG;
570         s->intra_only = 1; /* force intra only for jpeg */
571         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
572             avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
573             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
574             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
575             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
576         } else {
577             s->mjpeg_vsample[0] = 2;
578             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
579             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
580             s->mjpeg_hsample[0] = 2;
581             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
582             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
583         }
584         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
585             ff_mjpeg_encode_init(s) < 0)
586             return -1;
587         avctx->delay = 0;
588         s->low_delay = 1;
589         break;
590     case AV_CODEC_ID_H261:
591         if (!CONFIG_H261_ENCODER)
592             return -1;
593         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
594             av_log(avctx, AV_LOG_ERROR,
595                    "The specified picture size of %dx%d is not valid for the "
596                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
597                     s->width, s->height);
598             return -1;
599         }
600         s->out_format = FMT_H261;
601         avctx->delay  = 0;
602         s->low_delay  = 1;
603         break;
604     case AV_CODEC_ID_H263:
605         if (!CONFIG_H263_ENCODER)
606         return -1;
607         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
608                              s->width, s->height) == 8) {
609             av_log(avctx, AV_LOG_INFO,
610                    "The specified picture size of %dx%d is not valid for "
611                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
612                    "352x288, 704x576, and 1408x1152."
613                    "Try H.263+.\n", s->width, s->height);
614             return -1;
615         }
616         s->out_format = FMT_H263;
617         avctx->delay  = 0;
618         s->low_delay  = 1;
619         break;
620     case AV_CODEC_ID_H263P:
621         s->out_format = FMT_H263;
622         s->h263_plus  = 1;
623         /* Fx */
624         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
625         s->modified_quant  = s->h263_aic;
626         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
627         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
628
629         /* /Fx */
630         /* These are just to be sure */
631         avctx->delay = 0;
632         s->low_delay = 1;
633         break;
634     case AV_CODEC_ID_FLV1:
635         s->out_format      = FMT_H263;
636         s->h263_flv        = 2; /* format = 1; 11-bit codes */
637         s->unrestricted_mv = 1;
638         s->rtp_mode  = 0; /* don't allow GOB */
639         avctx->delay = 0;
640         s->low_delay = 1;
641         break;
642     case AV_CODEC_ID_RV10:
643         s->out_format = FMT_H263;
644         avctx->delay  = 0;
645         s->low_delay  = 1;
646         break;
647     case AV_CODEC_ID_RV20:
648         s->out_format      = FMT_H263;
649         avctx->delay       = 0;
650         s->low_delay       = 1;
651         s->modified_quant  = 1;
652         s->h263_aic        = 1;
653         s->h263_plus       = 1;
654         s->loop_filter     = 1;
655         s->unrestricted_mv = 0;
656         break;
657     case AV_CODEC_ID_MPEG4:
658         s->out_format      = FMT_H263;
659         s->h263_pred       = 1;
660         s->unrestricted_mv = 1;
661         s->low_delay       = s->max_b_frames ? 0 : 1;
662         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
663         break;
664     case AV_CODEC_ID_MSMPEG4V2:
665         s->out_format      = FMT_H263;
666         s->h263_pred       = 1;
667         s->unrestricted_mv = 1;
668         s->msmpeg4_version = 2;
669         avctx->delay       = 0;
670         s->low_delay       = 1;
671         break;
672     case AV_CODEC_ID_MSMPEG4V3:
673         s->out_format        = FMT_H263;
674         s->h263_pred         = 1;
675         s->unrestricted_mv   = 1;
676         s->msmpeg4_version   = 3;
677         s->flipflop_rounding = 1;
678         avctx->delay         = 0;
679         s->low_delay         = 1;
680         break;
681     case AV_CODEC_ID_WMV1:
682         s->out_format        = FMT_H263;
683         s->h263_pred         = 1;
684         s->unrestricted_mv   = 1;
685         s->msmpeg4_version   = 4;
686         s->flipflop_rounding = 1;
687         avctx->delay         = 0;
688         s->low_delay         = 1;
689         break;
690     case AV_CODEC_ID_WMV2:
691         s->out_format        = FMT_H263;
692         s->h263_pred         = 1;
693         s->unrestricted_mv   = 1;
694         s->msmpeg4_version   = 5;
695         s->flipflop_rounding = 1;
696         avctx->delay         = 0;
697         s->low_delay         = 1;
698         break;
699     default:
700         return -1;
701     }
702
703     avctx->has_b_frames = !s->low_delay;
704
705     s->encoding = 1;
706
707     s->progressive_frame    =
708     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
709                                                 CODEC_FLAG_INTERLACED_ME) ||
710                                 s->alternate_scan);
711
712     /* init */
713     if (ff_MPV_common_init(s) < 0)
714         return -1;
715
716     if (ARCH_X86)
717         ff_MPV_encode_init_x86(s);
718
719     s->avctx->coded_frame = &s->current_picture.f;
720
721     if (s->msmpeg4_version) {
722         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
723                           2 * 2 * (MAX_LEVEL + 1) *
724                           (MAX_RUN + 1) * 2 * sizeof(int), fail);
725     }
726     FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
727
728     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,   64 * 32 * sizeof(int), fail);
729     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,   64 * 32 * sizeof(int), fail);
730     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
731     FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
732     FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
733                       MAX_PICTURE_COUNT * sizeof(Picture *), fail);
734     FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
735                       MAX_PICTURE_COUNT * sizeof(Picture *), fail);
736
737     if (s->avctx->noise_reduction) {
738         FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
739                           2 * 64 * sizeof(uint16_t), fail);
740     }
741
742     ff_h263dsp_init(&s->h263dsp);
743     if (!s->dct_quantize)
744         s->dct_quantize = ff_dct_quantize_c;
745     if (!s->denoise_dct)
746         s->denoise_dct  = denoise_dct_c;
747     s->fast_dct_quantize = s->dct_quantize;
748     if (avctx->trellis)
749         s->dct_quantize  = dct_quantize_trellis_c;
750
751     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
752         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
753
754     s->quant_precision = 5;
755
756     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
757     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
758
759     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
760         ff_h261_encode_init(s);
761     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
762         ff_h263_encode_init(s);
763     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
764         ff_msmpeg4_encode_init(s);
765     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
766         && s->out_format == FMT_MPEG1)
767         ff_mpeg1_encode_init(s);
768
769     /* init q matrix */
770     for (i = 0; i < 64; i++) {
771         int j = s->dsp.idct_permutation[i];
772         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
773             s->mpeg_quant) {
774             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
775             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
776         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
777             s->intra_matrix[j] =
778             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
779         } else {
780             /* mpeg1/2 */
781             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
782             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
783         }
784         if (s->avctx->intra_matrix)
785             s->intra_matrix[j] = s->avctx->intra_matrix[i];
786         if (s->avctx->inter_matrix)
787             s->inter_matrix[j] = s->avctx->inter_matrix[i];
788     }
789
790     /* precompute matrix */
791     /* for mjpeg, we do include qscale in the matrix */
792     if (s->out_format != FMT_MJPEG) {
793         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
794                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
795                           31, 1);
796         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
797                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
798                           31, 0);
799     }
800
801     if (ff_rate_control_init(s) < 0)
802         return -1;
803
804 #if FF_API_ERROR_RATE
805     FF_DISABLE_DEPRECATION_WARNINGS
806     if (avctx->error_rate)
807         s->error_rate = avctx->error_rate;
808     FF_ENABLE_DEPRECATION_WARNINGS;
809 #endif
810
811     if (avctx->b_frame_strategy == 2) {
812         for (i = 0; i < s->max_b_frames + 2; i++) {
813             s->tmp_frames[i] = av_frame_alloc();
814             if (!s->tmp_frames[i])
815                 return AVERROR(ENOMEM);
816
817             s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
818             s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
819             s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
820
821             ret = av_frame_get_buffer(s->tmp_frames[i], 32);
822             if (ret < 0)
823                 return ret;
824         }
825     }
826
827     return 0;
828 fail:
829     ff_MPV_encode_end(avctx);
830     return AVERROR_UNKNOWN;
831 }
832
833 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
834 {
835     MpegEncContext *s = avctx->priv_data;
836     int i;
837
838     ff_rate_control_uninit(s);
839
840     ff_MPV_common_end(s);
841     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
842         s->out_format == FMT_MJPEG)
843         ff_mjpeg_encode_close(s);
844
845     av_freep(&avctx->extradata);
846
847     for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
848         av_frame_free(&s->tmp_frames[i]);
849
850     return 0;
851 }
852
853 static int get_sae(uint8_t *src, int ref, int stride)
854 {
855     int x,y;
856     int acc = 0;
857
858     for (y = 0; y < 16; y++) {
859         for (x = 0; x < 16; x++) {
860             acc += FFABS(src[x + y * stride] - ref);
861         }
862     }
863
864     return acc;
865 }
866
867 static int get_intra_count(MpegEncContext *s, uint8_t *src,
868                            uint8_t *ref, int stride)
869 {
870     int x, y, w, h;
871     int acc = 0;
872
873     w = s->width  & ~15;
874     h = s->height & ~15;
875
876     for (y = 0; y < h; y += 16) {
877         for (x = 0; x < w; x += 16) {
878             int offset = x + y * stride;
879             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
880                                      16);
881             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
882             int sae  = get_sae(src + offset, mean, stride);
883
884             acc += sae + 500 < sad;
885         }
886     }
887     return acc;
888 }
889
890
891 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
892 {
893     Picture *pic = NULL;
894     int64_t pts;
895     int i, display_picture_number = 0, ret;
896     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
897                                                  (s->low_delay ? 0 : 1);
898     int direct = 1;
899
900     if (pic_arg) {
901         pts = pic_arg->pts;
902         display_picture_number = s->input_picture_number++;
903
904         if (pts != AV_NOPTS_VALUE) {
905             if (s->user_specified_pts != AV_NOPTS_VALUE) {
906                 int64_t time = pts;
907                 int64_t last = s->user_specified_pts;
908
909                 if (time <= last) {
910                     av_log(s->avctx, AV_LOG_ERROR,
911                            "Error, Invalid timestamp=%"PRId64", "
912                            "last=%"PRId64"\n", pts, s->user_specified_pts);
913                     return -1;
914                 }
915
916                 if (!s->low_delay && display_picture_number == 1)
917                     s->dts_delta = time - last;
918             }
919             s->user_specified_pts = pts;
920         } else {
921             if (s->user_specified_pts != AV_NOPTS_VALUE) {
922                 s->user_specified_pts =
923                 pts = s->user_specified_pts + 1;
924                 av_log(s->avctx, AV_LOG_INFO,
925                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
926                        pts);
927             } else {
928                 pts = display_picture_number;
929             }
930         }
931     }
932
933     if (pic_arg) {
934         if (!pic_arg->buf[0]);
935             direct = 0;
936         if (pic_arg->linesize[0] != s->linesize)
937             direct = 0;
938         if (pic_arg->linesize[1] != s->uvlinesize)
939             direct = 0;
940         if (pic_arg->linesize[2] != s->uvlinesize)
941             direct = 0;
942
943         av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
944                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
945
946         if (direct) {
947             i = ff_find_unused_picture(s, 1);
948             if (i < 0)
949                 return i;
950
951             pic = &s->picture[i];
952             pic->reference = 3;
953
954             if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
955                 return ret;
956             if (ff_alloc_picture(s, pic, 1) < 0) {
957                 return -1;
958             }
959         } else {
960             i = ff_find_unused_picture(s, 0);
961             if (i < 0)
962                 return i;
963
964             pic = &s->picture[i];
965             pic->reference = 3;
966
967             if (ff_alloc_picture(s, pic, 0) < 0) {
968                 return -1;
969             }
970
971             if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
972                 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
973                 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
974                 // empty
975             } else {
976                 int h_chroma_shift, v_chroma_shift;
977                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
978                                                  &h_chroma_shift,
979                                                  &v_chroma_shift);
980
981                 for (i = 0; i < 3; i++) {
982                     int src_stride = pic_arg->linesize[i];
983                     int dst_stride = i ? s->uvlinesize : s->linesize;
984                     int h_shift = i ? h_chroma_shift : 0;
985                     int v_shift = i ? v_chroma_shift : 0;
986                     int w = s->width  >> h_shift;
987                     int h = s->height >> v_shift;
988                     uint8_t *src = pic_arg->data[i];
989                     uint8_t *dst = pic->f.data[i];
990
991                     if (!s->avctx->rc_buffer_size)
992                         dst += INPLACE_OFFSET;
993
994                     if (src_stride == dst_stride)
995                         memcpy(dst, src, src_stride * h);
996                     else {
997                         while (h--) {
998                             memcpy(dst, src, w);
999                             dst += dst_stride;
1000                             src += src_stride;
1001                         }
1002                     }
1003                 }
1004             }
1005         }
1006         ret = av_frame_copy_props(&pic->f, pic_arg);
1007         if (ret < 0)
1008             return ret;
1009
1010         pic->f.display_picture_number = display_picture_number;
1011         pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
1012     }
1013
1014     /* shift buffer entries */
1015     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1016         s->input_picture[i - 1] = s->input_picture[i];
1017
1018     s->input_picture[encoding_delay] = (Picture*) pic;
1019
1020     return 0;
1021 }
1022
1023 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1024 {
1025     int x, y, plane;
1026     int score = 0;
1027     int64_t score64 = 0;
1028
1029     for (plane = 0; plane < 3; plane++) {
1030         const int stride = p->f.linesize[plane];
1031         const int bw = plane ? 1 : 2;
1032         for (y = 0; y < s->mb_height * bw; y++) {
1033             for (x = 0; x < s->mb_width * bw; x++) {
1034                 int off = p->shared ? 0 : 16;
1035                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1036                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1037                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1038
1039                 switch (s->avctx->frame_skip_exp) {
1040                 case 0: score    =  FFMAX(score, v);          break;
1041                 case 1: score   += FFABS(v);                  break;
1042                 case 2: score   += v * v;                     break;
1043                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1044                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1045                 }
1046             }
1047         }
1048     }
1049
1050     if (score)
1051         score64 = score;
1052
1053     if (score64 < s->avctx->frame_skip_threshold)
1054         return 1;
1055     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1056         return 1;
1057     return 0;
1058 }
1059
1060 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1061 {
1062     AVPacket pkt = { 0 };
1063     int ret, got_output;
1064
1065     av_init_packet(&pkt);
1066     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1067     if (ret < 0)
1068         return ret;
1069
1070     ret = pkt.size;
1071     av_free_packet(&pkt);
1072     return ret;
1073 }
1074
1075 static int estimate_best_b_count(MpegEncContext *s)
1076 {
1077     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1078     AVCodecContext *c = avcodec_alloc_context3(NULL);
1079     const int scale = s->avctx->brd_scale;
1080     int i, j, out_size, p_lambda, b_lambda, lambda2;
1081     int64_t best_rd  = INT64_MAX;
1082     int best_b_count = -1;
1083
1084     assert(scale >= 0 && scale <= 3);
1085
1086     //emms_c();
1087     //s->next_picture_ptr->quality;
1088     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1089     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1090     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1091     if (!b_lambda) // FIXME we should do this somewhere else
1092         b_lambda = p_lambda;
1093     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1094                FF_LAMBDA_SHIFT;
1095
1096     c->width        = s->width  >> scale;
1097     c->height       = s->height >> scale;
1098     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1099                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1100     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1101     c->mb_decision  = s->avctx->mb_decision;
1102     c->me_cmp       = s->avctx->me_cmp;
1103     c->mb_cmp       = s->avctx->mb_cmp;
1104     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1105     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1106     c->time_base    = s->avctx->time_base;
1107     c->max_b_frames = s->max_b_frames;
1108
1109     if (avcodec_open2(c, codec, NULL) < 0)
1110         return -1;
1111
1112     for (i = 0; i < s->max_b_frames + 2; i++) {
1113         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1114                                                 s->next_picture_ptr;
1115
1116         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1117             pre_input = *pre_input_ptr;
1118
1119             if (!pre_input.shared && i) {
1120                 pre_input.f.data[0] += INPLACE_OFFSET;
1121                 pre_input.f.data[1] += INPLACE_OFFSET;
1122                 pre_input.f.data[2] += INPLACE_OFFSET;
1123             }
1124
1125             s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
1126                                  pre_input.f.data[0], pre_input.f.linesize[0],
1127                                  c->width,      c->height);
1128             s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
1129                                  pre_input.f.data[1], pre_input.f.linesize[1],
1130                                  c->width >> 1, c->height >> 1);
1131             s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
1132                                  pre_input.f.data[2], pre_input.f.linesize[2],
1133                                  c->width >> 1, c->height >> 1);
1134         }
1135     }
1136
1137     for (j = 0; j < s->max_b_frames + 1; j++) {
1138         int64_t rd = 0;
1139
1140         if (!s->input_picture[j])
1141             break;
1142
1143         c->error[0] = c->error[1] = c->error[2] = 0;
1144
1145         s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1146         s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;
1147
1148         out_size = encode_frame(c, s->tmp_frames[0]);
1149
1150         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1151
1152         for (i = 0; i < s->max_b_frames + 1; i++) {
1153             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1154
1155             s->tmp_frames[i + 1]->pict_type = is_p ?
1156                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1157             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
1158
1159             out_size = encode_frame(c, s->tmp_frames[i + 1]);
1160
1161             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1162         }
1163
1164         /* get the delayed frames */
1165         while (out_size) {
1166             out_size = encode_frame(c, NULL);
1167             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1168         }
1169
1170         rd += c->error[0] + c->error[1] + c->error[2];
1171
1172         if (rd < best_rd) {
1173             best_rd = rd;
1174             best_b_count = j;
1175         }
1176     }
1177
1178     avcodec_close(c);
1179     av_freep(&c);
1180
1181     return best_b_count;
1182 }
1183
1184 static int select_input_picture(MpegEncContext *s)
1185 {
1186     int i, ret;
1187
1188     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1189         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1190     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1191
1192     /* set next picture type & ordering */
1193     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1194         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1195             s->next_picture_ptr == NULL || s->intra_only) {
1196             s->reordered_input_picture[0] = s->input_picture[0];
1197             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1198             s->reordered_input_picture[0]->f.coded_picture_number =
1199                 s->coded_picture_number++;
1200         } else {
1201             int b_frames;
1202
1203             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1204                 if (s->picture_in_gop_number < s->gop_size &&
1205                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1206                     // FIXME check that te gop check above is +-1 correct
1207                     av_frame_unref(&s->input_picture[0]->f);
1208
1209                     emms_c();
1210                     ff_vbv_update(s, 0);
1211
1212                     goto no_output_pic;
1213                 }
1214             }
1215
1216             if (s->flags & CODEC_FLAG_PASS2) {
1217                 for (i = 0; i < s->max_b_frames + 1; i++) {
1218                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1219
1220                     if (pict_num >= s->rc_context.num_entries)
1221                         break;
1222                     if (!s->input_picture[i]) {
1223                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1224                         break;
1225                     }
1226
1227                     s->input_picture[i]->f.pict_type =
1228                         s->rc_context.entry[pict_num].new_pict_type;
1229                 }
1230             }
1231
1232             if (s->avctx->b_frame_strategy == 0) {
1233                 b_frames = s->max_b_frames;
1234                 while (b_frames && !s->input_picture[b_frames])
1235                     b_frames--;
1236             } else if (s->avctx->b_frame_strategy == 1) {
1237                 for (i = 1; i < s->max_b_frames + 1; i++) {
1238                     if (s->input_picture[i] &&
1239                         s->input_picture[i]->b_frame_score == 0) {
1240                         s->input_picture[i]->b_frame_score =
1241                             get_intra_count(s,
1242                                             s->input_picture[i    ]->f.data[0],
1243                                             s->input_picture[i - 1]->f.data[0],
1244                                             s->linesize) + 1;
1245                     }
1246                 }
1247                 for (i = 0; i < s->max_b_frames + 1; i++) {
1248                     if (s->input_picture[i] == NULL ||
1249                         s->input_picture[i]->b_frame_score - 1 >
1250                             s->mb_num / s->avctx->b_sensitivity)
1251                         break;
1252                 }
1253
1254                 b_frames = FFMAX(0, i - 1);
1255
1256                 /* reset scores */
1257                 for (i = 0; i < b_frames + 1; i++) {
1258                     s->input_picture[i]->b_frame_score = 0;
1259                 }
1260             } else if (s->avctx->b_frame_strategy == 2) {
1261                 b_frames = estimate_best_b_count(s);
1262             } else {
1263                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1264                 b_frames = 0;
1265             }
1266
1267             emms_c();
1268
1269             for (i = b_frames - 1; i >= 0; i--) {
1270                 int type = s->input_picture[i]->f.pict_type;
1271                 if (type && type != AV_PICTURE_TYPE_B)
1272                     b_frames = i;
1273             }
1274             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1275                 b_frames == s->max_b_frames) {
1276                 av_log(s->avctx, AV_LOG_ERROR,
1277                        "warning, too many b frames in a row\n");
1278             }
1279
1280             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1281                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1282                     s->gop_size > s->picture_in_gop_number) {
1283                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1284                 } else {
1285                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1286                         b_frames = 0;
1287                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1288                 }
1289             }
1290
1291             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1292                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1293                 b_frames--;
1294
1295             s->reordered_input_picture[0] = s->input_picture[b_frames];
1296             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1297                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1298             s->reordered_input_picture[0]->f.coded_picture_number =
1299                 s->coded_picture_number++;
1300             for (i = 0; i < b_frames; i++) {
1301                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1302                 s->reordered_input_picture[i + 1]->f.pict_type =
1303                     AV_PICTURE_TYPE_B;
1304                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1305                     s->coded_picture_number++;
1306             }
1307         }
1308     }
1309 no_output_pic:
1310     if (s->reordered_input_picture[0]) {
1311         s->reordered_input_picture[0]->reference =
1312            s->reordered_input_picture[0]->f.pict_type !=
1313                AV_PICTURE_TYPE_B ? 3 : 0;
1314
1315         ff_mpeg_unref_picture(s, &s->new_picture);
1316         if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1317             return ret;
1318
1319         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1320             // input is a shared pix, so we can't modifiy it -> alloc a new
1321             // one & ensure that the shared one is reuseable
1322
1323             Picture *pic;
1324             int i = ff_find_unused_picture(s, 0);
1325             if (i < 0)
1326                 return i;
1327             pic = &s->picture[i];
1328
1329             pic->reference = s->reordered_input_picture[0]->reference;
1330             if (ff_alloc_picture(s, pic, 0) < 0) {
1331                 return -1;
1332             }
1333
1334             ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1335             if (ret < 0)
1336                 return ret;
1337
1338             /* mark us unused / free shared pic */
1339             av_frame_unref(&s->reordered_input_picture[0]->f);
1340             s->reordered_input_picture[0]->shared = 0;
1341
1342             s->current_picture_ptr = pic;
1343         } else {
1344             // input is not a shared pix -> reuse buffer for current_pix
1345             s->current_picture_ptr = s->reordered_input_picture[0];
1346             for (i = 0; i < 4; i++) {
1347                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1348             }
1349         }
1350         ff_mpeg_unref_picture(s, &s->current_picture);
1351         if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1352                                        s->current_picture_ptr)) < 0)
1353             return ret;
1354
1355         s->picture_number = s->new_picture.f.display_picture_number;
1356     } else {
1357         ff_mpeg_unref_picture(s, &s->new_picture);
1358     }
1359     return 0;
1360 }
1361
1362 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1363                           const AVFrame *pic_arg, int *got_packet)
1364 {
1365     MpegEncContext *s = avctx->priv_data;
1366     int i, stuffing_count, ret;
1367     int context_count = s->slice_context_count;
1368
1369     s->picture_in_gop_number++;
1370
1371     if (load_input_picture(s, pic_arg) < 0)
1372         return -1;
1373
1374     if (select_input_picture(s) < 0) {
1375         return -1;
1376     }
1377
1378     /* output? */
1379     if (s->new_picture.f.data[0]) {
1380         if (!pkt->data &&
1381             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1382             return ret;
1383         if (s->mb_info) {
1384             s->mb_info_ptr = av_packet_new_side_data(pkt,
1385                                  AV_PKT_DATA_H263_MB_INFO,
1386                                  s->mb_width*s->mb_height*12);
1387             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1388         }
1389
1390         for (i = 0; i < context_count; i++) {
1391             int start_y = s->thread_context[i]->start_mb_y;
1392             int   end_y = s->thread_context[i]->  end_mb_y;
1393             int h       = s->mb_height;
1394             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1395             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1396
1397             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1398         }
1399
1400         s->pict_type = s->new_picture.f.pict_type;
1401         //emms_c();
1402         ff_MPV_frame_start(s, avctx);
1403 vbv_retry:
1404         if (encode_picture(s, s->picture_number) < 0)
1405             return -1;
1406
1407         avctx->header_bits = s->header_bits;
1408         avctx->mv_bits     = s->mv_bits;
1409         avctx->misc_bits   = s->misc_bits;
1410         avctx->i_tex_bits  = s->i_tex_bits;
1411         avctx->p_tex_bits  = s->p_tex_bits;
1412         avctx->i_count     = s->i_count;
1413         // FIXME f/b_count in avctx
1414         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1415         avctx->skip_count  = s->skip_count;
1416
1417         ff_MPV_frame_end(s);
1418
1419         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1420             ff_mjpeg_encode_picture_trailer(s);
1421
1422         if (avctx->rc_buffer_size) {
1423             RateControlContext *rcc = &s->rc_context;
1424             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1425
1426             if (put_bits_count(&s->pb) > max_size &&
1427                 s->lambda < s->avctx->lmax) {
1428                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1429                                        (s->qscale + 1) / s->qscale);
1430                 if (s->adaptive_quant) {
1431                     int i;
1432                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1433                         s->lambda_table[i] =
1434                             FFMAX(s->lambda_table[i] + 1,
1435                                   s->lambda_table[i] * (s->qscale + 1) /
1436                                   s->qscale);
1437                 }
1438                 s->mb_skipped = 0;        // done in MPV_frame_start()
1439                 // done in encode_picture() so we must undo it
1440                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1441                     if (s->flipflop_rounding          ||
1442                         s->codec_id == AV_CODEC_ID_H263P ||
1443                         s->codec_id == AV_CODEC_ID_MPEG4)
1444                         s->no_rounding ^= 1;
1445                 }
1446                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1447                     s->time_base       = s->last_time_base;
1448                     s->last_non_b_time = s->time - s->pp_time;
1449                 }
1450                 for (i = 0; i < context_count; i++) {
1451                     PutBitContext *pb = &s->thread_context[i]->pb;
1452                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1453                 }
1454                 goto vbv_retry;
1455             }
1456
1457             assert(s->avctx->rc_max_rate);
1458         }
1459
1460         if (s->flags & CODEC_FLAG_PASS1)
1461             ff_write_pass1_stats(s);
1462
1463         for (i = 0; i < 4; i++) {
1464             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1465             avctx->error[i] += s->current_picture_ptr->f.error[i];
1466         }
1467
1468         if (s->flags & CODEC_FLAG_PASS1)
1469             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1470                    avctx->i_tex_bits + avctx->p_tex_bits ==
1471                        put_bits_count(&s->pb));
1472         flush_put_bits(&s->pb);
1473         s->frame_bits  = put_bits_count(&s->pb);
1474
1475         stuffing_count = ff_vbv_update(s, s->frame_bits);
1476         if (stuffing_count) {
1477             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1478                     stuffing_count + 50) {
1479                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1480                 return -1;
1481             }
1482
1483             switch (s->codec_id) {
1484             case AV_CODEC_ID_MPEG1VIDEO:
1485             case AV_CODEC_ID_MPEG2VIDEO:
1486                 while (stuffing_count--) {
1487                     put_bits(&s->pb, 8, 0);
1488                 }
1489             break;
1490             case AV_CODEC_ID_MPEG4:
1491                 put_bits(&s->pb, 16, 0);
1492                 put_bits(&s->pb, 16, 0x1C3);
1493                 stuffing_count -= 4;
1494                 while (stuffing_count--) {
1495                     put_bits(&s->pb, 8, 0xFF);
1496                 }
1497             break;
1498             default:
1499                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1500             }
1501             flush_put_bits(&s->pb);
1502             s->frame_bits  = put_bits_count(&s->pb);
1503         }
1504
1505         /* update mpeg1/2 vbv_delay for CBR */
1506         if (s->avctx->rc_max_rate                          &&
1507             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1508             s->out_format == FMT_MPEG1                     &&
1509             90000LL * (avctx->rc_buffer_size - 1) <=
1510                 s->avctx->rc_max_rate * 0xFFFFLL) {
1511             int vbv_delay, min_delay;
1512             double inbits  = s->avctx->rc_max_rate *
1513                              av_q2d(s->avctx->time_base);
1514             int    minbits = s->frame_bits - 8 *
1515                              (s->vbv_delay_ptr - s->pb.buf - 1);
1516             double bits    = s->rc_context.buffer_index + minbits - inbits;
1517
1518             if (bits < 0)
1519                 av_log(s->avctx, AV_LOG_ERROR,
1520                        "Internal error, negative bits\n");
1521
1522             assert(s->repeat_first_field == 0);
1523
1524             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1525             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1526                         s->avctx->rc_max_rate;
1527
1528             vbv_delay = FFMAX(vbv_delay, min_delay);
1529
1530             assert(vbv_delay < 0xFFFF);
1531
1532             s->vbv_delay_ptr[0] &= 0xF8;
1533             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1534             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1535             s->vbv_delay_ptr[2] &= 0x07;
1536             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1537             avctx->vbv_delay     = vbv_delay * 300;
1538         }
1539         s->total_bits     += s->frame_bits;
1540         avctx->frame_bits  = s->frame_bits;
1541
1542         pkt->pts = s->current_picture.f.pts;
1543         if (!s->low_delay) {
1544             if (!s->current_picture.f.coded_picture_number)
1545                 pkt->dts = pkt->pts - s->dts_delta;
1546             else
1547                 pkt->dts = s->reordered_pts;
1548             s->reordered_pts = s->input_picture[0]->f.pts;
1549         } else
1550             pkt->dts = pkt->pts;
1551         if (s->current_picture.f.key_frame)
1552             pkt->flags |= AV_PKT_FLAG_KEY;
1553         if (s->mb_info)
1554             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1555     } else {
1556         s->frame_bits = 0;
1557     }
1558     assert((s->frame_bits & 7) == 0);
1559
1560     pkt->size = s->frame_bits / 8;
1561     *got_packet = !!pkt->size;
1562     return 0;
1563 }
1564
1565 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1566                                                 int n, int threshold)
1567 {
1568     static const char tab[64] = {
1569         3, 2, 2, 1, 1, 1, 1, 1,
1570         1, 1, 1, 1, 1, 1, 1, 1,
1571         1, 1, 1, 1, 1, 1, 1, 1,
1572         0, 0, 0, 0, 0, 0, 0, 0,
1573         0, 0, 0, 0, 0, 0, 0, 0,
1574         0, 0, 0, 0, 0, 0, 0, 0,
1575         0, 0, 0, 0, 0, 0, 0, 0,
1576         0, 0, 0, 0, 0, 0, 0, 0
1577     };
1578     int score = 0;
1579     int run = 0;
1580     int i;
1581     int16_t *block = s->block[n];
1582     const int last_index = s->block_last_index[n];
1583     int skip_dc;
1584
1585     if (threshold < 0) {
1586         skip_dc = 0;
1587         threshold = -threshold;
1588     } else
1589         skip_dc = 1;
1590
1591     /* Are all we could set to zero already zero? */
1592     if (last_index <= skip_dc - 1)
1593         return;
1594
1595     for (i = 0; i <= last_index; i++) {
1596         const int j = s->intra_scantable.permutated[i];
1597         const int level = FFABS(block[j]);
1598         if (level == 1) {
1599             if (skip_dc && i == 0)
1600                 continue;
1601             score += tab[run];
1602             run = 0;
1603         } else if (level > 1) {
1604             return;
1605         } else {
1606             run++;
1607         }
1608     }
1609     if (score >= threshold)
1610         return;
1611     for (i = skip_dc; i <= last_index; i++) {
1612         const int j = s->intra_scantable.permutated[i];
1613         block[j] = 0;
1614     }
1615     if (block[0])
1616         s->block_last_index[n] = 0;
1617     else
1618         s->block_last_index[n] = -1;
1619 }
1620
1621 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1622                                int last_index)
1623 {
1624     int i;
1625     const int maxlevel = s->max_qcoeff;
1626     const int minlevel = s->min_qcoeff;
1627     int overflow = 0;
1628
1629     if (s->mb_intra) {
1630         i = 1; // skip clipping of intra dc
1631     } else
1632         i = 0;
1633
1634     for (; i <= last_index; i++) {
1635         const int j = s->intra_scantable.permutated[i];
1636         int level = block[j];
1637
1638         if (level > maxlevel) {
1639             level = maxlevel;
1640             overflow++;
1641         } else if (level < minlevel) {
1642             level = minlevel;
1643             overflow++;
1644         }
1645
1646         block[j] = level;
1647     }
1648
1649     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1650         av_log(s->avctx, AV_LOG_INFO,
1651                "warning, clipping %d dct coefficients to %d..%d\n",
1652                overflow, minlevel, maxlevel);
1653 }
1654
1655 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1656 {
1657     int x, y;
1658     // FIXME optimize
1659     for (y = 0; y < 8; y++) {
1660         for (x = 0; x < 8; x++) {
1661             int x2, y2;
1662             int sum = 0;
1663             int sqr = 0;
1664             int count = 0;
1665
1666             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1667                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1668                     int v = ptr[x2 + y2 * stride];
1669                     sum += v;
1670                     sqr += v * v;
1671                     count++;
1672                 }
1673             }
1674             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1675         }
1676     }
1677 }
1678
1679 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1680                                                 int motion_x, int motion_y,
1681                                                 int mb_block_height,
1682                                                 int mb_block_count)
1683 {
1684     int16_t weight[8][64];
1685     int16_t orig[8][64];
1686     const int mb_x = s->mb_x;
1687     const int mb_y = s->mb_y;
1688     int i;
1689     int skip_dct[8];
1690     int dct_offset = s->linesize * 8; // default for progressive frames
1691     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1692     ptrdiff_t wrap_y, wrap_c;
1693
1694     for (i = 0; i < mb_block_count; i++)
1695         skip_dct[i] = s->skipdct;
1696
1697     if (s->adaptive_quant) {
1698         const int last_qp = s->qscale;
1699         const int mb_xy = mb_x + mb_y * s->mb_stride;
1700
1701         s->lambda = s->lambda_table[mb_xy];
1702         update_qscale(s);
1703
1704         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1705             s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1706             s->dquant = s->qscale - last_qp;
1707
1708             if (s->out_format == FMT_H263) {
1709                 s->dquant = av_clip(s->dquant, -2, 2);
1710
1711                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1712                     if (!s->mb_intra) {
1713                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1714                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1715                                 s->dquant = 0;
1716                         }
1717                         if (s->mv_type == MV_TYPE_8X8)
1718                             s->dquant = 0;
1719                     }
1720                 }
1721             }
1722         }
1723         ff_set_qscale(s, last_qp + s->dquant);
1724     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1725         ff_set_qscale(s, s->qscale + s->dquant);
1726
1727     wrap_y = s->linesize;
1728     wrap_c = s->uvlinesize;
1729     ptr_y  = s->new_picture.f.data[0] +
1730              (mb_y * 16 * wrap_y)              + mb_x * 16;
1731     ptr_cb = s->new_picture.f.data[1] +
1732              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1733     ptr_cr = s->new_picture.f.data[2] +
1734              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1735
1736     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1737         uint8_t *ebuf = s->edge_emu_buffer + 32;
1738         s->vdsp.emulated_edge_mc(ebuf, ptr_y,
1739                                  wrap_y, wrap_y,
1740                                  16, 16, mb_x * 16, mb_y * 16,
1741                                  s->width, s->height);
1742         ptr_y = ebuf;
1743         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb,
1744                                  wrap_c, wrap_c,
1745                                  8, mb_block_height, mb_x * 8, mb_y * 8,
1746                                  s->width >> 1, s->height >> 1);
1747         ptr_cb = ebuf + 18 * wrap_y;
1748         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr,
1749                                  wrap_c, wrap_c,
1750                                  8, mb_block_height, mb_x * 8, mb_y * 8,
1751                                  s->width >> 1, s->height >> 1);
1752         ptr_cr = ebuf + 18 * wrap_y + 8;
1753     }
1754
1755     if (s->mb_intra) {
1756         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1757             int progressive_score, interlaced_score;
1758
1759             s->interlaced_dct = 0;
1760             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1761                                                     NULL, wrap_y, 8) +
1762                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1763                                                     NULL, wrap_y, 8) - 400;
1764
1765             if (progressive_score > 0) {
1766                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1767                                                        NULL, wrap_y * 2, 8) +
1768                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1769                                                        NULL, wrap_y * 2, 8);
1770                 if (progressive_score > interlaced_score) {
1771                     s->interlaced_dct = 1;
1772
1773                     dct_offset = wrap_y;
1774                     wrap_y <<= 1;
1775                     if (s->chroma_format == CHROMA_422)
1776                         wrap_c <<= 1;
1777                 }
1778             }
1779         }
1780
1781         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1782         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1783         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1784         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1785
1786         if (s->flags & CODEC_FLAG_GRAY) {
1787             skip_dct[4] = 1;
1788             skip_dct[5] = 1;
1789         } else {
1790             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1791             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1792             if (!s->chroma_y_shift) { /* 422 */
1793                 s->dsp.get_pixels(s->block[6],
1794                                   ptr_cb + (dct_offset >> 1), wrap_c);
1795                 s->dsp.get_pixels(s->block[7],
1796                                   ptr_cr + (dct_offset >> 1), wrap_c);
1797             }
1798         }
1799     } else {
1800         op_pixels_func (*op_pix)[4];
1801         qpel_mc_func (*op_qpix)[16];
1802         uint8_t *dest_y, *dest_cb, *dest_cr;
1803
1804         dest_y  = s->dest[0];
1805         dest_cb = s->dest[1];
1806         dest_cr = s->dest[2];
1807
1808         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1809             op_pix  = s->hdsp.put_pixels_tab;
1810             op_qpix = s->dsp.put_qpel_pixels_tab;
1811         } else {
1812             op_pix  = s->hdsp.put_no_rnd_pixels_tab;
1813             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1814         }
1815
1816         if (s->mv_dir & MV_DIR_FORWARD) {
1817             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1818                           s->last_picture.f.data,
1819                           op_pix, op_qpix);
1820             op_pix  = s->hdsp.avg_pixels_tab;
1821             op_qpix = s->dsp.avg_qpel_pixels_tab;
1822         }
1823         if (s->mv_dir & MV_DIR_BACKWARD) {
1824             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1825                           s->next_picture.f.data,
1826                           op_pix, op_qpix);
1827         }
1828
1829         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1830             int progressive_score, interlaced_score;
1831
1832             s->interlaced_dct = 0;
1833             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1834                                                     ptr_y,              wrap_y,
1835                                                     8) +
1836                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1837                                                     ptr_y + wrap_y * 8, wrap_y,
1838                                                     8) - 400;
1839
1840             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1841                 progressive_score -= 400;
1842
1843             if (progressive_score > 0) {
1844                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1845                                                        ptr_y,
1846                                                        wrap_y * 2, 8) +
1847                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1848                                                        ptr_y + wrap_y,
1849                                                        wrap_y * 2, 8);
1850
1851                 if (progressive_score > interlaced_score) {
1852                     s->interlaced_dct = 1;
1853
1854                     dct_offset = wrap_y;
1855                     wrap_y <<= 1;
1856                     if (s->chroma_format == CHROMA_422)
1857                         wrap_c <<= 1;
1858                 }
1859             }
1860         }
1861
1862         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1863         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1864         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1865                            dest_y + dct_offset, wrap_y);
1866         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1867                            dest_y + dct_offset + 8, wrap_y);
1868
1869         if (s->flags & CODEC_FLAG_GRAY) {
1870             skip_dct[4] = 1;
1871             skip_dct[5] = 1;
1872         } else {
1873             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1874             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1875             if (!s->chroma_y_shift) { /* 422 */
1876                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1877                                    dest_cb + (dct_offset >> 1), wrap_c);
1878                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1879                                    dest_cr + (dct_offset >> 1), wrap_c);
1880             }
1881         }
1882         /* pre quantization */
1883         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1884                 2 * s->qscale * s->qscale) {
1885             // FIXME optimize
1886             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1887                               wrap_y, 8) < 20 * s->qscale)
1888                 skip_dct[0] = 1;
1889             if (s->dsp.sad[1](NULL, ptr_y + 8,
1890                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1891                 skip_dct[1] = 1;
1892             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1893                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1894                 skip_dct[2] = 1;
1895             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1896                               dest_y + dct_offset + 8,
1897                               wrap_y, 8) < 20 * s->qscale)
1898                 skip_dct[3] = 1;
1899             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1900                               wrap_c, 8) < 20 * s->qscale)
1901                 skip_dct[4] = 1;
1902             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1903                               wrap_c, 8) < 20 * s->qscale)
1904                 skip_dct[5] = 1;
1905             if (!s->chroma_y_shift) { /* 422 */
1906                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1907                                   dest_cb + (dct_offset >> 1),
1908                                   wrap_c, 8) < 20 * s->qscale)
1909                     skip_dct[6] = 1;
1910                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1911                                   dest_cr + (dct_offset >> 1),
1912                                   wrap_c, 8) < 20 * s->qscale)
1913                     skip_dct[7] = 1;
1914             }
1915         }
1916     }
1917
1918     if (s->quantizer_noise_shaping) {
1919         if (!skip_dct[0])
1920             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1921         if (!skip_dct[1])
1922             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1923         if (!skip_dct[2])
1924             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1925         if (!skip_dct[3])
1926             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1927         if (!skip_dct[4])
1928             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1929         if (!skip_dct[5])
1930             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1931         if (!s->chroma_y_shift) { /* 422 */
1932             if (!skip_dct[6])
1933                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1934                                   wrap_c);
1935             if (!skip_dct[7])
1936                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1937                                   wrap_c);
1938         }
1939         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1940     }
1941
1942     /* DCT & quantize */
1943     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1944     {
1945         for (i = 0; i < mb_block_count; i++) {
1946             if (!skip_dct[i]) {
1947                 int overflow;
1948                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1949                 // FIXME we could decide to change to quantizer instead of
1950                 // clipping
1951                 // JS: I don't think that would be a good idea it could lower
1952                 //     quality instead of improve it. Just INTRADC clipping
1953                 //     deserves changes in quantizer
1954                 if (overflow)
1955                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1956             } else
1957                 s->block_last_index[i] = -1;
1958         }
1959         if (s->quantizer_noise_shaping) {
1960             for (i = 0; i < mb_block_count; i++) {
1961                 if (!skip_dct[i]) {
1962                     s->block_last_index[i] =
1963                         dct_quantize_refine(s, s->block[i], weight[i],
1964                                             orig[i], i, s->qscale);
1965                 }
1966             }
1967         }
1968
1969         if (s->luma_elim_threshold && !s->mb_intra)
1970             for (i = 0; i < 4; i++)
1971                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1972         if (s->chroma_elim_threshold && !s->mb_intra)
1973             for (i = 4; i < mb_block_count; i++)
1974                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1975
1976         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1977             for (i = 0; i < mb_block_count; i++) {
1978                 if (s->block_last_index[i] == -1)
1979                     s->coded_score[i] = INT_MAX / 256;
1980             }
1981         }
1982     }
1983
1984     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1985         s->block_last_index[4] =
1986         s->block_last_index[5] = 0;
1987         s->block[4][0] =
1988         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1989     }
1990
1991     // non c quantize code returns incorrect block_last_index FIXME
1992     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1993         for (i = 0; i < mb_block_count; i++) {
1994             int j;
1995             if (s->block_last_index[i] > 0) {
1996                 for (j = 63; j > 0; j--) {
1997                     if (s->block[i][s->intra_scantable.permutated[j]])
1998                         break;
1999                 }
2000                 s->block_last_index[i] = j;
2001             }
2002         }
2003     }
2004
2005     /* huffman encode */
2006     switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2007     case AV_CODEC_ID_MPEG1VIDEO:
2008     case AV_CODEC_ID_MPEG2VIDEO:
2009         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2010             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2011         break;
2012     case AV_CODEC_ID_MPEG4:
2013         if (CONFIG_MPEG4_ENCODER)
2014             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2015         break;
2016     case AV_CODEC_ID_MSMPEG4V2:
2017     case AV_CODEC_ID_MSMPEG4V3:
2018     case AV_CODEC_ID_WMV1:
2019         if (CONFIG_MSMPEG4_ENCODER)
2020             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2021         break;
2022     case AV_CODEC_ID_WMV2:
2023         if (CONFIG_WMV2_ENCODER)
2024             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2025         break;
2026     case AV_CODEC_ID_H261:
2027         if (CONFIG_H261_ENCODER)
2028             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2029         break;
2030     case AV_CODEC_ID_H263:
2031     case AV_CODEC_ID_H263P:
2032     case AV_CODEC_ID_FLV1:
2033     case AV_CODEC_ID_RV10:
2034     case AV_CODEC_ID_RV20:
2035         if (CONFIG_H263_ENCODER)
2036             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2037         break;
2038     case AV_CODEC_ID_MJPEG:
2039         if (CONFIG_MJPEG_ENCODER)
2040             ff_mjpeg_encode_mb(s, s->block);
2041         break;
2042     default:
2043         assert(0);
2044     }
2045 }
2046
2047 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2048 {
2049     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
2050     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
2051 }
2052
2053 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2054     int i;
2055
2056     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2057
2058     /* mpeg1 */
2059     d->mb_skip_run= s->mb_skip_run;
2060     for(i=0; i<3; i++)
2061         d->last_dc[i] = s->last_dc[i];
2062
2063     /* statistics */
2064     d->mv_bits= s->mv_bits;
2065     d->i_tex_bits= s->i_tex_bits;
2066     d->p_tex_bits= s->p_tex_bits;
2067     d->i_count= s->i_count;
2068     d->f_count= s->f_count;
2069     d->b_count= s->b_count;
2070     d->skip_count= s->skip_count;
2071     d->misc_bits= s->misc_bits;
2072     d->last_bits= 0;
2073
2074     d->mb_skipped= 0;
2075     d->qscale= s->qscale;
2076     d->dquant= s->dquant;
2077
2078     d->esc3_level_length= s->esc3_level_length;
2079 }
2080
2081 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2082     int i;
2083
2084     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2085     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2086
2087     /* mpeg1 */
2088     d->mb_skip_run= s->mb_skip_run;
2089     for(i=0; i<3; i++)
2090         d->last_dc[i] = s->last_dc[i];
2091
2092     /* statistics */
2093     d->mv_bits= s->mv_bits;
2094     d->i_tex_bits= s->i_tex_bits;
2095     d->p_tex_bits= s->p_tex_bits;
2096     d->i_count= s->i_count;
2097     d->f_count= s->f_count;
2098     d->b_count= s->b_count;
2099     d->skip_count= s->skip_count;
2100     d->misc_bits= s->misc_bits;
2101
2102     d->mb_intra= s->mb_intra;
2103     d->mb_skipped= s->mb_skipped;
2104     d->mv_type= s->mv_type;
2105     d->mv_dir= s->mv_dir;
2106     d->pb= s->pb;
2107     if(s->data_partitioning){
2108         d->pb2= s->pb2;
2109         d->tex_pb= s->tex_pb;
2110     }
2111     d->block= s->block;
2112     for(i=0; i<8; i++)
2113         d->block_last_index[i]= s->block_last_index[i];
2114     d->interlaced_dct= s->interlaced_dct;
2115     d->qscale= s->qscale;
2116
2117     d->esc3_level_length= s->esc3_level_length;
2118 }
2119
2120 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2121                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2122                            int *dmin, int *next_block, int motion_x, int motion_y)
2123 {
2124     int score;
2125     uint8_t *dest_backup[3];
2126
2127     copy_context_before_encode(s, backup, type);
2128
2129     s->block= s->blocks[*next_block];
2130     s->pb= pb[*next_block];
2131     if(s->data_partitioning){
2132         s->pb2   = pb2   [*next_block];
2133         s->tex_pb= tex_pb[*next_block];
2134     }
2135
2136     if(*next_block){
2137         memcpy(dest_backup, s->dest, sizeof(s->dest));
2138         s->dest[0] = s->rd_scratchpad;
2139         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2140         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2141         assert(s->linesize >= 32); //FIXME
2142     }
2143
2144     encode_mb(s, motion_x, motion_y);
2145
2146     score= put_bits_count(&s->pb);
2147     if(s->data_partitioning){
2148         score+= put_bits_count(&s->pb2);
2149         score+= put_bits_count(&s->tex_pb);
2150     }
2151
2152     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2153         ff_MPV_decode_mb(s, s->block);
2154
2155         score *= s->lambda2;
2156         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2157     }
2158
2159     if(*next_block){
2160         memcpy(s->dest, dest_backup, sizeof(s->dest));
2161     }
2162
2163     if(score<*dmin){
2164         *dmin= score;
2165         *next_block^=1;
2166
2167         copy_context_after_encode(best, s, type);
2168     }
2169 }
2170
2171 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2172     uint32_t *sq = ff_squareTbl + 256;
2173     int acc=0;
2174     int x,y;
2175
2176     if(w==16 && h==16)
2177         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2178     else if(w==8 && h==8)
2179         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2180
2181     for(y=0; y<h; y++){
2182         for(x=0; x<w; x++){
2183             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2184         }
2185     }
2186
2187     assert(acc>=0);
2188
2189     return acc;
2190 }
2191
2192 static int sse_mb(MpegEncContext *s){
2193     int w= 16;
2194     int h= 16;
2195
2196     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2197     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2198
2199     if(w==16 && h==16)
2200       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2201         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2202                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2203                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2204       }else{
2205         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2206                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2207                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2208       }
2209     else
2210         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2211                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2212                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2213 }
2214
2215 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2216     MpegEncContext *s= *(void**)arg;
2217
2218
2219     s->me.pre_pass=1;
2220     s->me.dia_size= s->avctx->pre_dia_size;
2221     s->first_slice_line=1;
2222     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2223         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2224             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2225         }
2226         s->first_slice_line=0;
2227     }
2228
2229     s->me.pre_pass=0;
2230
2231     return 0;
2232 }
2233
2234 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2235     MpegEncContext *s= *(void**)arg;
2236
2237     ff_check_alignment();
2238
2239     s->me.dia_size= s->avctx->dia_size;
2240     s->first_slice_line=1;
2241     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2242         s->mb_x=0; //for block init below
2243         ff_init_block_index(s);
2244         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2245             s->block_index[0]+=2;
2246             s->block_index[1]+=2;
2247             s->block_index[2]+=2;
2248             s->block_index[3]+=2;
2249
2250             /* compute motion vector & mb_type and store in context */
2251             if(s->pict_type==AV_PICTURE_TYPE_B)
2252                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2253             else
2254                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2255         }
2256         s->first_slice_line=0;
2257     }
2258     return 0;
2259 }
2260
2261 static int mb_var_thread(AVCodecContext *c, void *arg){
2262     MpegEncContext *s= *(void**)arg;
2263     int mb_x, mb_y;
2264
2265     ff_check_alignment();
2266
2267     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2268         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2269             int xx = mb_x * 16;
2270             int yy = mb_y * 16;
2271             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2272             int varc;
2273             int sum = s->dsp.pix_sum(pix, s->linesize);
2274
2275             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2276
2277             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2278             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2279             s->me.mb_var_sum_temp    += varc;
2280         }
2281     }
2282     return 0;
2283 }
2284
2285 static void write_slice_end(MpegEncContext *s){
2286     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2287         if(s->partitioned_frame){
2288             ff_mpeg4_merge_partitions(s);
2289         }
2290
2291         ff_mpeg4_stuffing(&s->pb);
2292     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2293         ff_mjpeg_encode_stuffing(&s->pb);
2294     }
2295
2296     avpriv_align_put_bits(&s->pb);
2297     flush_put_bits(&s->pb);
2298
2299     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2300         s->misc_bits+= get_bits_diff(s);
2301 }
2302
2303 static void write_mb_info(MpegEncContext *s)
2304 {
2305     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2306     int offset = put_bits_count(&s->pb);
2307     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2308     int gobn = s->mb_y / s->gob_index;
2309     int pred_x, pred_y;
2310     if (CONFIG_H263_ENCODER)
2311         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2312     bytestream_put_le32(&ptr, offset);
2313     bytestream_put_byte(&ptr, s->qscale);
2314     bytestream_put_byte(&ptr, gobn);
2315     bytestream_put_le16(&ptr, mba);
2316     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2317     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2318     /* 4MV not implemented */
2319     bytestream_put_byte(&ptr, 0); /* hmv2 */
2320     bytestream_put_byte(&ptr, 0); /* vmv2 */
2321 }
2322
2323 static void update_mb_info(MpegEncContext *s, int startcode)
2324 {
2325     if (!s->mb_info)
2326         return;
2327     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2328         s->mb_info_size += 12;
2329         s->prev_mb_info = s->last_mb_info;
2330     }
2331     if (startcode) {
2332         s->prev_mb_info = put_bits_count(&s->pb)/8;
2333         /* This might have incremented mb_info_size above, and we return without
2334          * actually writing any info into that slot yet. But in that case,
2335          * this will be called again at the start of the after writing the
2336          * start code, actually writing the mb info. */
2337         return;
2338     }
2339
2340     s->last_mb_info = put_bits_count(&s->pb)/8;
2341     if (!s->mb_info_size)
2342         s->mb_info_size += 12;
2343     write_mb_info(s);
2344 }
2345
2346 static int encode_thread(AVCodecContext *c, void *arg){
2347     MpegEncContext *s= *(void**)arg;
2348     int mb_x, mb_y, pdif = 0;
2349     int chr_h= 16>>s->chroma_y_shift;
2350     int i, j;
2351     MpegEncContext best_s, backup_s;
2352     uint8_t bit_buf[2][MAX_MB_BYTES];
2353     uint8_t bit_buf2[2][MAX_MB_BYTES];
2354     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2355     PutBitContext pb[2], pb2[2], tex_pb[2];
2356
2357     ff_check_alignment();
2358
2359     for(i=0; i<2; i++){
2360         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2361         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2362         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2363     }
2364
2365     s->last_bits= put_bits_count(&s->pb);
2366     s->mv_bits=0;
2367     s->misc_bits=0;
2368     s->i_tex_bits=0;
2369     s->p_tex_bits=0;
2370     s->i_count=0;
2371     s->f_count=0;
2372     s->b_count=0;
2373     s->skip_count=0;
2374
2375     for(i=0; i<3; i++){
2376         /* init last dc values */
2377         /* note: quant matrix value (8) is implied here */
2378         s->last_dc[i] = 128 << s->intra_dc_precision;
2379
2380         s->current_picture.f.error[i] = 0;
2381     }
2382     s->mb_skip_run = 0;
2383     memset(s->last_mv, 0, sizeof(s->last_mv));
2384
2385     s->last_mv_dir = 0;
2386
2387     switch(s->codec_id){
2388     case AV_CODEC_ID_H263:
2389     case AV_CODEC_ID_H263P:
2390     case AV_CODEC_ID_FLV1:
2391         if (CONFIG_H263_ENCODER)
2392             s->gob_index = ff_h263_get_gob_height(s);
2393         break;
2394     case AV_CODEC_ID_MPEG4:
2395         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2396             ff_mpeg4_init_partitions(s);
2397         break;
2398     }
2399
2400     s->resync_mb_x=0;
2401     s->resync_mb_y=0;
2402     s->first_slice_line = 1;
2403     s->ptr_lastgob = s->pb.buf;
2404     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2405         s->mb_x=0;
2406         s->mb_y= mb_y;
2407
2408         ff_set_qscale(s, s->qscale);
2409         ff_init_block_index(s);
2410
2411         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2412             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2413             int mb_type= s->mb_type[xy];
2414 //            int d;
2415             int dmin= INT_MAX;
2416             int dir;
2417
2418             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2419                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2420                 return -1;
2421             }
2422             if(s->data_partitioning){
2423                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2424                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2425                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2426                     return -1;
2427                 }
2428             }
2429
2430             s->mb_x = mb_x;
2431             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2432             ff_update_block_index(s);
2433
2434             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2435                 ff_h261_reorder_mb_index(s);
2436                 xy= s->mb_y*s->mb_stride + s->mb_x;
2437                 mb_type= s->mb_type[xy];
2438             }
2439
2440             /* write gob / video packet header  */
2441             if(s->rtp_mode){
2442                 int current_packet_size, is_gob_start;
2443
2444                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2445
2446                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2447
2448                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2449
2450                 switch(s->codec_id){
2451                 case AV_CODEC_ID_H263:
2452                 case AV_CODEC_ID_H263P:
2453                     if(!s->h263_slice_structured)
2454                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2455                     break;
2456                 case AV_CODEC_ID_MPEG2VIDEO:
2457                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2458                 case AV_CODEC_ID_MPEG1VIDEO:
2459                     if(s->mb_skip_run) is_gob_start=0;
2460                     break;
2461                 }
2462
2463                 if(is_gob_start){
2464                     if(s->start_mb_y != mb_y || mb_x!=0){
2465                         write_slice_end(s);
2466
2467                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2468                             ff_mpeg4_init_partitions(s);
2469                         }
2470                     }
2471
2472                     assert((put_bits_count(&s->pb)&7) == 0);
2473                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2474
2475                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2476                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2477                         int d = 100 / s->error_rate;
2478                         if(r % d == 0){
2479                             current_packet_size=0;
2480                             s->pb.buf_ptr= s->ptr_lastgob;
2481                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2482                         }
2483                     }
2484
2485                     if (s->avctx->rtp_callback){
2486                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2487                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2488                     }
2489                     update_mb_info(s, 1);
2490
2491                     switch(s->codec_id){
2492                     case AV_CODEC_ID_MPEG4:
2493                         if (CONFIG_MPEG4_ENCODER) {
2494                             ff_mpeg4_encode_video_packet_header(s);
2495                             ff_mpeg4_clean_buffers(s);
2496                         }
2497                     break;
2498                     case AV_CODEC_ID_MPEG1VIDEO:
2499                     case AV_CODEC_ID_MPEG2VIDEO:
2500                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2501                             ff_mpeg1_encode_slice_header(s);
2502                             ff_mpeg1_clean_buffers(s);
2503                         }
2504                     break;
2505                     case AV_CODEC_ID_H263:
2506                     case AV_CODEC_ID_H263P:
2507                         if (CONFIG_H263_ENCODER)
2508                             ff_h263_encode_gob_header(s, mb_y);
2509                     break;
2510                     }
2511
2512                     if(s->flags&CODEC_FLAG_PASS1){
2513                         int bits= put_bits_count(&s->pb);
2514                         s->misc_bits+= bits - s->last_bits;
2515                         s->last_bits= bits;
2516                     }
2517
2518                     s->ptr_lastgob += current_packet_size;
2519                     s->first_slice_line=1;
2520                     s->resync_mb_x=mb_x;
2521                     s->resync_mb_y=mb_y;
2522                 }
2523             }
2524
2525             if(  (s->resync_mb_x   == s->mb_x)
2526                && s->resync_mb_y+1 == s->mb_y){
2527                 s->first_slice_line=0;
2528             }
2529
2530             s->mb_skipped=0;
2531             s->dquant=0; //only for QP_RD
2532
2533             update_mb_info(s, 0);
2534
2535             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2536                 int next_block=0;
2537                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2538
2539                 copy_context_before_encode(&backup_s, s, -1);
2540                 backup_s.pb= s->pb;
2541                 best_s.data_partitioning= s->data_partitioning;
2542                 best_s.partitioned_frame= s->partitioned_frame;
2543                 if(s->data_partitioning){
2544                     backup_s.pb2= s->pb2;
2545                     backup_s.tex_pb= s->tex_pb;
2546                 }
2547
2548                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2549                     s->mv_dir = MV_DIR_FORWARD;
2550                     s->mv_type = MV_TYPE_16X16;
2551                     s->mb_intra= 0;
2552                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2553                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2554                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2555                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2556                 }
2557                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2558                     s->mv_dir = MV_DIR_FORWARD;
2559                     s->mv_type = MV_TYPE_FIELD;
2560                     s->mb_intra= 0;
2561                     for(i=0; i<2; i++){
2562                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2563                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2564                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2565                     }
2566                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2567                                  &dmin, &next_block, 0, 0);
2568                 }
2569                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2570                     s->mv_dir = MV_DIR_FORWARD;
2571                     s->mv_type = MV_TYPE_16X16;
2572                     s->mb_intra= 0;
2573                     s->mv[0][0][0] = 0;
2574                     s->mv[0][0][1] = 0;
2575                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2576                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2577                 }
2578                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2579                     s->mv_dir = MV_DIR_FORWARD;
2580                     s->mv_type = MV_TYPE_8X8;
2581                     s->mb_intra= 0;
2582                     for(i=0; i<4; i++){
2583                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2584                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2585                     }
2586                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2587                                  &dmin, &next_block, 0, 0);
2588                 }
2589                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2590                     s->mv_dir = MV_DIR_FORWARD;
2591                     s->mv_type = MV_TYPE_16X16;
2592                     s->mb_intra= 0;
2593                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2594                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2595                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2596                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2597                 }
2598                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2599                     s->mv_dir = MV_DIR_BACKWARD;
2600                     s->mv_type = MV_TYPE_16X16;
2601                     s->mb_intra= 0;
2602                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2603                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2604                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2605                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2606                 }
2607                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2608                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2609                     s->mv_type = MV_TYPE_16X16;
2610                     s->mb_intra= 0;
2611                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2612                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2613                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2614                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2615                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2616                                  &dmin, &next_block, 0, 0);
2617                 }
2618                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2619                     s->mv_dir = MV_DIR_FORWARD;
2620                     s->mv_type = MV_TYPE_FIELD;
2621                     s->mb_intra= 0;
2622                     for(i=0; i<2; i++){
2623                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2624                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2625                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2626                     }
2627                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2628                                  &dmin, &next_block, 0, 0);
2629                 }
2630                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2631                     s->mv_dir = MV_DIR_BACKWARD;
2632                     s->mv_type = MV_TYPE_FIELD;
2633                     s->mb_intra= 0;
2634                     for(i=0; i<2; i++){
2635                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2636                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2637                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2638                     }
2639                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2640                                  &dmin, &next_block, 0, 0);
2641                 }
2642                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2643                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2644                     s->mv_type = MV_TYPE_FIELD;
2645                     s->mb_intra= 0;
2646                     for(dir=0; dir<2; dir++){
2647                         for(i=0; i<2; i++){
2648                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2649                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2650                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2651                         }
2652                     }
2653                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2654                                  &dmin, &next_block, 0, 0);
2655                 }
2656                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2657                     s->mv_dir = 0;
2658                     s->mv_type = MV_TYPE_16X16;
2659                     s->mb_intra= 1;
2660                     s->mv[0][0][0] = 0;
2661                     s->mv[0][0][1] = 0;
2662                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2663                                  &dmin, &next_block, 0, 0);
2664                     if(s->h263_pred || s->h263_aic){
2665                         if(best_s.mb_intra)
2666                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2667                         else
2668                             ff_clean_intra_table_entries(s); //old mode?
2669                     }
2670                 }
2671
2672                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2673                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2674                         const int last_qp= backup_s.qscale;
2675                         int qpi, qp, dc[6];
2676                         int16_t ac[6][16];
2677                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2678                         static const int dquant_tab[4]={-1,1,-2,2};
2679
2680                         assert(backup_s.dquant == 0);
2681
2682                         //FIXME intra
2683                         s->mv_dir= best_s.mv_dir;
2684                         s->mv_type = MV_TYPE_16X16;
2685                         s->mb_intra= best_s.mb_intra;
2686                         s->mv[0][0][0] = best_s.mv[0][0][0];
2687                         s->mv[0][0][1] = best_s.mv[0][0][1];
2688                         s->mv[1][0][0] = best_s.mv[1][0][0];
2689                         s->mv[1][0][1] = best_s.mv[1][0][1];
2690
2691                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2692                         for(; qpi<4; qpi++){
2693                             int dquant= dquant_tab[qpi];
2694                             qp= last_qp + dquant;
2695                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2696                                 continue;
2697                             backup_s.dquant= dquant;
2698                             if(s->mb_intra && s->dc_val[0]){
2699                                 for(i=0; i<6; i++){
2700                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2701                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2702                                 }
2703                             }
2704
2705                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2706                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2707                             if(best_s.qscale != qp){
2708                                 if(s->mb_intra && s->dc_val[0]){
2709                                     for(i=0; i<6; i++){
2710                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2711                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2712                                     }
2713                                 }
2714                             }
2715                         }
2716                     }
2717                 }
2718                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2719                     int mx= s->b_direct_mv_table[xy][0];
2720                     int my= s->b_direct_mv_table[xy][1];
2721
2722                     backup_s.dquant = 0;
2723                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2724                     s->mb_intra= 0;
2725                     ff_mpeg4_set_direct_mv(s, mx, my);
2726                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2727                                  &dmin, &next_block, mx, my);
2728                 }
2729                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2730                     backup_s.dquant = 0;
2731                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2732                     s->mb_intra= 0;
2733                     ff_mpeg4_set_direct_mv(s, 0, 0);
2734                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2735                                  &dmin, &next_block, 0, 0);
2736                 }
2737                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2738                     int coded=0;
2739                     for(i=0; i<6; i++)
2740                         coded |= s->block_last_index[i];
2741                     if(coded){
2742                         int mx,my;
2743                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2744                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2745                             mx=my=0; //FIXME find the one we actually used
2746                             ff_mpeg4_set_direct_mv(s, mx, my);
2747                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2748                             mx= s->mv[1][0][0];
2749                             my= s->mv[1][0][1];
2750                         }else{
2751                             mx= s->mv[0][0][0];
2752                             my= s->mv[0][0][1];
2753                         }
2754
2755                         s->mv_dir= best_s.mv_dir;
2756                         s->mv_type = best_s.mv_type;
2757                         s->mb_intra= 0;
2758 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2759                         s->mv[0][0][1] = best_s.mv[0][0][1];
2760                         s->mv[1][0][0] = best_s.mv[1][0][0];
2761                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2762                         backup_s.dquant= 0;
2763                         s->skipdct=1;
2764                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2765                                         &dmin, &next_block, mx, my);
2766                         s->skipdct=0;
2767                     }
2768                 }
2769
2770                 s->current_picture.qscale_table[xy] = best_s.qscale;
2771
2772                 copy_context_after_encode(s, &best_s, -1);
2773
2774                 pb_bits_count= put_bits_count(&s->pb);
2775                 flush_put_bits(&s->pb);
2776                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2777                 s->pb= backup_s.pb;
2778
2779                 if(s->data_partitioning){
2780                     pb2_bits_count= put_bits_count(&s->pb2);
2781                     flush_put_bits(&s->pb2);
2782                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2783                     s->pb2= backup_s.pb2;
2784
2785                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2786                     flush_put_bits(&s->tex_pb);
2787                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2788                     s->tex_pb= backup_s.tex_pb;
2789                 }
2790                 s->last_bits= put_bits_count(&s->pb);
2791
2792                 if (CONFIG_H263_ENCODER &&
2793                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2794                     ff_h263_update_motion_val(s);
2795
2796                 if(next_block==0){ //FIXME 16 vs linesize16
2797                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2798                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2799                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2800                 }
2801
2802                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2803                     ff_MPV_decode_mb(s, s->block);
2804             } else {
2805                 int motion_x = 0, motion_y = 0;
2806                 s->mv_type=MV_TYPE_16X16;
2807                 // only one MB-Type possible
2808
2809                 switch(mb_type){
2810                 case CANDIDATE_MB_TYPE_INTRA:
2811                     s->mv_dir = 0;
2812                     s->mb_intra= 1;
2813                     motion_x= s->mv[0][0][0] = 0;
2814                     motion_y= s->mv[0][0][1] = 0;
2815                     break;
2816                 case CANDIDATE_MB_TYPE_INTER:
2817                     s->mv_dir = MV_DIR_FORWARD;
2818                     s->mb_intra= 0;
2819                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2820                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2821                     break;
2822                 case CANDIDATE_MB_TYPE_INTER_I:
2823                     s->mv_dir = MV_DIR_FORWARD;
2824                     s->mv_type = MV_TYPE_FIELD;
2825                     s->mb_intra= 0;
2826                     for(i=0; i<2; i++){
2827                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2828                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2829                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2830                     }
2831                     break;
2832                 case CANDIDATE_MB_TYPE_INTER4V:
2833                     s->mv_dir = MV_DIR_FORWARD;
2834                     s->mv_type = MV_TYPE_8X8;
2835                     s->mb_intra= 0;
2836                     for(i=0; i<4; i++){
2837                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2838                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2839                     }
2840                     break;
2841                 case CANDIDATE_MB_TYPE_DIRECT:
2842                     if (CONFIG_MPEG4_ENCODER) {
2843                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2844                         s->mb_intra= 0;
2845                         motion_x=s->b_direct_mv_table[xy][0];
2846                         motion_y=s->b_direct_mv_table[xy][1];
2847                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2848                     }
2849                     break;
2850                 case CANDIDATE_MB_TYPE_DIRECT0:
2851                     if (CONFIG_MPEG4_ENCODER) {
2852                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2853                         s->mb_intra= 0;
2854                         ff_mpeg4_set_direct_mv(s, 0, 0);
2855                     }
2856                     break;
2857                 case CANDIDATE_MB_TYPE_BIDIR:
2858                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2859                     s->mb_intra= 0;
2860                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2861                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2862                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2863                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2864                     break;
2865                 case CANDIDATE_MB_TYPE_BACKWARD:
2866                     s->mv_dir = MV_DIR_BACKWARD;
2867                     s->mb_intra= 0;
2868                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2869                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2870                     break;
2871                 case CANDIDATE_MB_TYPE_FORWARD:
2872                     s->mv_dir = MV_DIR_FORWARD;
2873                     s->mb_intra= 0;
2874                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2875                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2876                     break;
2877                 case CANDIDATE_MB_TYPE_FORWARD_I:
2878                     s->mv_dir = MV_DIR_FORWARD;
2879                     s->mv_type = MV_TYPE_FIELD;
2880                     s->mb_intra= 0;
2881                     for(i=0; i<2; i++){
2882                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2883                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2884                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2885                     }
2886                     break;
2887                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2888                     s->mv_dir = MV_DIR_BACKWARD;
2889                     s->mv_type = MV_TYPE_FIELD;
2890                     s->mb_intra= 0;
2891                     for(i=0; i<2; i++){
2892                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2893                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2894                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2895                     }
2896                     break;
2897                 case CANDIDATE_MB_TYPE_BIDIR_I:
2898                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2899                     s->mv_type = MV_TYPE_FIELD;
2900                     s->mb_intra= 0;
2901                     for(dir=0; dir<2; dir++){
2902                         for(i=0; i<2; i++){
2903                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2904                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2905                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2906                         }
2907                     }
2908                     break;
2909                 default:
2910                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2911                 }
2912
2913                 encode_mb(s, motion_x, motion_y);
2914
2915                 // RAL: Update last macroblock type
2916                 s->last_mv_dir = s->mv_dir;
2917
2918                 if (CONFIG_H263_ENCODER &&
2919                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2920                     ff_h263_update_motion_val(s);
2921
2922                 ff_MPV_decode_mb(s, s->block);
2923             }
2924
2925             /* clean the MV table in IPS frames for direct mode in B frames */
2926             if(s->mb_intra /* && I,P,S_TYPE */){
2927                 s->p_mv_table[xy][0]=0;
2928                 s->p_mv_table[xy][1]=0;
2929             }
2930
2931             if(s->flags&CODEC_FLAG_PSNR){
2932                 int w= 16;
2933                 int h= 16;
2934
2935                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2936                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2937
2938                 s->current_picture.f.error[0] += sse(
2939                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2940                     s->dest[0], w, h, s->linesize);
2941                 s->current_picture.f.error[1] += sse(
2942                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2943                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2944                 s->current_picture.f.error[2] += sse(
2945                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2946                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2947             }
2948             if(s->loop_filter){
2949                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2950                     ff_h263_loop_filter(s);
2951             }
2952             av_dlog(s->avctx, "MB %d %d bits\n",
2953                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2954         }
2955     }
2956
2957     //not beautiful here but we must write it before flushing so it has to be here
2958     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2959         ff_msmpeg4_encode_ext_header(s);
2960
2961     write_slice_end(s);
2962
2963     /* Send the last GOB if RTP */
2964     if (s->avctx->rtp_callback) {
2965         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2966         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2967         /* Call the RTP callback to send the last GOB */
2968         emms_c();
2969         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2970     }
2971
2972     return 0;
2973 }
2974
2975 #define MERGE(field) dst->field += src->field; src->field=0
2976 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2977     MERGE(me.scene_change_score);
2978     MERGE(me.mc_mb_var_sum_temp);
2979     MERGE(me.mb_var_sum_temp);
2980 }
2981
2982 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2983     int i;
2984
2985     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2986     MERGE(dct_count[1]);
2987     MERGE(mv_bits);
2988     MERGE(i_tex_bits);
2989     MERGE(p_tex_bits);
2990     MERGE(i_count);
2991     MERGE(f_count);
2992     MERGE(b_count);
2993     MERGE(skip_count);
2994     MERGE(misc_bits);
2995     MERGE(er.error_count);
2996     MERGE(padding_bug_score);
2997     MERGE(current_picture.f.error[0]);
2998     MERGE(current_picture.f.error[1]);
2999     MERGE(current_picture.f.error[2]);
3000
3001     if(dst->avctx->noise_reduction){
3002         for(i=0; i<64; i++){
3003             MERGE(dct_error_sum[0][i]);
3004             MERGE(dct_error_sum[1][i]);
3005         }
3006     }
3007
3008     assert(put_bits_count(&src->pb) % 8 ==0);
3009     assert(put_bits_count(&dst->pb) % 8 ==0);
3010     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3011     flush_put_bits(&dst->pb);
3012 }
3013
3014 static int estimate_qp(MpegEncContext *s, int dry_run){
3015     if (s->next_lambda){
3016         s->current_picture_ptr->f.quality =
3017         s->current_picture.f.quality = s->next_lambda;
3018         if(!dry_run) s->next_lambda= 0;
3019     } else if (!s->fixed_qscale) {
3020         s->current_picture_ptr->f.quality =
3021         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3022         if (s->current_picture.f.quality < 0)
3023             return -1;
3024     }
3025
3026     if(s->adaptive_quant){
3027         switch(s->codec_id){
3028         case AV_CODEC_ID_MPEG4:
3029             if (CONFIG_MPEG4_ENCODER)
3030                 ff_clean_mpeg4_qscales(s);
3031             break;
3032         case AV_CODEC_ID_H263:
3033         case AV_CODEC_ID_H263P:
3034         case AV_CODEC_ID_FLV1:
3035             if (CONFIG_H263_ENCODER)
3036                 ff_clean_h263_qscales(s);
3037             break;
3038         default:
3039             ff_init_qscale_tab(s);
3040         }
3041
3042         s->lambda= s->lambda_table[0];
3043         //FIXME broken
3044     }else
3045         s->lambda = s->current_picture.f.quality;
3046     update_qscale(s);
3047     return 0;
3048 }
3049
3050 /* must be called before writing the header */
3051 static void set_frame_distances(MpegEncContext * s){
3052     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3053     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3054
3055     if(s->pict_type==AV_PICTURE_TYPE_B){
3056         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3057         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3058     }else{
3059         s->pp_time= s->time - s->last_non_b_time;
3060         s->last_non_b_time= s->time;
3061         assert(s->picture_number==0 || s->pp_time > 0);
3062     }
3063 }
3064
3065 static int encode_picture(MpegEncContext *s, int picture_number)
3066 {
3067     int i, ret;
3068     int bits;
3069     int context_count = s->slice_context_count;
3070
3071     s->picture_number = picture_number;
3072
3073     /* Reset the average MB variance */
3074     s->me.mb_var_sum_temp    =
3075     s->me.mc_mb_var_sum_temp = 0;
3076
3077     /* we need to initialize some time vars before we can encode b-frames */
3078     // RAL: Condition added for MPEG1VIDEO
3079     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3080         set_frame_distances(s);
3081     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3082         ff_set_mpeg4_time(s);
3083
3084     s->me.scene_change_score=0;
3085
3086 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3087
3088     if(s->pict_type==AV_PICTURE_TYPE_I){
3089         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3090         else                        s->no_rounding=0;
3091     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3092         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3093             s->no_rounding ^= 1;
3094     }
3095
3096     if(s->flags & CODEC_FLAG_PASS2){
3097         if (estimate_qp(s,1) < 0)
3098             return -1;
3099         ff_get_2pass_fcode(s);
3100     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3101         if(s->pict_type==AV_PICTURE_TYPE_B)
3102             s->lambda= s->last_lambda_for[s->pict_type];
3103         else
3104             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3105         update_qscale(s);
3106     }
3107
3108     s->mb_intra=0; //for the rate distortion & bit compare functions
3109     for(i=1; i<context_count; i++){
3110         ret = ff_update_duplicate_context(s->thread_context[i], s);
3111         if (ret < 0)
3112             return ret;
3113     }
3114
3115     if(ff_init_me(s)<0)
3116         return -1;
3117
3118     /* Estimate motion for every MB */
3119     if(s->pict_type != AV_PICTURE_TYPE_I){
3120         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3121         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3122         if (s->pict_type != AV_PICTURE_TYPE_B) {
3123             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3124                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3125             }
3126         }
3127
3128         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3129     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3130         /* I-Frame */
3131         for(i=0; i<s->mb_stride*s->mb_height; i++)
3132             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3133
3134         if(!s->fixed_qscale){
3135             /* finding spatial complexity for I-frame rate control */
3136             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3137         }
3138     }
3139     for(i=1; i<context_count; i++){
3140         merge_context_after_me(s, s->thread_context[i]);
3141     }
3142     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3143     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
3144     emms_c();
3145
3146     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3147         s->pict_type= AV_PICTURE_TYPE_I;
3148         for(i=0; i<s->mb_stride*s->mb_height; i++)
3149             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3150         av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3151                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3152     }
3153
3154     if(!s->umvplus){
3155         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3156             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3157
3158             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3159                 int a,b;
3160                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3161                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3162                 s->f_code= FFMAX3(s->f_code, a, b);
3163             }
3164
3165             ff_fix_long_p_mvs(s);
3166             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3167             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3168                 int j;
3169                 for(i=0; i<2; i++){
3170                     for(j=0; j<2; j++)
3171                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3172                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3173                 }
3174             }
3175         }
3176
3177         if(s->pict_type==AV_PICTURE_TYPE_B){
3178             int a, b;
3179
3180             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3181             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3182             s->f_code = FFMAX(a, b);
3183
3184             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3185             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3186             s->b_code = FFMAX(a, b);
3187
3188             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3189             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3190             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3191             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3192             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3193                 int dir, j;
3194                 for(dir=0; dir<2; dir++){
3195                     for(i=0; i<2; i++){
3196                         for(j=0; j<2; j++){
3197                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3198                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3199                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3200                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3201                         }
3202                     }
3203                 }
3204             }
3205         }
3206     }
3207
3208     if (estimate_qp(s, 0) < 0)
3209         return -1;
3210
3211     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3212         s->qscale= 3; //reduce clipping problems
3213
3214     if (s->out_format == FMT_MJPEG) {
3215         /* for mjpeg, we do include qscale in the matrix */
3216         for(i=1;i<64;i++){
3217             int j= s->dsp.idct_permutation[i];
3218
3219             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3220         }
3221         s->y_dc_scale_table=
3222         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3223         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3224         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3225                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3226         s->qscale= 8;
3227     }
3228
3229     //FIXME var duplication
3230     s->current_picture_ptr->f.key_frame =
3231     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3232     s->current_picture_ptr->f.pict_type =
3233     s->current_picture.f.pict_type = s->pict_type;
3234
3235     if (s->current_picture.f.key_frame)
3236         s->picture_in_gop_number=0;
3237
3238     s->last_bits= put_bits_count(&s->pb);
3239     switch(s->out_format) {
3240     case FMT_MJPEG:
3241         if (CONFIG_MJPEG_ENCODER)
3242             ff_mjpeg_encode_picture_header(s);
3243         break;
3244     case FMT_H261:
3245         if (CONFIG_H261_ENCODER)
3246             ff_h261_encode_picture_header(s, picture_number);
3247         break;
3248     case FMT_H263:
3249         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3250             ff_wmv2_encode_picture_header(s, picture_number);
3251         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3252             ff_msmpeg4_encode_picture_header(s, picture_number);
3253         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3254             ff_mpeg4_encode_picture_header(s, picture_number);
3255         else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3256             ff_rv10_encode_picture_header(s, picture_number);
3257         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3258             ff_rv20_encode_picture_header(s, picture_number);
3259         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3260             ff_flv_encode_picture_header(s, picture_number);
3261         else if (CONFIG_H263_ENCODER)
3262             ff_h263_encode_picture_header(s, picture_number);
3263         break;
3264     case FMT_MPEG1:
3265         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3266             ff_mpeg1_encode_picture_header(s, picture_number);
3267         break;
3268     default:
3269         assert(0);
3270     }
3271     bits= put_bits_count(&s->pb);
3272     s->header_bits= bits - s->last_bits;
3273
3274     for(i=1; i<context_count; i++){
3275         update_duplicate_context_after_me(s->thread_context[i], s);
3276     }
3277     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3278     for(i=1; i<context_count; i++){
3279         merge_context_after_encode(s, s->thread_context[i]);
3280     }
3281     emms_c();
3282     return 0;
3283 }
3284
3285 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3286     const int intra= s->mb_intra;
3287     int i;
3288
3289     s->dct_count[intra]++;
3290
3291     for(i=0; i<64; i++){
3292         int level= block[i];
3293
3294         if(level){
3295             if(level>0){
3296                 s->dct_error_sum[intra][i] += level;
3297                 level -= s->dct_offset[intra][i];
3298                 if(level<0) level=0;
3299             }else{
3300                 s->dct_error_sum[intra][i] -= level;
3301                 level += s->dct_offset[intra][i];
3302                 if(level>0) level=0;
3303             }
3304             block[i]= level;
3305         }
3306     }
3307 }
3308
3309 static int dct_quantize_trellis_c(MpegEncContext *s,
3310                                   int16_t *block, int n,
3311                                   int qscale, int *overflow){
3312     const int *qmat;
3313     const uint8_t *scantable= s->intra_scantable.scantable;
3314     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3315     int max=0;
3316     unsigned int threshold1, threshold2;
3317     int bias=0;
3318     int run_tab[65];
3319     int level_tab[65];
3320     int score_tab[65];
3321     int survivor[65];
3322     int survivor_count;
3323     int last_run=0;
3324     int last_level=0;
3325     int last_score= 0;
3326     int last_i;
3327     int coeff[2][64];
3328     int coeff_count[64];
3329     int qmul, qadd, start_i, last_non_zero, i, dc;
3330     const int esc_length= s->ac_esc_length;
3331     uint8_t * length;
3332     uint8_t * last_length;
3333     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3334
3335     s->dsp.fdct (block);
3336
3337     if(s->dct_error_sum)
3338         s->denoise_dct(s, block);
3339     qmul= qscale*16;
3340     qadd= ((qscale-1)|1)*8;
3341
3342     if (s->mb_intra) {
3343         int q;
3344         if (!s->h263_aic) {
3345             if (n < 4)
3346                 q = s->y_dc_scale;
3347             else
3348                 q = s->c_dc_scale;
3349             q = q << 3;
3350         } else{
3351             /* For AIC we skip quant/dequant of INTRADC */
3352             q = 1 << 3;
3353             qadd=0;
3354         }
3355
3356         /* note: block[0] is assumed to be positive */
3357         block[0] = (block[0] + (q >> 1)) / q;
3358         start_i = 1;
3359         last_non_zero = 0;
3360         qmat = s->q_intra_matrix[qscale];
3361         if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3362             bias= 1<<(QMAT_SHIFT-1);
3363         length     = s->intra_ac_vlc_length;
3364         last_length= s->intra_ac_vlc_last_length;
3365     } else {
3366         start_i = 0;
3367         last_non_zero = -1;
3368         qmat = s->q_inter_matrix[qscale];
3369         length     = s->inter_ac_vlc_length;
3370         last_length= s->inter_ac_vlc_last_length;
3371     }
3372     last_i= start_i;
3373
3374     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3375     threshold2= (threshold1<<1);
3376
3377     for(i=63; i>=start_i; i--) {
3378         const int j = scantable[i];
3379         int level = block[j] * qmat[j];
3380
3381         if(((unsigned)(level+threshold1))>threshold2){
3382             last_non_zero = i;
3383             break;
3384         }
3385     }
3386
3387     for(i=start_i; i<=last_non_zero; i++) {
3388         const int j = scantable[i];
3389         int level = block[j] * qmat[j];
3390
3391 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3392 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3393         if(((unsigned)(level+threshold1))>threshold2){
3394             if(level>0){
3395                 level= (bias + level)>>QMAT_SHIFT;
3396                 coeff[0][i]= level;
3397                 coeff[1][i]= level-1;
3398 //                coeff[2][k]= level-2;
3399             }else{
3400                 level= (bias - level)>>QMAT_SHIFT;
3401                 coeff[0][i]= -level;
3402                 coeff[1][i]= -level+1;
3403 //                coeff[2][k]= -level+2;
3404             }
3405             coeff_count[i]= FFMIN(level, 2);
3406             assert(coeff_count[i]);
3407             max |=level;
3408         }else{
3409             coeff[0][i]= (level>>31)|1;
3410             coeff_count[i]= 1;
3411         }
3412     }
3413
3414     *overflow= s->max_qcoeff < max; //overflow might have happened
3415
3416     if(last_non_zero < start_i){
3417         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3418         return last_non_zero;
3419     }
3420
3421     score_tab[start_i]= 0;
3422     survivor[0]= start_i;
3423     survivor_count= 1;
3424
3425     for(i=start_i; i<=last_non_zero; i++){
3426         int level_index, j, zero_distortion;
3427         int dct_coeff= FFABS(block[ scantable[i] ]);
3428         int best_score=256*256*256*120;
3429
3430         if (s->dsp.fdct == ff_fdct_ifast)
3431             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3432         zero_distortion= dct_coeff*dct_coeff;
3433
3434         for(level_index=0; level_index < coeff_count[i]; level_index++){
3435             int distortion;
3436             int level= coeff[level_index][i];
3437             const int alevel= FFABS(level);
3438             int unquant_coeff;
3439
3440             assert(level);
3441
3442             if(s->out_format == FMT_H263){
3443                 unquant_coeff= alevel*qmul + qadd;
3444             }else{ //MPEG1
3445                 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3446                 if(s->mb_intra){
3447                         unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
3448                         unquant_coeff =   (unquant_coeff - 1) | 1;
3449                 }else{
3450                         unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3451                         unquant_coeff =   (unquant_coeff - 1) | 1;
3452                 }
3453                 unquant_coeff<<= 3;
3454             }
3455
3456             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3457             level+=64;
3458             if((level&(~127)) == 0){
3459                 for(j=survivor_count-1; j>=0; j--){
3460                     int run= i - survivor[j];
3461                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3462                     score += score_tab[i-run];
3463
3464                     if(score < best_score){
3465                         best_score= score;
3466                         run_tab[i+1]= run;
3467                         level_tab[i+1]= level-64;
3468                     }
3469                 }
3470
3471                 if(s->out_format == FMT_H263){
3472                     for(j=survivor_count-1; j>=0; j--){
3473                         int run= i - survivor[j];
3474                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3475                         score += score_tab[i-run];
3476                         if(score < last_score){
3477                             last_score= score;
3478                             last_run= run;
3479                             last_level= level-64;
3480                             last_i= i+1;
3481                         }
3482                     }
3483                 }
3484             }else{
3485                 distortion += esc_length*lambda;
3486                 for(j=survivor_count-1; j>=0; j--){
3487                     int run= i - survivor[j];
3488                     int score= distortion + score_tab[i-run];
3489
3490                     if(score < best_score){
3491                         best_score= score;
3492                         run_tab[i+1]= run;
3493                         level_tab[i+1]= level-64;
3494                     }
3495                 }
3496
3497                 if(s->out_format == FMT_H263){
3498                   for(j=survivor_count-1; j>=0; j--){
3499                         int run= i - survivor[j];
3500                         int score= distortion + score_tab[i-run];
3501                         if(score < last_score){
3502                             last_score= score;
3503                             last_run= run;
3504                             last_level= level-64;
3505                             last_i= i+1;
3506                         }
3507                     }
3508                 }
3509             }
3510         }
3511
3512         score_tab[i+1]= best_score;
3513
3514         //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3515         if(last_non_zero <= 27){
3516             for(; survivor_count; survivor_count--){
3517                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3518                     break;
3519             }
3520         }else{
3521             for(; survivor_count; survivor_count--){
3522                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3523                     break;
3524             }
3525         }
3526
3527         survivor[ survivor_count++ ]= i+1;
3528     }
3529
3530     if(s->out_format != FMT_H263){
3531         last_score= 256*256*256*120;
3532         for(i= survivor[0]; i<=last_non_zero + 1; i++){
3533             int score= score_tab[i];
3534             if(i) score += lambda*2; //FIXME exacter?
3535
3536             if(score < last_score){
3537                 last_score= score;
3538                 last_i= i;
3539                 last_level= level_tab[i];
3540                 last_run= run_tab[i];
3541             }
3542         }
3543     }
3544
3545     s->coded_score[n] = last_score;
3546
3547     dc= FFABS(block[0]);
3548     last_non_zero= last_i - 1;
3549     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3550
3551     if(last_non_zero < start_i)
3552         return last_non_zero;
3553
3554     if(last_non_zero == 0 && start_i == 0){
3555         int best_level= 0;
3556         int best_score= dc * dc;
3557
3558         for(i=0; i<coeff_count[0]; i++){
3559             int level= coeff[i][0];
3560             int alevel= FFABS(level);
3561             int unquant_coeff, score, distortion;
3562
3563             if(s->out_format == FMT_H263){
3564                     unquant_coeff= (alevel*qmul + qadd)>>3;
3565             }else{ //MPEG1
3566                     unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3567                     unquant_coeff =   (unquant_coeff - 1) | 1;
3568             }
3569             unquant_coeff = (unquant_coeff + 4) >> 3;
3570             unquant_coeff<<= 3 + 3;
3571
3572             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3573             level+=64;
3574             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3575             else                    score= distortion + esc_length*lambda;
3576
3577             if(score < best_score){
3578                 best_score= score;
3579                 best_level= level - 64;
3580             }
3581         }
3582         block[0]= best_level;
3583         s->coded_score[n] = best_score - dc*dc;
3584         if(best_level == 0) return -1;
3585         else                return last_non_zero;
3586     }
3587
3588     i= last_i;
3589     assert(last_level);
3590
3591     block[ perm_scantable[last_non_zero] ]= last_level;
3592     i -= last_run + 1;
3593
3594     for(; i>start_i; i -= run_tab[i] + 1){
3595         block[ perm_scantable[i-1] ]= level_tab[i];
3596     }
3597
3598     return last_non_zero;
3599 }
3600
3601 //#define REFINE_STATS 1
3602 static int16_t basis[64][64];
3603
3604 static void build_basis(uint8_t *perm){
3605     int i, j, x, y;
3606     emms_c();
3607     for(i=0; i<8; i++){
3608         for(j=0; j<8; j++){
3609             for(y=0; y<8; y++){
3610                 for(x=0; x<8; x++){
3611                     double s= 0.25*(1<<BASIS_SHIFT);
3612                     int index= 8*i + j;
3613                     int perm_index= perm[index];
3614                     if(i==0) s*= sqrt(0.5);
3615                     if(j==0) s*= sqrt(0.5);
3616                     basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3617                 }
3618             }
3619         }
3620     }
3621 }
3622
3623 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3624                         int16_t *block, int16_t *weight, int16_t *orig,
3625                         int n, int qscale){
3626     int16_t rem[64];
3627     LOCAL_ALIGNED_16(int16_t, d1, [64]);
3628     const uint8_t *scantable= s->intra_scantable.scantable;
3629     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3630 //    unsigned int threshold1, threshold2;
3631 //    int bias=0;
3632     int run_tab[65];
3633     int prev_run=0;
3634     int prev_level=0;
3635     int qmul, qadd, start_i, last_non_zero, i, dc;
3636     uint8_t * length;
3637     uint8_t * last_length;
3638     int lambda;
3639     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3640 #ifdef REFINE_STATS
3641 static int count=0;
3642 static int after_last=0;
3643 static int to_zero=0;
3644 static int from_zero=0;
3645 static int raise=0;
3646 static int lower=0;
3647 static int messed_sign=0;
3648 #endif
3649
3650     if(basis[0][0] == 0)
3651         build_basis(s->dsp.idct_permutation);
3652
3653     qmul= qscale*2;
3654     qadd= (qscale-1)|1;
3655     if (s->mb_intra) {
3656         if (!s->h263_aic) {
3657             if (n < 4)
3658                 q = s->y_dc_scale;
3659             else
3660                 q = s->c_dc_scale;
3661         } else{
3662             /* For AIC we skip quant/dequant of INTRADC */
3663             q = 1;
3664             qadd=0;
3665         }
3666         q <<= RECON_SHIFT-3;
3667         /* note: block[0] is assumed to be positive */
3668         dc= block[0]*q;
3669 //        block[0] = (block[0] + (q >> 1)) / q;
3670         start_i = 1;
3671 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3672 //            bias= 1<<(QMAT_SHIFT-1);
3673         length     = s->intra_ac_vlc_length;
3674         last_length= s->intra_ac_vlc_last_length;
3675     } else {
3676         dc= 0;
3677         start_i = 0;
3678         length     = s->inter_ac_vlc_length;
3679         last_length= s->inter_ac_vlc_last_length;
3680     }
3681     last_non_zero = s->block_last_index[n];
3682
3683 #ifdef REFINE_STATS
3684 {START_TIMER
3685 #endif
3686     dc += (1<<(RECON_SHIFT-1));
3687     for(i=0; i<64; i++){
3688         rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME  use orig dirrectly instead of copying to rem[]
3689     }
3690 #ifdef REFINE_STATS
3691 STOP_TIMER("memset rem[]")}
3692 #endif
3693     sum=0;
3694     for(i=0; i<64; i++){
3695         int one= 36;
3696         int qns=4;
3697         int w;
3698
3699         w= FFABS(weight[i]) + qns*one;
3700         w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3701
3702         weight[i] = w;
3703 //        w=weight[i] = (63*qns + (w/2)) / w;
3704
3705         assert(w>0);
3706         assert(w<(1<<6));
3707         sum += w*w;
3708     }
3709     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
3710 #ifdef REFINE_STATS
3711 {START_TIMER
3712 #endif
3713     run=0;
3714     rle_index=0;
3715     for(i=start_i; i<=last_non_zero; i++){
3716         int j= perm_scantable[i];
3717         const int level= block[j];
3718         int coeff;
3719
3720         if(level){
3721             if(level<0) coeff= qmul*level - qadd;
3722             else        coeff= qmul*level + qadd;
3723             run_tab[rle_index++]=run;
3724             run=0;
3725
3726             s->dsp.add_8x8basis(rem, basis[j], coeff);
3727         }else{
3728             run++;
3729         }
3730     }
3731 #ifdef REFINE_STATS
3732 if(last_non_zero>0){
3733 STOP_TIMER("init rem[]")
3734 }
3735 }
3736
3737 {START_TIMER
3738 #endif
3739     for(;;){
3740         int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3741         int best_coeff=0;
3742         int best_change=0;
3743         int run2, best_unquant_change=0, analyze_gradient;
3744 #ifdef REFINE_STATS
3745 {START_TIMER
3746 #endif
3747         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3748
3749         if(analyze_gradient){
3750 #ifdef REFINE_STATS
3751 {START_TIMER
3752 #endif
3753             for(i=0; i<64; i++){
3754                 int w= weight[i];
3755
3756                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3757             }
3758 #ifdef REFINE_STATS
3759 STOP_TIMER("rem*w*w")}
3760 {START_TIMER
3761 #endif
3762             s->dsp.fdct(d1);
3763 #ifdef REFINE_STATS
3764 STOP_TIMER("dct")}
3765 #endif
3766         }
3767
3768         if(start_i){
3769             const int level= block[0];
3770             int change, old_coeff;
3771
3772             assert(s->mb_intra);
3773
3774             old_coeff= q*level;
3775
3776             for(change=-1; change<=1; change+=2){
3777                 int new_level= level + change;
3778                 int score, new_coeff;
3779
3780                 new_coeff= q*new_level;
3781                 if(new_coeff >= 2048 || new_coeff < 0)
3782                     continue;
3783
3784                 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3785                 if(score<best_score){
3786                     best_score= score;
3787                     best_coeff= 0;
3788                     best_change= change;
3789                     best_unquant_change= new_coeff - old_coeff;
3790                 }
3791             }
3792         }
3793
3794         run=0;
3795         rle_index=0;
3796         run2= run_tab[rle_index++];
3797         prev_level=0;
3798         prev_run=0;
3799
3800         for(i=start_i; i<64; i++){
3801             int j= perm_scantable[i];
3802             const int level= block[j];
3803             int change, old_coeff;
3804
3805             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3806                 break;
3807
3808             if(level){
3809                 if(level<0) old_coeff= qmul*level - qadd;
3810                 else        old_coeff= qmul*level + qadd;
3811                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3812             }else{
3813                 old_coeff=0;
3814                 run2--;
3815                 assert(run2>=0 || i >= last_non_zero );
3816             }
3817
3818             for(change=-1; change<=1; change+=2){
3819                 int new_level= level + change;
3820                 int score, new_coeff, unquant_change;
3821
3822                 score=0;
3823                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3824                    continue;
3825
3826                 if(new_level){
3827                     if(new_level<0) new_coeff= qmul*new_level - qadd;
3828                     else            new_coeff= qmul*new_level + qadd;
3829                     if(new_coeff >= 2048 || new_coeff <= -2048)
3830                         continue;
3831                     //FIXME check for overflow
3832
3833                     if(level){
3834                         if(level < 63 && level > -63){
3835                             if(i < last_non_zero)
3836                                 score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
3837                                          - length[UNI_AC_ENC_INDEX(run, level+64)];
3838                             else
3839                                 score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3840                                          - last_length[UNI_AC_ENC_INDEX(run, level+64)];
3841                         }
3842                     }else{
3843                         assert(FFABS(new_level)==1);
3844
3845                         if(analyze_gradient){
3846                             int g= d1[ scantable[i] ];
3847                             if(g && (g^new_level) >= 0)
3848                                 continue;
3849                         }
3850
3851                         if(i < last_non_zero){
3852                             int next_i= i + run2 + 1;
3853                             int next_level= block[ perm_scantable[next_i] ] + 64;
3854
3855                             if(next_level&(~127))
3856                                 next_level= 0;
3857
3858                             if(next_i < last_non_zero)
3859                                 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
3860                                          + length[UNI_AC_ENC_INDEX(run2, next_level)]
3861                                          - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3862                             else
3863                                 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
3864                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3865                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3866                         }else{
3867                             score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3868                             if(prev_level){
3869                                 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3870                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3871                             }
3872                         }
3873                     }
3874                 }else{
3875                     new_coeff=0;
3876                     assert(FFABS(level)==1);
3877
3878                     if(i < last_non_zero){
3879                         int next_i= i + run2 + 1;
3880                         int next_level= block[ perm_scantable[next_i] ] + 64;
3881
3882                         if(next_level&(~127))
3883                             next_level= 0;
3884
3885                         if(next_i < last_non_zero)
3886                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3887                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
3888                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3889                         else
3890                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3891                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3892                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3893                     }else{
3894                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3895                         if(prev_level){
3896                             score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3897                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3898                         }
3899                     }
3900                 }
3901
3902                 score *= lambda;
3903
3904                 unquant_change= new_coeff - old_coeff;
3905                 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3906
3907                 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3908                 if(score<best_score){
3909                     best_score= score;
3910                     best_coeff= i;
3911                     best_change= change;
3912                     best_unquant_change= unquant_change;
3913                 }
3914             }
3915             if(level){
3916                 prev_level= level + 64;
3917                 if(prev_level&(~127))
3918                     prev_level= 0;
3919                 prev_run= run;
3920                 run=0;
3921             }else{
3922                 run++;
3923             }
3924         }
3925 #ifdef REFINE_STATS
3926 STOP_TIMER("iterative step")}
3927 #endif
3928
3929         if(best_change){
3930             int j= perm_scantable[ best_coeff ];
3931
3932             block[j] += best_change;
3933
3934             if(best_coeff > last_non_zero){
3935                 last_non_zero= best_coeff;
3936                 assert(block[j]);
3937 #ifdef REFINE_STATS
3938 after_last++;
3939 #endif
3940             }else{
3941 #ifdef REFINE_STATS
3942 if(block[j]){
3943     if(block[j] - best_change){
3944         if(FFABS(block[j]) > FFABS(block[j] - best_change)){
3945             raise++;
3946         }else{
3947             lower++;
3948         }
3949     }else{
3950         from_zero++;
3951     }
3952 }else{
3953     to_zero++;
3954 }
3955 #endif
3956                 for(; last_non_zero>=start_i; last_non_zero--){
3957                     if(block[perm_scantable[last_non_zero]])
3958                         break;
3959                 }
3960             }
3961 #ifdef REFINE_STATS
3962 count++;
3963 if(256*256*256*64 % count == 0){
3964     printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
3965 }
3966 #endif
3967             run=0;
3968             rle_index=0;
3969             for(i=start_i; i<=last_non_zero; i++){
3970                 int j= perm_scantable[i];
3971                 const int level= block[j];
3972
3973                  if(level){
3974                      run_tab[rle_index++]=run;
3975                      run=0;
3976                  }else{
3977                      run++;
3978                  }
3979             }
3980
3981             s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3982         }else{
3983             break;
3984         }
3985     }
3986 #ifdef REFINE_STATS
3987 if(last_non_zero>0){
3988 STOP_TIMER("iterative search")
3989 }
3990 }
3991 #endif
3992
3993     return last_non_zero;
3994 }
3995
3996 int ff_dct_quantize_c(MpegEncContext *s,
3997                         int16_t *block, int n,
3998                         int qscale, int *overflow)
3999 {
4000     int i, j, level, last_non_zero, q, start_i;
4001     const int *qmat;
4002     const uint8_t *scantable= s->intra_scantable.scantable;
4003     int bias;
4004     int max=0;
4005     unsigned int threshold1, threshold2;
4006
4007     s->dsp.fdct (block);
4008
4009     if(s->dct_error_sum)
4010         s->denoise_dct(s, block);
4011
4012     if (s->mb_intra) {
4013         if (!s->h263_aic) {
4014             if (n < 4)
4015                 q = s->y_dc_scale;
4016             else
4017                 q = s->c_dc_scale;
4018             q = q << 3;
4019         } else
4020             /* For AIC we skip quant/dequant of INTRADC */
4021             q = 1 << 3;
4022
4023         /* note: block[0] is assumed to be positive */
4024         block[0] = (block[0] + (q >> 1)) / q;
4025         start_i = 1;
4026         last_non_zero = 0;
4027         qmat = s->q_intra_matrix[qscale];
4028         bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4029     } else {
4030         start_i = 0;
4031         last_non_zero = -1;
4032         qmat = s->q_inter_matrix[qscale];
4033         bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4034     }
4035     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4036     threshold2= (threshold1<<1);
4037     for(i=63;i>=start_i;i--) {
4038         j = scantable[i];
4039         level = block[j] * qmat[j];
4040
4041         if(((unsigned)(level+threshold1))>threshold2){
4042             last_non_zero = i;
4043             break;
4044         }else{
4045             block[j]=0;
4046         }
4047     }
4048     for(i=start_i; i<=last_non_zero; i++) {
4049         j = scantable[i];
4050         level = block[j] * qmat[j];
4051
4052 //        if(   bias+level >= (1<<QMAT_SHIFT)
4053 //           || bias-level >= (1<<QMAT_SHIFT)){
4054         if(((unsigned)(level+threshold1))>threshold2){
4055             if(level>0){
4056                 level= (bias + level)>>QMAT_SHIFT;
4057                 block[j]= level;
4058             }else{
4059                 level= (bias - level)>>QMAT_SHIFT;
4060                 block[j]= -level;
4061             }
4062             max |=level;
4063         }else{
4064             block[j]=0;
4065         }
4066     }
4067     *overflow= s->max_qcoeff < max; //overflow might have happened
4068
4069     /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4070     if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4071         ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4072
4073     return last_non_zero;
4074 }
4075
4076 #define OFFSET(x) offsetof(MpegEncContext, x)
4077 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4078 static const AVOption h263_options[] = {
4079     { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4080     { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4081     { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4082     FF_MPV_COMMON_OPTS
4083     { NULL },
4084 };
4085
4086 static const AVClass h263_class = {
4087     .class_name = "H.263 encoder",
4088     .item_name  = av_default_item_name,
4089     .option     = h263_options,
4090     .version    = LIBAVUTIL_VERSION_INT,
4091 };
4092
4093 AVCodec ff_h263_encoder = {
4094     .name           = "h263",
4095     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4096     .type           = AVMEDIA_TYPE_VIDEO,
4097     .id             = AV_CODEC_ID_H263,
4098     .priv_data_size = sizeof(MpegEncContext),
4099     .init           = ff_MPV_encode_init,
4100     .encode2        = ff_MPV_encode_picture,
4101     .close          = ff_MPV_encode_end,
4102     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4103     .priv_class     = &h263_class,
4104 };
4105
4106 static const AVOption h263p_options[] = {
4107     { "umv",        "Use unlimited motion vectors.",    OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4108     { "aiv",        "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4109     { "obmc",       "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4110     { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4111     FF_MPV_COMMON_OPTS
4112     { NULL },
4113 };
4114 static const AVClass h263p_class = {
4115     .class_name = "H.263p encoder",
4116     .item_name  = av_default_item_name,
4117     .option     = h263p_options,
4118     .version    = LIBAVUTIL_VERSION_INT,
4119 };
4120
4121 AVCodec ff_h263p_encoder = {
4122     .name           = "h263p",
4123     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4124     .type           = AVMEDIA_TYPE_VIDEO,
4125     .id             = AV_CODEC_ID_H263P,
4126     .priv_data_size = sizeof(MpegEncContext),
4127     .init           = ff_MPV_encode_init,
4128     .encode2        = ff_MPV_encode_picture,
4129     .close          = ff_MPV_encode_end,
4130     .capabilities   = CODEC_CAP_SLICE_THREADS,
4131     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4132     .priv_class     = &h263p_class,
4133 };
4134
4135 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4136
4137 AVCodec ff_msmpeg4v2_encoder = {
4138     .name           = "msmpeg4v2",
4139     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4140     .type           = AVMEDIA_TYPE_VIDEO,
4141     .id             = AV_CODEC_ID_MSMPEG4V2,
4142     .priv_data_size = sizeof(MpegEncContext),
4143     .init           = ff_MPV_encode_init,
4144     .encode2        = ff_MPV_encode_picture,
4145     .close          = ff_MPV_encode_end,
4146     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4147     .priv_class     = &msmpeg4v2_class,
4148 };
4149
4150 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4151
4152 AVCodec ff_msmpeg4v3_encoder = {
4153     .name           = "msmpeg4",
4154     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4155     .type           = AVMEDIA_TYPE_VIDEO,
4156     .id             = AV_CODEC_ID_MSMPEG4V3,
4157     .priv_data_size = sizeof(MpegEncContext),
4158     .init           = ff_MPV_encode_init,
4159     .encode2        = ff_MPV_encode_picture,
4160     .close          = ff_MPV_encode_end,
4161     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4162     .priv_class     = &msmpeg4v3_class,
4163 };
4164
4165 FF_MPV_GENERIC_CLASS(wmv1)
4166
4167 AVCodec ff_wmv1_encoder = {
4168     .name           = "wmv1",
4169     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4170     .type           = AVMEDIA_TYPE_VIDEO,
4171     .id             = AV_CODEC_ID_WMV1,
4172     .priv_data_size = sizeof(MpegEncContext),
4173     .init           = ff_MPV_encode_init,
4174     .encode2        = ff_MPV_encode_picture,
4175     .close          = ff_MPV_encode_end,
4176     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4177     .priv_class     = &wmv1_class,
4178 };