libavcodec/mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
35 #include "avcodec.h"
36 #include "dct.h"
37 #include "dsputil.h"
38 #include "mpeg12.h"
39 #include "mpegvideo.h"
40 #include "h261.h"
41 #include "h263.h"
42 #include "mathops.h"
43 #include "mjpegenc.h"
44 #include "msmpeg4.h"
45 #include "faandct.h"
46 #include "thread.h"
47 #include "aandcttab.h"
48 #include "flv.h"
49 #include "mpeg4video.h"
50 #include "internal.h"
51 #include "bytestream.h"
52 #include <limits.h>
53
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
59
60 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
61 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
62
63 const AVOption ff_mpv_generic_options[] = {
64     FF_MPV_COMMON_OPTS
65     { NULL },
66 };
67
68 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
69                        uint16_t (*qmat16)[2][64],
70                        const uint16_t *quant_matrix,
71                        int bias, int qmin, int qmax, int intra)
72 {
73     int qscale;
74     int shift = 0;
75
76     for (qscale = qmin; qscale <= qmax; qscale++) {
77         int i;
78         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
79             dsp->fdct == ff_jpeg_fdct_islow_10 ||
80             dsp->fdct == ff_faandct) {
81             for (i = 0; i < 64; i++) {
82                 const int j = dsp->idct_permutation[i];
83                 /* 16 <= qscale * quant_matrix[i] <= 7905
84                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
85                  *             19952 <=              x  <= 249205026
86                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
87                  *           3444240 >= (1 << 36) / (x) >= 275 */
88
89                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90                                         (qscale * quant_matrix[j]));
91             }
92         } else if (dsp->fdct == ff_fdct_ifast) {
93             for (i = 0; i < 64; i++) {
94                 const int j = dsp->idct_permutation[i];
95                 /* 16 <= qscale * quant_matrix[i] <= 7905
96                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
97                  *             19952 <=              x  <= 249205026
98                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
99                  *           3444240 >= (1 << 36) / (x) >= 275 */
100
101                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
102                                         (ff_aanscales[i] * qscale *
103                                          quant_matrix[j]));
104             }
105         } else {
106             for (i = 0; i < 64; i++) {
107                 const int j = dsp->idct_permutation[i];
108                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109                  * Assume x = qscale * quant_matrix[i]
110                  * So             16 <=              x  <= 7905
111                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112                  * so          32768 >= (1 << 19) / (x) >= 67 */
113                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114                                         (qscale * quant_matrix[j]));
115                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116                 //                    (qscale * quant_matrix[i]);
117                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118                                        (qscale * quant_matrix[j]);
119
120                 if (qmat16[qscale][0][i] == 0 ||
121                     qmat16[qscale][0][i] == 128 * 256)
122                     qmat16[qscale][0][i] = 128 * 256 - 1;
123                 qmat16[qscale][1][i] =
124                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125                                 qmat16[qscale][0][i]);
126             }
127         }
128
129         for (i = intra; i < 64; i++) {
130             int64_t max = 8191;
131             if (dsp->fdct == ff_fdct_ifast) {
132                 max = (8191LL * ff_aanscales[i]) >> 14;
133             }
134             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
135                 shift++;
136             }
137         }
138     }
139     if (shift) {
140         av_log(NULL, AV_LOG_INFO,
141                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
142                QMAT_SHIFT - shift);
143     }
144 }
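/* How these tables are consumed (annotation added for clarity; a rough sketch,
 * not a quote of the quantizer): qmat[] holds fixed-point reciprocals of
 * qscale * quant_matrix[i], so dct_quantize can replace the division by a
 * multiply and a shift, approximately
 *
 *     level = (block[j] * qmat[qscale][j] + rounding_bias) >> QMAT_SHIFT;
 *
 * qmat16[..][0] is the same reciprocal at 16-bit precision for the SIMD path
 * and qmat16[..][1] the matching pre-scaled rounding bias. */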
145
146 static inline void update_qscale(MpegEncContext *s)
147 {
148     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149                 (FF_LAMBDA_SHIFT + 7);
150     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
151
152     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
153                  FF_LAMBDA_SHIFT;
154 }
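/* Worked numbers (annotation added for clarity, assuming FF_LAMBDA_SHIFT == 7
 * and FF_LAMBDA_SCALE == 128): 139 / 2^14 is roughly 1/118, i.e. 1/FF_QP2LAMBDA,
 * so for example lambda = 1000 gives
 * qscale = (1000 * 139 + 128 * 64) >> 14 = 8 before clipping to [qmin, qmax]. */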
155
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
157 {
158     int i;
159
160     if (matrix) {
161         put_bits(pb, 1, 1);
162         for (i = 0; i < 64; i++) {
163             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
164         }
165     } else
166         put_bits(pb, 1, 0);
167 }
168
169 /**
170  * init s->current_picture.qscale_table from s->lambda_table
171  */
172 void ff_init_qscale_tab(MpegEncContext *s)
173 {
174     int8_t * const qscale_table = s->current_picture.qscale_table;
175     int i;
176
177     for (i = 0; i < s->mb_num; i++) {
178         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
181                                                   s->avctx->qmax);
182     }
183 }
184
185 static void update_duplicate_context_after_me(MpegEncContext *dst,
186                                               MpegEncContext *src)
187 {
188 #define COPY(a) dst->a= src->a
189     COPY(pict_type);
190     COPY(current_picture);
191     COPY(f_code);
192     COPY(b_code);
193     COPY(qscale);
194     COPY(lambda);
195     COPY(lambda2);
196     COPY(picture_in_gop_number);
197     COPY(gop_picture_number);
198     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199     COPY(progressive_frame);    // FIXME don't set in encode_header
200     COPY(partitioned_frame);    // FIXME don't set in encode_header
201 #undef COPY
202 }
203
204 /**
205  * Set the given MpegEncContext to defaults for encoding.
206  * The changed fields will not depend upon the prior state of the MpegEncContext.
207  */
208 static void MPV_encode_defaults(MpegEncContext *s)
209 {
210     int i;
211     ff_MPV_common_defaults(s);
212
213     for (i = -16; i < 16; i++) {
214         default_fcode_tab[i + MAX_MV] = 1;
215     }
216     s->me.mv_penalty = default_mv_penalty;
217     s->fcode_tab     = default_fcode_tab;
218 }
219
220 /* init video encoder */
221 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
222 {
223     MpegEncContext *s = avctx->priv_data;
224     int i;
225     int chroma_h_shift, chroma_v_shift;
226
227     MPV_encode_defaults(s);
228
229     switch (avctx->codec_id) {
230     case AV_CODEC_ID_MPEG2VIDEO:
231         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
232             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
233             av_log(avctx, AV_LOG_ERROR,
234                    "only YUV420 and YUV422 are supported\n");
235             return -1;
236         }
237         break;
238     case AV_CODEC_ID_LJPEG:
239         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
240             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
241             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
242             avctx->pix_fmt != AV_PIX_FMT_BGRA     &&
243             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
244               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
245               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
246              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
247             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
248             return -1;
249         }
250         break;
251     case AV_CODEC_ID_MJPEG:
252         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
253             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
254             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
255               avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
256              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
257             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
258             return -1;
259         }
260         break;
261     default:
262         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
263             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
264             return -1;
265         }
266     }
267
268     switch (avctx->pix_fmt) {
269     case AV_PIX_FMT_YUVJ422P:
270     case AV_PIX_FMT_YUV422P:
271         s->chroma_format = CHROMA_422;
272         break;
273     case AV_PIX_FMT_YUVJ420P:
274     case AV_PIX_FMT_YUV420P:
275     default:
276         s->chroma_format = CHROMA_420;
277         break;
278     }
279
280     s->bit_rate = avctx->bit_rate;
281     s->width    = avctx->width;
282     s->height   = avctx->height;
283     if (avctx->gop_size > 600 &&
284         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
285         av_log(avctx, AV_LOG_ERROR,
286                "Warning keyframe interval too large! reducing it ...\n");
287         avctx->gop_size = 600;
288     }
289     s->gop_size     = avctx->gop_size;
290     s->avctx        = avctx;
291     s->flags        = avctx->flags;
292     s->flags2       = avctx->flags2;
293     s->max_b_frames = avctx->max_b_frames;
294     s->codec_id     = avctx->codec->id;
295     s->strict_std_compliance = avctx->strict_std_compliance;
296     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
297     s->mpeg_quant         = avctx->mpeg_quant;
298     s->rtp_mode           = !!avctx->rtp_payload_size;
299     s->intra_dc_precision = avctx->intra_dc_precision;
300     s->user_specified_pts = AV_NOPTS_VALUE;
301
302     if (s->gop_size <= 1) {
303         s->intra_only = 1;
304         s->gop_size   = 12;
305     } else {
306         s->intra_only = 0;
307     }
308
309     s->me_method = avctx->me_method;
310
311     /* Fixed QSCALE */
312     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
313
314     s->adaptive_quant = (s->avctx->lumi_masking ||
315                          s->avctx->dark_masking ||
316                          s->avctx->temporal_cplx_masking ||
317                          s->avctx->spatial_cplx_masking  ||
318                          s->avctx->p_masking      ||
319                          s->avctx->border_masking ||
320                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
321                         !s->fixed_qscale;
322
323     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
324
325     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
326         av_log(avctx, AV_LOG_ERROR,
327                "a vbv buffer size is needed, "
328                "for encoding with a maximum bitrate\n");
329         return -1;
330     }
331
332     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
333         av_log(avctx, AV_LOG_INFO,
334                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
335     }
336
337     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
338         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
339         return -1;
340     }
341
342     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
343         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
344         return -1;
345     }
346
347     if (avctx->rc_max_rate &&
348         avctx->rc_max_rate == avctx->bit_rate &&
349         avctx->rc_max_rate != avctx->rc_min_rate) {
350         av_log(avctx, AV_LOG_INFO,
351                "impossible bitrate constraints, this will fail\n");
352     }
353
354     if (avctx->rc_buffer_size &&
355         avctx->bit_rate * (int64_t)avctx->time_base.num >
356             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
357         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
358         return -1;
359     }
360
361     if (!s->fixed_qscale &&
362         avctx->bit_rate * av_q2d(avctx->time_base) >
363             avctx->bit_rate_tolerance) {
364         av_log(avctx, AV_LOG_ERROR,
365                "bitrate tolerance too small for bitrate\n");
366         return -1;
367     }
368
369     if (s->avctx->rc_max_rate &&
370         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
371         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
372          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
373         90000LL * (avctx->rc_buffer_size - 1) >
374             s->avctx->rc_max_rate * 0xFFFFLL) {
375         av_log(avctx, AV_LOG_INFO,
376                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
377                "specified vbv buffer is too large for the given bitrate!\n");
378     }
379
380     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != AV_CODEC_ID_MPEG4 &&
381         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
382         s->codec_id != AV_CODEC_ID_FLV1) {
383         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
384         return -1;
385     }
386
387     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
388         av_log(avctx, AV_LOG_ERROR,
389                "OBMC is only supported with simple mb decision\n");
390         return -1;
391     }
392
393     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
394         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
395         return -1;
396     }
397
398     if (s->max_b_frames                    &&
399         s->codec_id != AV_CODEC_ID_MPEG4      &&
400         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
401         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
402         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
403         return -1;
404     }
405
406     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
407          s->codec_id == AV_CODEC_ID_H263  ||
408          s->codec_id == AV_CODEC_ID_H263P) &&
409         (avctx->sample_aspect_ratio.num > 255 ||
410          avctx->sample_aspect_ratio.den > 255)) {
411         av_log(avctx, AV_LOG_ERROR,
412                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
413                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
414         return -1;
415     }
416
417     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
418         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
419         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
420         return -1;
421     }
422
423     // FIXME mpeg2 uses that too
424     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
425         av_log(avctx, AV_LOG_ERROR,
426                "mpeg2 style quantization not supported by codec\n");
427         return -1;
428     }
429
430     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
431         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
432         return -1;
433     }
434
435     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
436         s->avctx->mb_decision != FF_MB_DECISION_RD) {
437         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
438         return -1;
439     }
440
441     if (s->avctx->scenechange_threshold < 1000000000 &&
442         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
443         av_log(avctx, AV_LOG_ERROR,
444                "closed gop with scene change detection are not supported yet, "
445                "set threshold to 1000000000\n");
446         return -1;
447     }
448
449     if (s->flags & CODEC_FLAG_LOW_DELAY) {
450         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
451             av_log(avctx, AV_LOG_ERROR,
452                   "low delay forcing is only available for mpeg2\n");
453             return -1;
454         }
455         if (s->max_b_frames != 0) {
456             av_log(avctx, AV_LOG_ERROR,
457                    "b frames cannot be used with low delay\n");
458             return -1;
459         }
460     }
461
462     if (s->q_scale_type == 1) {
463         if (avctx->qmax > 12) {
464             av_log(avctx, AV_LOG_ERROR,
465                    "non linear quant only supports qmax <= 12 currently\n");
466             return -1;
467         }
468     }
469
470     if (s->avctx->thread_count > 1         &&
471         s->codec_id != AV_CODEC_ID_MPEG4      &&
472         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
473         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
474         (s->codec_id != AV_CODEC_ID_H263P)) {
475         av_log(avctx, AV_LOG_ERROR,
476                "multi threaded encoding not supported by codec\n");
477         return -1;
478     }
479
480     if (s->avctx->thread_count < 1) {
481         av_log(avctx, AV_LOG_ERROR,
482                "automatic thread number detection not supported by codec,"
483                "patch welcome\n");
484         return -1;
485     }
486
487     if (s->avctx->thread_count > 1)
488         s->rtp_mode = 1;
489
490     if (!avctx->time_base.den || !avctx->time_base.num) {
491         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
492         return -1;
493     }
494
495     i = (INT_MAX / 2 + 128) >> 8;
496     if (avctx->mb_threshold >= i) {
497         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
498                i - 1);
499         return -1;
500     }
501
502     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
503         av_log(avctx, AV_LOG_INFO,
504                "notice: b_frame_strategy only affects the first pass\n");
505         avctx->b_frame_strategy = 0;
506     }
507
508     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
509     if (i > 1) {
510         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
511         avctx->time_base.den /= i;
512         avctx->time_base.num /= i;
513         //return -1;
514     }
515
516     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
517         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
518         // (a + x * 3 / 8) / x
519         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
520         s->inter_quant_bias = 0;
521     } else {
522         s->intra_quant_bias = 0;
523         // (a - x / 4) / x
524         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
525     }
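    /* In fractional terms (annotation added for clarity, assuming
     * QUANT_BIAS_SHIFT == 8): the MPEG/MJPEG intra bias of 3 << 5 == 96 rounds
     * coefficients up by 3/8 of a quantizer step, while the H.263-style inter
     * bias of -(1 << 6) == -64 rounds them down by 1/4, matching the
     * "(a + x * 3 / 8) / x" and "(a - x / 4) / x" comments above. */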
526
527     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
528         s->intra_quant_bias = avctx->intra_quant_bias;
529     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
530         s->inter_quant_bias = avctx->inter_quant_bias;
531
532     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
533                                      &chroma_v_shift);
534
535     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
536         s->avctx->time_base.den > (1 << 16) - 1) {
537         av_log(avctx, AV_LOG_ERROR,
538                "timebase %d/%d not supported by MPEG 4 standard, "
539                "the maximum admitted value for the timebase denominator "
540                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
541                (1 << 16) - 1);
542         return -1;
543     }
544     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
545
546     switch (avctx->codec->id) {
547     case AV_CODEC_ID_MPEG1VIDEO:
548         s->out_format = FMT_MPEG1;
549         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
550         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
551         break;
552     case AV_CODEC_ID_MPEG2VIDEO:
553         s->out_format = FMT_MPEG1;
554         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
555         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
556         s->rtp_mode   = 1;
557         break;
558     case AV_CODEC_ID_LJPEG:
559     case AV_CODEC_ID_MJPEG:
560         s->out_format = FMT_MJPEG;
561         s->intra_only = 1; /* force intra only for jpeg */
562         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
563             avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
564             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
565             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
566             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
567         } else {
568             s->mjpeg_vsample[0] = 2;
569             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
570             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
571             s->mjpeg_hsample[0] = 2;
572             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
573             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
574         }
575         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
576             ff_mjpeg_encode_init(s) < 0)
577             return -1;
578         avctx->delay = 0;
579         s->low_delay = 1;
580         break;
581     case AV_CODEC_ID_H261:
582         if (!CONFIG_H261_ENCODER)
583             return -1;
584         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
585             av_log(avctx, AV_LOG_ERROR,
586                    "The specified picture size of %dx%d is not valid for the "
587                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
588                     s->width, s->height);
589             return -1;
590         }
591         s->out_format = FMT_H261;
592         avctx->delay  = 0;
593         s->low_delay  = 1;
594         break;
595     case AV_CODEC_ID_H263:
596         if (!CONFIG_H263_ENCODER)
597             return -1;
598         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
599                              s->width, s->height) == 8) {
600             av_log(avctx, AV_LOG_INFO,
601                    "The specified picture size of %dx%d is not valid for "
602                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
603                    "352x288, 704x576, and 1408x1152."
604                    "Try H.263+.\n", s->width, s->height);
605             return -1;
606         }
607         s->out_format = FMT_H263;
608         avctx->delay  = 0;
609         s->low_delay  = 1;
610         break;
611     case AV_CODEC_ID_H263P:
612         s->out_format = FMT_H263;
613         s->h263_plus  = 1;
614         /* Fx */
615         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
616         s->modified_quant  = s->h263_aic;
617         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
618         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
619
620         /* /Fx */
621         /* These are just to be sure */
622         avctx->delay = 0;
623         s->low_delay = 1;
624         break;
625     case AV_CODEC_ID_FLV1:
626         s->out_format      = FMT_H263;
627         s->h263_flv        = 2; /* format = 1; 11-bit codes */
628         s->unrestricted_mv = 1;
629         s->rtp_mode  = 0; /* don't allow GOB */
630         avctx->delay = 0;
631         s->low_delay = 1;
632         break;
633     case AV_CODEC_ID_RV10:
634         s->out_format = FMT_H263;
635         avctx->delay  = 0;
636         s->low_delay  = 1;
637         break;
638     case AV_CODEC_ID_RV20:
639         s->out_format      = FMT_H263;
640         avctx->delay       = 0;
641         s->low_delay       = 1;
642         s->modified_quant  = 1;
643         s->h263_aic        = 1;
644         s->h263_plus       = 1;
645         s->loop_filter     = 1;
646         s->unrestricted_mv = 0;
647         break;
648     case AV_CODEC_ID_MPEG4:
649         s->out_format      = FMT_H263;
650         s->h263_pred       = 1;
651         s->unrestricted_mv = 1;
652         s->low_delay       = s->max_b_frames ? 0 : 1;
653         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
654         break;
655     case AV_CODEC_ID_MSMPEG4V2:
656         s->out_format      = FMT_H263;
657         s->h263_pred       = 1;
658         s->unrestricted_mv = 1;
659         s->msmpeg4_version = 2;
660         avctx->delay       = 0;
661         s->low_delay       = 1;
662         break;
663     case AV_CODEC_ID_MSMPEG4V3:
664         s->out_format        = FMT_H263;
665         s->h263_pred         = 1;
666         s->unrestricted_mv   = 1;
667         s->msmpeg4_version   = 3;
668         s->flipflop_rounding = 1;
669         avctx->delay         = 0;
670         s->low_delay         = 1;
671         break;
672     case AV_CODEC_ID_WMV1:
673         s->out_format        = FMT_H263;
674         s->h263_pred         = 1;
675         s->unrestricted_mv   = 1;
676         s->msmpeg4_version   = 4;
677         s->flipflop_rounding = 1;
678         avctx->delay         = 0;
679         s->low_delay         = 1;
680         break;
681     case AV_CODEC_ID_WMV2:
682         s->out_format        = FMT_H263;
683         s->h263_pred         = 1;
684         s->unrestricted_mv   = 1;
685         s->msmpeg4_version   = 5;
686         s->flipflop_rounding = 1;
687         avctx->delay         = 0;
688         s->low_delay         = 1;
689         break;
690     default:
691         return -1;
692     }
693
694     avctx->has_b_frames = !s->low_delay;
695
696     s->encoding = 1;
697
698     s->progressive_frame    =
699     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
700                                                 CODEC_FLAG_INTERLACED_ME) ||
701                                 s->alternate_scan);
702
703     /* init */
704     if (ff_MPV_common_init(s) < 0)
705         return -1;
706
707     if (ARCH_X86)
708         ff_MPV_encode_init_x86(s);
709
710     if (!s->dct_quantize)
711         s->dct_quantize = ff_dct_quantize_c;
712     if (!s->denoise_dct)
713         s->denoise_dct  = denoise_dct_c;
714     s->fast_dct_quantize = s->dct_quantize;
715     if (avctx->trellis)
716         s->dct_quantize  = dct_quantize_trellis_c;
717
718     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
719         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
720
721     s->quant_precision = 5;
722
723     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
724     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
725
726     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
727         ff_h261_encode_init(s);
728     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
729         ff_h263_encode_init(s);
730     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
731         ff_msmpeg4_encode_init(s);
732     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
733         && s->out_format == FMT_MPEG1)
734         ff_mpeg1_encode_init(s);
735
736     /* init q matrix */
737     for (i = 0; i < 64; i++) {
738         int j = s->dsp.idct_permutation[i];
739         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
740             s->mpeg_quant) {
741             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
742             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
743         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
744             s->intra_matrix[j] =
745             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
746         } else {
747             /* mpeg1/2 */
748             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
749             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
750         }
751         if (s->avctx->intra_matrix)
752             s->intra_matrix[j] = s->avctx->intra_matrix[i];
753         if (s->avctx->inter_matrix)
754             s->inter_matrix[j] = s->avctx->inter_matrix[i];
755     }
756
757     /* precompute matrix */
758     /* for mjpeg, we do include qscale in the matrix */
759     if (s->out_format != FMT_MJPEG) {
760         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
761                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
762                           31, 1);
763         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
764                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
765                           31, 0);
766     }
767
768     if (ff_rate_control_init(s) < 0)
769         return -1;
770
771     return 0;
772 }
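/* For orientation (annotation added for clarity): the per-codec encoders built
 * on this file wire these entry points into their AVCodec declarations roughly
 * as sketched below; the real declarations live in files such as mpeg12enc.c,
 * this is only an illustrative outline, not a copy of any of them.
 *
 *     AVCodec ff_example_mpv_encoder = {
 *         .name           = "example",          // hypothetical name
 *         .type           = AVMEDIA_TYPE_VIDEO,
 *         .priv_data_size = sizeof(MpegEncContext),
 *         .init           = ff_MPV_encode_init,
 *         .encode2        = ff_MPV_encode_picture,
 *         .close          = ff_MPV_encode_end,
 *     };
 */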
773
774 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
775 {
776     MpegEncContext *s = avctx->priv_data;
777
778     ff_rate_control_uninit(s);
779
780     ff_MPV_common_end(s);
781     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
782         s->out_format == FMT_MJPEG)
783         ff_mjpeg_encode_close(s);
784
785     av_freep(&avctx->extradata);
786
787     return 0;
788 }
789
790 static int get_sae(uint8_t *src, int ref, int stride)
791 {
792     int x,y;
793     int acc = 0;
794
795     for (y = 0; y < 16; y++) {
796         for (x = 0; x < 16; x++) {
797             acc += FFABS(src[x + y * stride] - ref);
798         }
799     }
800
801     return acc;
802 }
803
804 static int get_intra_count(MpegEncContext *s, uint8_t *src,
805                            uint8_t *ref, int stride)
806 {
807     int x, y, w, h;
808     int acc = 0;
809
810     w = s->width  & ~15;
811     h = s->height & ~15;
812
813     for (y = 0; y < h; y += 16) {
814         for (x = 0; x < w; x += 16) {
815             int offset = x + y * stride;
816             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
817                                      16);
818             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
819             int sae  = get_sae(src + offset, mean, stride);
820
821             acc += sae + 500 < sad;
822         }
823     }
824     return acc;
825 }
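/* Heuristic summary (annotation added for clarity): get_sae() measures how far
 * a 16x16 block deviates from its own mean, a crude proxy for its intra coding
 * cost, and get_intra_count() counts the blocks whose intra cost (plus a fixed
 * offset of 500) still undercuts the SAD against the reference frame, i.e.
 * blocks that look cheaper to code as intra than as inter. The count feeds the
 * b_frame_strategy == 1 decision in select_input_picture(). */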
826
827
828 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
829 {
830     Picture *pic = NULL;
831     int64_t pts;
832     int i, display_picture_number = 0, ret;
833     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
834                                                  (s->low_delay ? 0 : 1);
835     int direct = 1;
836
837     if (pic_arg) {
838         pts = pic_arg->pts;
839         display_picture_number = s->input_picture_number++;
840
841         if (pts != AV_NOPTS_VALUE) {
842             if (s->user_specified_pts != AV_NOPTS_VALUE) {
843                 int64_t time = pts;
844                 int64_t last = s->user_specified_pts;
845
846                 if (time <= last) {
847                     av_log(s->avctx, AV_LOG_ERROR,
848                            "Error, Invalid timestamp=%"PRId64", "
849                            "last=%"PRId64"\n", pts, s->user_specified_pts);
850                     return -1;
851                 }
852
853                 if (!s->low_delay && display_picture_number == 1)
854                     s->dts_delta = time - last;
855             }
856             s->user_specified_pts = pts;
857         } else {
858             if (s->user_specified_pts != AV_NOPTS_VALUE) {
859                 s->user_specified_pts =
860                 pts = s->user_specified_pts + 1;
861                 av_log(s->avctx, AV_LOG_INFO,
862                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
863                        pts);
864             } else {
865                 pts = display_picture_number;
866             }
867         }
868     }
869
870     if (pic_arg) {
871         if (!pic_arg->buf[0])
872             direct = 0;
873         if (pic_arg->linesize[0] != s->linesize)
874             direct = 0;
875         if (pic_arg->linesize[1] != s->uvlinesize)
876             direct = 0;
877         if (pic_arg->linesize[2] != s->uvlinesize)
878             direct = 0;
879
880         av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
881                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
882
883         if (direct) {
884             i = ff_find_unused_picture(s, 1);
885             if (i < 0)
886                 return i;
887
888             pic = &s->picture[i];
889             pic->reference = 3;
890
891             if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
892                 return ret;
893             if (ff_alloc_picture(s, pic, 1) < 0) {
894                 return -1;
895             }
896         } else {
897             i = ff_find_unused_picture(s, 0);
898             if (i < 0)
899                 return i;
900
901             pic = &s->picture[i];
902             pic->reference = 3;
903
904             if (ff_alloc_picture(s, pic, 0) < 0) {
905                 return -1;
906             }
907
908             if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
909                 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
910                 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
911                 // empty
912             } else {
913                 int h_chroma_shift, v_chroma_shift;
914                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
915                                                  &h_chroma_shift,
916                                                  &v_chroma_shift);
917
918                 for (i = 0; i < 3; i++) {
919                     int src_stride = pic_arg->linesize[i];
920                     int dst_stride = i ? s->uvlinesize : s->linesize;
921                     int h_shift = i ? h_chroma_shift : 0;
922                     int v_shift = i ? v_chroma_shift : 0;
923                     int w = s->width  >> h_shift;
924                     int h = s->height >> v_shift;
925                     uint8_t *src = pic_arg->data[i];
926                     uint8_t *dst = pic->f.data[i];
927
928                     if (!s->avctx->rc_buffer_size)
929                         dst += INPLACE_OFFSET;
930
931                     if (src_stride == dst_stride)
932                         memcpy(dst, src, src_stride * h);
933                     else {
934                         while (h--) {
935                             memcpy(dst, src, w);
936                             dst += dst_stride;
937                             src += src_stride;
938                         }
939                     }
940                 }
941             }
942         }
943         ret = av_frame_copy_props(&pic->f, pic_arg);
944         if (ret < 0)
945             return ret;
946
947         pic->f.display_picture_number = display_picture_number;
948         pic->f.pts = pts; // we set this here to avoid modifying pic_arg
949     }
950
951     /* shift buffer entries */
952     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
953         s->input_picture[i - 1] = s->input_picture[i];
954
955     s->input_picture[encoding_delay] = (Picture*) pic;
956
957     return 0;
958 }
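/* Note on the two paths above (annotation added for clarity): when the caller's
 * frame is refcounted and already uses the encoder's strides it is referenced
 * directly; otherwise it is copied into an internally allocated picture. The
 * copy is shifted by INPLACE_OFFSET when no VBV buffer is configured, so that
 * select_input_picture() can later reuse the same buffer as the coded picture
 * instead of allocating another one. */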
959
960 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
961 {
962     int x, y, plane;
963     int score = 0;
964     int64_t score64 = 0;
965
966     for (plane = 0; plane < 3; plane++) {
967         const int stride = p->f.linesize[plane];
968         const int bw = plane ? 1 : 2;
969         for (y = 0; y < s->mb_height * bw; y++) {
970             for (x = 0; x < s->mb_width * bw; x++) {
971                 int off = p->shared ? 0 : 16;
972                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
973                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
974                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
975
976                 switch (s->avctx->frame_skip_exp) {
977                 case 0: score    =  FFMAX(score, v);          break;
978                 case 1: score   += FFABS(v);                  break;
979                 case 2: score   += v * v;                     break;
980                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
981                 case 4: score64 += v * v * (int64_t)(v * v);  break;
982                 }
983             }
984         }
985     }
986
987     if (score)
988         score64 = score;
989
990     if (score64 < s->avctx->frame_skip_threshold)
991         return 1;
992     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
993         return 1;
994     return 0;
995 }
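/* Reading of the switch above (annotation added for clarity): frame_skip_exp
 * picks the norm used to compare the candidate frame against the last coded
 * one: 0 = maximum blockwise difference, 1 = sum of absolute differences,
 * 2 = sum of squares, 3 and 4 = higher powers accumulated in 64 bits. The frame
 * is skipped when that score stays below frame_skip_threshold or below the
 * lambda-scaled limit derived from frame_skip_factor. */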
996
997 static int encode_frame(AVCodecContext *c, AVFrame *frame)
998 {
999     AVPacket pkt = { 0 };
1000     int ret, got_output;
1001
1002     av_init_packet(&pkt);
1003     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1004     if (ret < 0)
1005         return ret;
1006
1007     ret = pkt.size;
1008     av_free_packet(&pkt);
1009     return ret;
1010 }
1011
1012 static int estimate_best_b_count(MpegEncContext *s)
1013 {
1014     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1015     AVCodecContext *c = avcodec_alloc_context3(NULL);
1016     AVFrame input[FF_MAX_B_FRAMES + 2];
1017     const int scale = s->avctx->brd_scale;
1018     int i, j, out_size, p_lambda, b_lambda, lambda2;
1019     int64_t best_rd  = INT64_MAX;
1020     int best_b_count = -1;
1021
1022     assert(scale >= 0 && scale <= 3);
1023
1024     //emms_c();
1025     //s->next_picture_ptr->quality;
1026     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1027     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1028     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1029     if (!b_lambda) // FIXME we should do this somewhere else
1030         b_lambda = p_lambda;
1031     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1032                FF_LAMBDA_SHIFT;
1033
1034     c->width        = s->width  >> scale;
1035     c->height       = s->height >> scale;
1036     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1037                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1038     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1039     c->mb_decision  = s->avctx->mb_decision;
1040     c->me_cmp       = s->avctx->me_cmp;
1041     c->mb_cmp       = s->avctx->mb_cmp;
1042     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1043     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1044     c->time_base    = s->avctx->time_base;
1045     c->max_b_frames = s->max_b_frames;
1046
1047     if (avcodec_open2(c, codec, NULL) < 0)
1048         return -1;
1049
1050     for (i = 0; i < s->max_b_frames + 2; i++) {
1051         int ysize = c->width * c->height;
1052         int csize = (c->width / 2) * (c->height / 2);
1053         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1054                                                 s->next_picture_ptr;
1055
1056         avcodec_get_frame_defaults(&input[i]);
1057         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1058         input[i].data[1]     = input[i].data[0] + ysize;
1059         input[i].data[2]     = input[i].data[1] + csize;
1060         input[i].linesize[0] = c->width;
1061         input[i].linesize[1] =
1062         input[i].linesize[2] = c->width / 2;
1063
1064         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1065             pre_input = *pre_input_ptr;
1066
1067             if (!pre_input.shared && i) {
1068                 pre_input.f.data[0] += INPLACE_OFFSET;
1069                 pre_input.f.data[1] += INPLACE_OFFSET;
1070                 pre_input.f.data[2] += INPLACE_OFFSET;
1071             }
1072
1073             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1074                                  pre_input.f.data[0], pre_input.f.linesize[0],
1075                                  c->width,      c->height);
1076             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1077                                  pre_input.f.data[1], pre_input.f.linesize[1],
1078                                  c->width >> 1, c->height >> 1);
1079             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1080                                  pre_input.f.data[2], pre_input.f.linesize[2],
1081                                  c->width >> 1, c->height >> 1);
1082         }
1083     }
1084
1085     for (j = 0; j < s->max_b_frames + 1; j++) {
1086         int64_t rd = 0;
1087
1088         if (!s->input_picture[j])
1089             break;
1090
1091         c->error[0] = c->error[1] = c->error[2] = 0;
1092
1093         input[0].pict_type = AV_PICTURE_TYPE_I;
1094         input[0].quality   = 1 * FF_QP2LAMBDA;
1095
1096         out_size = encode_frame(c, &input[0]);
1097
1098         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1099
1100         for (i = 0; i < s->max_b_frames + 1; i++) {
1101             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1102
1103             input[i + 1].pict_type = is_p ?
1104                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1105             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1106
1107             out_size = encode_frame(c, &input[i + 1]);
1108
1109             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1110         }
1111
1112         /* get the delayed frames */
1113         while (out_size) {
1114             out_size = encode_frame(c, NULL);
1115             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1116         }
1117
1118         rd += c->error[0] + c->error[1] + c->error[2];
1119
1120         if (rd < best_rd) {
1121             best_rd = rd;
1122             best_b_count = j;
1123         }
1124     }
1125
1126     avcodec_close(c);
1127     av_freep(&c);
1128
1129     for (i = 0; i < s->max_b_frames + 2; i++) {
1130         av_freep(&input[i].data[0]);
1131     }
1132
1133     return best_b_count;
1134 }
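/* What estimate_best_b_count() does, in short (annotation added for clarity):
 * it re-encodes the queued input frames at reduced resolution (shrunk by
 * brd_scale) once for each candidate B-frame count from 0 to max_b_frames,
 * using fixed lambdas taken from the last coded P- and B-frames, and returns
 * the count whose combined cost (scaled packet sizes plus the SSE reported in
 * c->error[]) comes out smallest. */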
1135
1136 static int select_input_picture(MpegEncContext *s)
1137 {
1138     int i, ret;
1139
1140     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1141         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1142     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1143
1144     /* set next picture type & ordering */
1145     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1146         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1147             s->next_picture_ptr == NULL || s->intra_only) {
1148             s->reordered_input_picture[0] = s->input_picture[0];
1149             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1150             s->reordered_input_picture[0]->f.coded_picture_number =
1151                 s->coded_picture_number++;
1152         } else {
1153             int b_frames;
1154
1155             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1156                 if (s->picture_in_gop_number < s->gop_size &&
1157                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1158                     // FIXME check that the gop check above is +-1 correct
1159                     av_frame_unref(&s->input_picture[0]->f);
1160
1161                     emms_c();
1162                     ff_vbv_update(s, 0);
1163
1164                     goto no_output_pic;
1165                 }
1166             }
1167
1168             if (s->flags & CODEC_FLAG_PASS2) {
1169                 for (i = 0; i < s->max_b_frames + 1; i++) {
1170                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1171
1172                     if (pict_num >= s->rc_context.num_entries)
1173                         break;
1174                     if (!s->input_picture[i]) {
1175                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1176                         break;
1177                     }
1178
1179                     s->input_picture[i]->f.pict_type =
1180                         s->rc_context.entry[pict_num].new_pict_type;
1181                 }
1182             }
1183
1184             if (s->avctx->b_frame_strategy == 0) {
1185                 b_frames = s->max_b_frames;
1186                 while (b_frames && !s->input_picture[b_frames])
1187                     b_frames--;
1188             } else if (s->avctx->b_frame_strategy == 1) {
1189                 for (i = 1; i < s->max_b_frames + 1; i++) {
1190                     if (s->input_picture[i] &&
1191                         s->input_picture[i]->b_frame_score == 0) {
1192                         s->input_picture[i]->b_frame_score =
1193                             get_intra_count(s,
1194                                             s->input_picture[i    ]->f.data[0],
1195                                             s->input_picture[i - 1]->f.data[0],
1196                                             s->linesize) + 1;
1197                     }
1198                 }
1199                 for (i = 0; i < s->max_b_frames + 1; i++) {
1200                     if (s->input_picture[i] == NULL ||
1201                         s->input_picture[i]->b_frame_score - 1 >
1202                             s->mb_num / s->avctx->b_sensitivity)
1203                         break;
1204                 }
1205
1206                 b_frames = FFMAX(0, i - 1);
1207
1208                 /* reset scores */
1209                 for (i = 0; i < b_frames + 1; i++) {
1210                     s->input_picture[i]->b_frame_score = 0;
1211                 }
1212             } else if (s->avctx->b_frame_strategy == 2) {
1213                 b_frames = estimate_best_b_count(s);
1214             } else {
1215                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1216                 b_frames = 0;
1217             }
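            /* Summary of the branches above (annotation added for clarity):
             * b_frame_strategy 0 always schedules max_b_frames B-frames,
             * 1 trims the run based on the get_intra_count() score of each
             * queued frame, and 2 runs the full trial encode in
             * estimate_best_b_count(). */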
1218
1219             emms_c();
1220
1221             for (i = b_frames - 1; i >= 0; i--) {
1222                 int type = s->input_picture[i]->f.pict_type;
1223                 if (type && type != AV_PICTURE_TYPE_B)
1224                     b_frames = i;
1225             }
1226             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1227                 b_frames == s->max_b_frames) {
1228                 av_log(s->avctx, AV_LOG_ERROR,
1229                        "warning, too many b frames in a row\n");
1230             }
1231
1232             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1233                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1234                     s->gop_size > s->picture_in_gop_number) {
1235                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1236                 } else {
1237                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1238                         b_frames = 0;
1239                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1240                 }
1241             }
1242
1243             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1244                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1245                 b_frames--;
1246
1247             s->reordered_input_picture[0] = s->input_picture[b_frames];
1248             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1249                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1250             s->reordered_input_picture[0]->f.coded_picture_number =
1251                 s->coded_picture_number++;
1252             for (i = 0; i < b_frames; i++) {
1253                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1254                 s->reordered_input_picture[i + 1]->f.pict_type =
1255                     AV_PICTURE_TYPE_B;
1256                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1257                     s->coded_picture_number++;
1258             }
1259         }
1260     }
1261 no_output_pic:
1262     if (s->reordered_input_picture[0]) {
1263         s->reordered_input_picture[0]->reference =
1264            s->reordered_input_picture[0]->f.pict_type !=
1265                AV_PICTURE_TYPE_B ? 3 : 0;
1266
1267         ff_mpeg_unref_picture(s, &s->new_picture);
1268         if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1269             return ret;
1270
1271         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1272             // input is a shared pix, so we can't modify it -> alloc a new
1273             // one & ensure that the shared one is reusable
1274
1275             Picture *pic;
1276             int i = ff_find_unused_picture(s, 0);
1277             if (i < 0)
1278                 return i;
1279             pic = &s->picture[i];
1280
1281             pic->reference = s->reordered_input_picture[0]->reference;
1282             if (ff_alloc_picture(s, pic, 0) < 0) {
1283                 return -1;
1284             }
1285
1286             ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1287             if (ret < 0)
1288                 return ret;
1289
1290             /* mark us unused / free shared pic */
1291             av_frame_unref(&s->reordered_input_picture[0]->f);
1292             s->reordered_input_picture[0]->shared = 0;
1293
1294             s->current_picture_ptr = pic;
1295         } else {
1296             // input is not a shared pix -> reuse buffer for current_pix
1297             s->current_picture_ptr = s->reordered_input_picture[0];
1298             for (i = 0; i < 4; i++) {
1299                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1300             }
1301         }
1302         ff_mpeg_unref_picture(s, &s->current_picture);
1303         if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1304                                        s->current_picture_ptr)) < 0)
1305             return ret;
1306
1307         s->picture_number = s->new_picture.f.display_picture_number;
1308     } else {
1309         ff_mpeg_unref_picture(s, &s->new_picture);
1310     }
1311     return 0;
1312 }
1313
1314 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1315                           const AVFrame *pic_arg, int *got_packet)
1316 {
1317     MpegEncContext *s = avctx->priv_data;
1318     int i, stuffing_count, ret;
1319     int context_count = s->slice_context_count;
1320
1321     s->picture_in_gop_number++;
1322
1323     if (load_input_picture(s, pic_arg) < 0)
1324         return -1;
1325
1326     if (select_input_picture(s) < 0) {
1327         return -1;
1328     }
1329
1330     /* output? */
1331     if (s->new_picture.f.data[0]) {
1332         if (!pkt->data &&
1333             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1334             return ret;
1335         if (s->mb_info) {
1336             s->mb_info_ptr = av_packet_new_side_data(pkt,
1337                                  AV_PKT_DATA_H263_MB_INFO,
1338                                  s->mb_width*s->mb_height*12);
1339             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1340         }
1341
1342         for (i = 0; i < context_count; i++) {
1343             int start_y = s->thread_context[i]->start_mb_y;
1344             int   end_y = s->thread_context[i]->  end_mb_y;
1345             int h       = s->mb_height;
1346             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1347             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1348
1349             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1350         }
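        /* The output buffer is pre-split in proportion to each slice context's
         * share of macroblock rows, so the per-thread PutBitContexts can write
         * concurrently without overlapping (annotation added for clarity). */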
1351
1352         s->pict_type = s->new_picture.f.pict_type;
1353         //emms_c();
1354         ff_MPV_frame_start(s, avctx);
1355 vbv_retry:
1356         if (encode_picture(s, s->picture_number) < 0)
1357             return -1;
1358
1359         avctx->header_bits = s->header_bits;
1360         avctx->mv_bits     = s->mv_bits;
1361         avctx->misc_bits   = s->misc_bits;
1362         avctx->i_tex_bits  = s->i_tex_bits;
1363         avctx->p_tex_bits  = s->p_tex_bits;
1364         avctx->i_count     = s->i_count;
1365         // FIXME f/b_count in avctx
1366         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1367         avctx->skip_count  = s->skip_count;
1368
1369         ff_MPV_frame_end(s);
1370
1371         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1372             ff_mjpeg_encode_picture_trailer(s);
1373
1374         if (avctx->rc_buffer_size) {
1375             RateControlContext *rcc = &s->rc_context;
1376             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1377
1378             if (put_bits_count(&s->pb) > max_size &&
1379                 s->lambda < s->avctx->lmax) {
1380                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1381                                        (s->qscale + 1) / s->qscale);
1382                 if (s->adaptive_quant) {
1383                     int i;
1384                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1385                         s->lambda_table[i] =
1386                             FFMAX(s->lambda_table[i] + 1,
1387                                   s->lambda_table[i] * (s->qscale + 1) /
1388                                   s->qscale);
1389                 }
1390                 s->mb_skipped = 0;        // done in MPV_frame_start()
1391                 // done in encode_picture() so we must undo it
1392                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1393                     if (s->flipflop_rounding          ||
1394                         s->codec_id == AV_CODEC_ID_H263P ||
1395                         s->codec_id == AV_CODEC_ID_MPEG4)
1396                         s->no_rounding ^= 1;
1397                 }
1398                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1399                     s->time_base       = s->last_time_base;
1400                     s->last_non_b_time = s->time - s->pp_time;
1401                 }
1402                 for (i = 0; i < context_count; i++) {
1403                     PutBitContext *pb = &s->thread_context[i]->pb;
1404                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1405                 }
1406                 goto vbv_retry;
1407             }
1408
1409             assert(s->avctx->rc_max_rate);
1410         }
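        /* vbv_retry in one sentence (annotation added for clarity): if the
         * frame just coded would overflow its share of the VBV buffer, the
         * target lambda (and the per-MB lambda table) is raised, the per-frame
         * state changes made while encoding are undone, the per-thread bit
         * writers are reset, and the whole frame is encoded again. */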
1411
1412         if (s->flags & CODEC_FLAG_PASS1)
1413             ff_write_pass1_stats(s);
1414
1415         for (i = 0; i < 4; i++) {
1416             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1417             avctx->error[i] += s->current_picture_ptr->f.error[i];
1418         }
1419
1420         if (s->flags & CODEC_FLAG_PASS1)
1421             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1422                    avctx->i_tex_bits + avctx->p_tex_bits ==
1423                        put_bits_count(&s->pb));
1424         flush_put_bits(&s->pb);
1425         s->frame_bits  = put_bits_count(&s->pb);
1426
1427         stuffing_count = ff_vbv_update(s, s->frame_bits);
1428         if (stuffing_count) {
1429             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1430                     stuffing_count + 50) {
1431                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1432                 return -1;
1433             }
1434
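            /* Insert the stuffing requested by the rate controller: MPEG-1/2
             * append plain zero bytes, MPEG-4 writes the 32-bit code
             * 0x000001C3 followed by 0xFF filler bytes. */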
1435             switch (s->codec_id) {
1436             case AV_CODEC_ID_MPEG1VIDEO:
1437             case AV_CODEC_ID_MPEG2VIDEO:
1438                 while (stuffing_count--) {
1439                     put_bits(&s->pb, 8, 0);
1440                 }
1441             break;
1442             case AV_CODEC_ID_MPEG4:
1443                 put_bits(&s->pb, 16, 0);
1444                 put_bits(&s->pb, 16, 0x1C3);
1445                 stuffing_count -= 4;
1446                 while (stuffing_count--) {
1447                     put_bits(&s->pb, 8, 0xFF);
1448                 }
1449             break;
1450             default:
1451                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1452             }
1453             flush_put_bits(&s->pb);
1454             s->frame_bits  = put_bits_count(&s->pb);
1455         }
1456
1457         /* update mpeg1/2 vbv_delay for CBR */
1458         if (s->avctx->rc_max_rate                          &&
1459             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1460             s->out_format == FMT_MPEG1                     &&
1461             90000LL * (avctx->rc_buffer_size - 1) <=
1462                 s->avctx->rc_max_rate * 0xFFFFLL) {
1463             int vbv_delay, min_delay;
1464             double inbits  = s->avctx->rc_max_rate *
1465                              av_q2d(s->avctx->time_base);
1466             int    minbits = s->frame_bits - 8 *
1467                              (s->vbv_delay_ptr - s->pb.buf - 1);
1468             double bits    = s->rc_context.buffer_index + minbits - inbits;
1469
1470             if (bits < 0)
1471                 av_log(s->avctx, AV_LOG_ERROR,
1472                        "Internal error, negative bits\n");
1473
1474             assert(s->repeat_first_field == 0);
1475
1476             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1477             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1478                         s->avctx->rc_max_rate;
1479
1480             vbv_delay = FFMAX(vbv_delay, min_delay);
1481
1482             assert(vbv_delay < 0xFFFF);
1483
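            /* Patch the 16-bit vbv_delay field back into the picture header
             * already in the buffer: it straddles three bytes, so the top 3
             * bits go into the low bits of the first byte, the next 8 bits
             * fill the second byte and the low 5 bits occupy the high bits of
             * the third. The field counts 90 kHz ticks; multiplying by 300
             * gives the 27 MHz value exported in avctx->vbv_delay. */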
1484             s->vbv_delay_ptr[0] &= 0xF8;
1485             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1486             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1487             s->vbv_delay_ptr[2] &= 0x07;
1488             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1489             avctx->vbv_delay     = vbv_delay * 300;
1490         }
1491         s->total_bits     += s->frame_bits;
1492         avctx->frame_bits  = s->frame_bits;
1493
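        /* Timestamps: with B-frame reordering (low_delay off) the first
         * packet gets pts - dts_delta as its DTS, later packets reuse
         * s->reordered_pts, which is refreshed on every call from the picture
         * at the head of the input queue; without reordering DTS equals PTS. */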
1494         pkt->pts = s->current_picture.f.pts;
1495         if (!s->low_delay) {
1496             if (!s->current_picture.f.coded_picture_number)
1497                 pkt->dts = pkt->pts - s->dts_delta;
1498             else
1499                 pkt->dts = s->reordered_pts;
1500             s->reordered_pts = s->input_picture[0]->f.pts;
1501         } else
1502             pkt->dts = pkt->pts;
1503         if (s->current_picture.f.key_frame)
1504             pkt->flags |= AV_PKT_FLAG_KEY;
1505         if (s->mb_info)
1506             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1507     } else {
1508         s->frame_bits = 0;
1509     }
1510     assert((s->frame_bits & 7) == 0);
1511
1512     pkt->size = s->frame_bits / 8;
1513     *got_packet = !!pkt->size;
1514     return 0;
1515 }
1516
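/* Zero out a block that contains nothing but a few isolated +-1 coefficients:
 * each +-1 is weighted by tab[] according to the length of the zero run that
 * precedes it in scan order, and if the total weight stays below the given
 * threshold the whole block is cleared (keeping the DC coefficient when
 * skip_dc is set). Any coefficient with |level| > 1 keeps the block as is. */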
1517 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1518                                                 int n, int threshold)
1519 {
1520     static const char tab[64] = {
1521         3, 2, 2, 1, 1, 1, 1, 1,
1522         1, 1, 1, 1, 1, 1, 1, 1,
1523         1, 1, 1, 1, 1, 1, 1, 1,
1524         0, 0, 0, 0, 0, 0, 0, 0,
1525         0, 0, 0, 0, 0, 0, 0, 0,
1526         0, 0, 0, 0, 0, 0, 0, 0,
1527         0, 0, 0, 0, 0, 0, 0, 0,
1528         0, 0, 0, 0, 0, 0, 0, 0
1529     };
1530     int score = 0;
1531     int run = 0;
1532     int i;
1533     int16_t *block = s->block[n];
1534     const int last_index = s->block_last_index[n];
1535     int skip_dc;
1536
1537     if (threshold < 0) {
1538         skip_dc = 0;
1539         threshold = -threshold;
1540     } else
1541         skip_dc = 1;
1542
1543     /* Is everything we could set to zero already zero? */
1544     if (last_index <= skip_dc - 1)
1545         return;
1546
1547     for (i = 0; i <= last_index; i++) {
1548         const int j = s->intra_scantable.permutated[i];
1549         const int level = FFABS(block[j]);
1550         if (level == 1) {
1551             if (skip_dc && i == 0)
1552                 continue;
1553             score += tab[run];
1554             run = 0;
1555         } else if (level > 1) {
1556             return;
1557         } else {
1558             run++;
1559         }
1560     }
1561     if (score >= threshold)
1562         return;
1563     for (i = skip_dc; i <= last_index; i++) {
1564         const int j = s->intra_scantable.permutated[i];
1565         block[j] = 0;
1566     }
1567     if (block[0])
1568         s->block_last_index[n] = 0;
1569     else
1570         s->block_last_index[n] = -1;
1571 }
1572
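/* Clamp quantized coefficients to the [min_qcoeff, max_qcoeff] range that the
 * entropy coder can represent; the intra DC coefficient is left untouched.
 * The warning is only printed when the simple macroblock decision is in use. */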
1573 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1574                                int last_index)
1575 {
1576     int i;
1577     const int maxlevel = s->max_qcoeff;
1578     const int minlevel = s->min_qcoeff;
1579     int overflow = 0;
1580
1581     if (s->mb_intra) {
1582         i = 1; // skip clipping of intra dc
1583     } else
1584         i = 0;
1585
1586     for (; i <= last_index; i++) {
1587         const int j = s->intra_scantable.permutated[i];
1588         int level = block[j];
1589
1590         if (level > maxlevel) {
1591             level = maxlevel;
1592             overflow++;
1593         } else if (level < minlevel) {
1594             level = minlevel;
1595             overflow++;
1596         }
1597
1598         block[j] = level;
1599     }
1600
1601     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1602         av_log(s->avctx, AV_LOG_INFO,
1603                "warning, clipping %d dct coefficients to %d..%d\n",
1604                overflow, minlevel, maxlevel);
1605 }
1606
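/* Compute per-coefficient weights for quantizer noise shaping: each weight is
 * 36 times the standard deviation of the pixel's 3x3 neighbourhood (clipped
 * at the block border), so dct_quantize_refine() can weight the quantization
 * error by local activity. */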
1607 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1608 {
1609     int x, y;
1610     // FIXME optimize
1611     for (y = 0; y < 8; y++) {
1612         for (x = 0; x < 8; x++) {
1613             int x2, y2;
1614             int sum = 0;
1615             int sqr = 0;
1616             int count = 0;
1617
1618             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1619                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1620                     int v = ptr[x2 + y2 * stride];
1621                     sum += v;
1622                     sqr += v * v;
1623                     count++;
1624                 }
1625             }
1626             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1627         }
1628     }
1629 }
1630
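/* Encode a single macroblock: fetch the source pixels (intra) or form the
 * motion-compensated prediction and its residual (inter), optionally switch
 * to field (interlaced) DCT, transform and quantize each block, apply
 * coefficient elimination, and finally pass the blocks to the codec-specific
 * bitstream writer. mb_block_height/mb_block_count distinguish 4:2:0 (8/6)
 * from 4:2:2 (16/8). */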
1631 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1632                                                 int motion_x, int motion_y,
1633                                                 int mb_block_height,
1634                                                 int mb_block_count)
1635 {
1636     int16_t weight[8][64];
1637     int16_t orig[8][64];
1638     const int mb_x = s->mb_x;
1639     const int mb_y = s->mb_y;
1640     int i;
1641     int skip_dct[8];
1642     int dct_offset = s->linesize * 8; // default for progressive frames
1643     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1644     int wrap_y, wrap_c;
1645
1646     for (i = 0; i < mb_block_count; i++)
1647         skip_dct[i] = s->skipdct;
1648
1649     if (s->adaptive_quant) {
1650         const int last_qp = s->qscale;
1651         const int mb_xy = mb_x + mb_y * s->mb_stride;
1652
1653         s->lambda = s->lambda_table[mb_xy];
1654         update_qscale(s);
1655
1656         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1657             s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1658             s->dquant = s->qscale - last_qp;
1659
1660             if (s->out_format == FMT_H263) {
1661                 s->dquant = av_clip(s->dquant, -2, 2);
1662
1663                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1664                     if (!s->mb_intra) {
1665                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1666                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1667                                 s->dquant = 0;
1668                         }
1669                         if (s->mv_type == MV_TYPE_8X8)
1670                             s->dquant = 0;
1671                     }
1672                 }
1673             }
1674         }
1675         ff_set_qscale(s, last_qp + s->dquant);
1676     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1677         ff_set_qscale(s, s->qscale + s->dquant);
1678
1679     wrap_y = s->linesize;
1680     wrap_c = s->uvlinesize;
1681     ptr_y  = s->new_picture.f.data[0] +
1682              (mb_y * 16 * wrap_y)              + mb_x * 16;
1683     ptr_cb = s->new_picture.f.data[1] +
1684              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1685     ptr_cr = s->new_picture.f.data[2] +
1686              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1687
1688     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1689         uint8_t *ebuf = s->edge_emu_buffer + 32;
1690         s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1691                                  mb_y * 16, s->width, s->height);
1692         ptr_y = ebuf;
1693         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1694                                  mb_block_height, mb_x * 8, mb_y * 8,
1695                                  s->width >> 1, s->height >> 1);
1696         ptr_cb = ebuf + 18 * wrap_y;
1697         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1698                                  mb_block_height, mb_x * 8, mb_y * 8,
1699                                  s->width >> 1, s->height >> 1);
1700         ptr_cr = ebuf + 18 * wrap_y + 8;
1701     }
1702
1703     if (s->mb_intra) {
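        /* Frame/field DCT decision: score the two 8-line halves in frame
         * order (with a bias of 400 in favour of frame DCT) against the same
         * comparison in field order; if the field split scores lower, use
         * interlaced DCT and double the luma (and, for 4:2:2, chroma) line
         * stride. */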
1704         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1705             int progressive_score, interlaced_score;
1706
1707             s->interlaced_dct = 0;
1708             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1709                                                     NULL, wrap_y, 8) +
1710                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1711                                                     NULL, wrap_y, 8) - 400;
1712
1713             if (progressive_score > 0) {
1714                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1715                                                        NULL, wrap_y * 2, 8) +
1716                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1717                                                        NULL, wrap_y * 2, 8);
1718                 if (progressive_score > interlaced_score) {
1719                     s->interlaced_dct = 1;
1720
1721                     dct_offset = wrap_y;
1722                     wrap_y <<= 1;
1723                     if (s->chroma_format == CHROMA_422)
1724                         wrap_c <<= 1;
1725                 }
1726             }
1727         }
1728
1729         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1730         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1731         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1732         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1733
1734         if (s->flags & CODEC_FLAG_GRAY) {
1735             skip_dct[4] = 1;
1736             skip_dct[5] = 1;
1737         } else {
1738             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1739             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1740             if (!s->chroma_y_shift) { /* 422 */
1741                 s->dsp.get_pixels(s->block[6],
1742                                   ptr_cb + (dct_offset >> 1), wrap_c);
1743                 s->dsp.get_pixels(s->block[7],
1744                                   ptr_cr + (dct_offset >> 1), wrap_c);
1745             }
1746         }
1747     } else {
1748         op_pixels_func (*op_pix)[4];
1749         qpel_mc_func (*op_qpix)[16];
1750         uint8_t *dest_y, *dest_cb, *dest_cr;
1751
1752         dest_y  = s->dest[0];
1753         dest_cb = s->dest[1];
1754         dest_cr = s->dest[2];
1755
1756         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1757             op_pix  = s->hdsp.put_pixels_tab;
1758             op_qpix = s->dsp.put_qpel_pixels_tab;
1759         } else {
1760             op_pix  = s->hdsp.put_no_rnd_pixels_tab;
1761             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1762         }
1763
1764         if (s->mv_dir & MV_DIR_FORWARD) {
1765             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1766                           s->last_picture.f.data,
1767                           op_pix, op_qpix);
1768             op_pix  = s->hdsp.avg_pixels_tab;
1769             op_qpix = s->dsp.avg_qpel_pixels_tab;
1770         }
1771         if (s->mv_dir & MV_DIR_BACKWARD) {
1772             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1773                           s->next_picture.f.data,
1774                           op_pix, op_qpix);
1775         }
1776
1777         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1778             int progressive_score, interlaced_score;
1779
1780             s->interlaced_dct = 0;
1781             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1782                                                     ptr_y,              wrap_y,
1783                                                     8) +
1784                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1785                                                     ptr_y + wrap_y * 8, wrap_y,
1786                                                     8) - 400;
1787
1788             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1789                 progressive_score -= 400;
1790
1791             if (progressive_score > 0) {
1792                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1793                                                        ptr_y,
1794                                                        wrap_y * 2, 8) +
1795                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1796                                                        ptr_y + wrap_y,
1797                                                        wrap_y * 2, 8);
1798
1799                 if (progressive_score > interlaced_score) {
1800                     s->interlaced_dct = 1;
1801
1802                     dct_offset = wrap_y;
1803                     wrap_y <<= 1;
1804                     if (s->chroma_format == CHROMA_422)
1805                         wrap_c <<= 1;
1806                 }
1807             }
1808         }
1809
1810         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1811         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1812         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1813                            dest_y + dct_offset, wrap_y);
1814         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1815                            dest_y + dct_offset + 8, wrap_y);
1816
1817         if (s->flags & CODEC_FLAG_GRAY) {
1818             skip_dct[4] = 1;
1819             skip_dct[5] = 1;
1820         } else {
1821             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1822             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1823             if (!s->chroma_y_shift) { /* 422 */
1824                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1825                                    dest_cb + (dct_offset >> 1), wrap_c);
1826                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1827                                    dest_cr + (dct_offset >> 1), wrap_c);
1828             }
1829         }
1830         /* pre-quantization: skip the DCT of blocks whose residual SAD is
              * small enough that it would most likely quantize to zero */
1831         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1832                 2 * s->qscale * s->qscale) {
1833             // FIXME optimize
1834             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1835                               wrap_y, 8) < 20 * s->qscale)
1836                 skip_dct[0] = 1;
1837             if (s->dsp.sad[1](NULL, ptr_y + 8,
1838                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1839                 skip_dct[1] = 1;
1840             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1841                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1842                 skip_dct[2] = 1;
1843             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1844                               dest_y + dct_offset + 8,
1845                               wrap_y, 8) < 20 * s->qscale)
1846                 skip_dct[3] = 1;
1847             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1848                               wrap_c, 8) < 20 * s->qscale)
1849                 skip_dct[4] = 1;
1850             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1851                               wrap_c, 8) < 20 * s->qscale)
1852                 skip_dct[5] = 1;
1853             if (!s->chroma_y_shift) { /* 422 */
1854                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1855                                   dest_cb + (dct_offset >> 1),
1856                                   wrap_c, 8) < 20 * s->qscale)
1857                     skip_dct[6] = 1;
1858                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1859                                   dest_cr + (dct_offset >> 1),
1860                                   wrap_c, 8) < 20 * s->qscale)
1861                     skip_dct[7] = 1;
1862             }
1863         }
1864     }
1865
1866     if (s->quantizer_noise_shaping) {
1867         if (!skip_dct[0])
1868             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1869         if (!skip_dct[1])
1870             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1871         if (!skip_dct[2])
1872             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1873         if (!skip_dct[3])
1874             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1875         if (!skip_dct[4])
1876             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1877         if (!skip_dct[5])
1878             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1879         if (!s->chroma_y_shift) { /* 422 */
1880             if (!skip_dct[6])
1881                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1882                                   wrap_c);
1883             if (!skip_dct[7])
1884                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1885                                   wrap_c);
1886         }
1887         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1888     }
1889
1890     /* DCT & quantize */
1891     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1892     {
1893         for (i = 0; i < mb_block_count; i++) {
1894             if (!skip_dct[i]) {
1895                 int overflow;
1896                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1897                 // FIXME we could decide to change the quantizer instead of
1898                 // clipping
1899                 // JS: I don't think that would be a good idea, it could lower
1900                 //     quality instead of improving it. Just INTRADC clipping
1901                 //     deserves changes in the quantizer
1902                 if (overflow)
1903                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1904             } else
1905                 s->block_last_index[i] = -1;
1906         }
1907         if (s->quantizer_noise_shaping) {
1908             for (i = 0; i < mb_block_count; i++) {
1909                 if (!skip_dct[i]) {
1910                     s->block_last_index[i] =
1911                         dct_quantize_refine(s, s->block[i], weight[i],
1912                                             orig[i], i, s->qscale);
1913                 }
1914             }
1915         }
1916
1917         if (s->luma_elim_threshold && !s->mb_intra)
1918             for (i = 0; i < 4; i++)
1919                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1920         if (s->chroma_elim_threshold && !s->mb_intra)
1921             for (i = 4; i < mb_block_count; i++)
1922                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1923
1924         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1925             for (i = 0; i < mb_block_count; i++) {
1926                 if (s->block_last_index[i] == -1)
1927                     s->coded_score[i] = INT_MAX / 256;
1928             }
1929         }
1930     }
1931
1932     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1933         s->block_last_index[4] =
1934         s->block_last_index[5] = 0;
1935         s->block[4][0] =
1936         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1937     }
1938
1939     // FIXME: the non-C quantize code returns an incorrect block_last_index
1940     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1941         for (i = 0; i < mb_block_count; i++) {
1942             int j;
1943             if (s->block_last_index[i] > 0) {
1944                 for (j = 63; j > 0; j--) {
1945                     if (s->block[i][s->intra_scantable.permutated[j]])
1946                         break;
1947                 }
1948                 s->block_last_index[i] = j;
1949             }
1950         }
1951     }
1952
1953     /* huffman encode */
1954     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
1955     case AV_CODEC_ID_MPEG1VIDEO:
1956     case AV_CODEC_ID_MPEG2VIDEO:
1957         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1958             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1959         break;
1960     case AV_CODEC_ID_MPEG4:
1961         if (CONFIG_MPEG4_ENCODER)
1962             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1963         break;
1964     case AV_CODEC_ID_MSMPEG4V2:
1965     case AV_CODEC_ID_MSMPEG4V3:
1966     case AV_CODEC_ID_WMV1:
1967         if (CONFIG_MSMPEG4_ENCODER)
1968             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1969         break;
1970     case AV_CODEC_ID_WMV2:
1971         if (CONFIG_WMV2_ENCODER)
1972             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1973         break;
1974     case AV_CODEC_ID_H261:
1975         if (CONFIG_H261_ENCODER)
1976             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1977         break;
1978     case AV_CODEC_ID_H263:
1979     case AV_CODEC_ID_H263P:
1980     case AV_CODEC_ID_FLV1:
1981     case AV_CODEC_ID_RV10:
1982     case AV_CODEC_ID_RV20:
1983         if (CONFIG_H263_ENCODER)
1984             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
1985         break;
1986     case AV_CODEC_ID_MJPEG:
1987         if (CONFIG_MJPEG_ENCODER)
1988             ff_mjpeg_encode_mb(s, s->block);
1989         break;
1990     default:
1991         assert(0);
1992     }
1993 }
1994
1995 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
1996 {
1997     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
1998     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
1999 }
2000
2001 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2002     int i;
2003
2004     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2005
2006     /* mpeg1 */
2007     d->mb_skip_run= s->mb_skip_run;
2008     for(i=0; i<3; i++)
2009         d->last_dc[i] = s->last_dc[i];
2010
2011     /* statistics */
2012     d->mv_bits= s->mv_bits;
2013     d->i_tex_bits= s->i_tex_bits;
2014     d->p_tex_bits= s->p_tex_bits;
2015     d->i_count= s->i_count;
2016     d->f_count= s->f_count;
2017     d->b_count= s->b_count;
2018     d->skip_count= s->skip_count;
2019     d->misc_bits= s->misc_bits;
2020     d->last_bits= 0;
2021
2022     d->mb_skipped= 0;
2023     d->qscale= s->qscale;
2024     d->dquant= s->dquant;
2025
2026     d->esc3_level_length= s->esc3_level_length;
2027 }
2028
2029 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2030     int i;
2031
2032     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2033     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2034
2035     /* mpeg1 */
2036     d->mb_skip_run= s->mb_skip_run;
2037     for(i=0; i<3; i++)
2038         d->last_dc[i] = s->last_dc[i];
2039
2040     /* statistics */
2041     d->mv_bits= s->mv_bits;
2042     d->i_tex_bits= s->i_tex_bits;
2043     d->p_tex_bits= s->p_tex_bits;
2044     d->i_count= s->i_count;
2045     d->f_count= s->f_count;
2046     d->b_count= s->b_count;
2047     d->skip_count= s->skip_count;
2048     d->misc_bits= s->misc_bits;
2049
2050     d->mb_intra= s->mb_intra;
2051     d->mb_skipped= s->mb_skipped;
2052     d->mv_type= s->mv_type;
2053     d->mv_dir= s->mv_dir;
2054     d->pb= s->pb;
2055     if(s->data_partitioning){
2056         d->pb2= s->pb2;
2057         d->tex_pb= s->tex_pb;
2058     }
2059     d->block= s->block;
2060     for(i=0; i<8; i++)
2061         d->block_last_index[i]= s->block_last_index[i];
2062     d->interlaced_dct= s->interlaced_dct;
2063     d->qscale= s->qscale;
2064
2065     d->esc3_level_length= s->esc3_level_length;
2066 }
2067
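/* Trial-encode one macroblock candidate into one of two scratch bit buffers
 * (and, when *next_block is set, decode into a scratch pixel buffer instead
 * of the real destination), score it by bits used -- or by SSE plus
 * lambda-weighted bits when FF_MB_DECISION_RD is enabled -- and keep it as
 * the new best choice if the score beats *dmin. */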
2068 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2069                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2070                            int *dmin, int *next_block, int motion_x, int motion_y)
2071 {
2072     int score;
2073     uint8_t *dest_backup[3];
2074
2075     copy_context_before_encode(s, backup, type);
2076
2077     s->block= s->blocks[*next_block];
2078     s->pb= pb[*next_block];
2079     if(s->data_partitioning){
2080         s->pb2   = pb2   [*next_block];
2081         s->tex_pb= tex_pb[*next_block];
2082     }
2083
2084     if(*next_block){
2085         memcpy(dest_backup, s->dest, sizeof(s->dest));
2086         s->dest[0] = s->rd_scratchpad;
2087         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2088         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2089         assert(s->linesize >= 32); //FIXME
2090     }
2091
2092     encode_mb(s, motion_x, motion_y);
2093
2094     score= put_bits_count(&s->pb);
2095     if(s->data_partitioning){
2096         score+= put_bits_count(&s->pb2);
2097         score+= put_bits_count(&s->tex_pb);
2098     }
2099
2100     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2101         ff_MPV_decode_mb(s, s->block);
2102
2103         score *= s->lambda2;
2104         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2105     }
2106
2107     if(*next_block){
2108         memcpy(s->dest, dest_backup, sizeof(s->dest));
2109     }
2110
2111     if(score<*dmin){
2112         *dmin= score;
2113         *next_block^=1;
2114
2115         copy_context_after_encode(best, s, type);
2116     }
2117 }
2118
2119 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2120     uint32_t *sq = ff_squareTbl + 256;
2121     int acc=0;
2122     int x,y;
2123
2124     if(w==16 && h==16)
2125         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2126     else if(w==8 && h==8)
2127         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2128
2129     for(y=0; y<h; y++){
2130         for(x=0; x<w; x++){
2131             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2132         }
2133     }
2134
2135     assert(acc>=0);
2136
2137     return acc;
2138 }
2139
2140 static int sse_mb(MpegEncContext *s){
2141     int w= 16;
2142     int h= 16;
2143
2144     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2145     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2146
2147     if(w==16 && h==16)
2148       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2149         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2150                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2151                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2152       }else{
2153         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2154                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2155                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2156       }
2157     else
2158         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2159                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2160                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2161 }
2162
2163 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2164     MpegEncContext *s= *(void**)arg;
2165
2166
2167     s->me.pre_pass=1;
2168     s->me.dia_size= s->avctx->pre_dia_size;
2169     s->first_slice_line=1;
2170     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2171         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2172             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2173         }
2174         s->first_slice_line=0;
2175     }
2176
2177     s->me.pre_pass=0;
2178
2179     return 0;
2180 }
2181
2182 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2183     MpegEncContext *s= *(void**)arg;
2184
2185     ff_check_alignment();
2186
2187     s->me.dia_size= s->avctx->dia_size;
2188     s->first_slice_line=1;
2189     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2190         s->mb_x=0; //for block init below
2191         ff_init_block_index(s);
2192         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2193             s->block_index[0]+=2;
2194             s->block_index[1]+=2;
2195             s->block_index[2]+=2;
2196             s->block_index[3]+=2;
2197
2198             /* compute motion vector & mb_type and store in context */
2199             if(s->pict_type==AV_PICTURE_TYPE_B)
2200                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2201             else
2202                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2203         }
2204         s->first_slice_line=0;
2205     }
2206     return 0;
2207 }
2208
2209 static int mb_var_thread(AVCodecContext *c, void *arg){
2210     MpegEncContext *s= *(void**)arg;
2211     int mb_x, mb_y;
2212
2213     ff_check_alignment();
2214
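    /* Estimate mean and variance of every 16x16 luma block: pix_sum and
     * pix_norm1 return sum(v) and sum(v*v) over the 256 pixels, so
     * varc ~ (sum(v*v) - sum(v)^2/256) / 256 with a small rounding bias;
     * mb_var_sum_temp accumulates the total for the frame. */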
2215     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2216         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2217             int xx = mb_x * 16;
2218             int yy = mb_y * 16;
2219             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2220             int varc;
2221             int sum = s->dsp.pix_sum(pix, s->linesize);
2222
2223             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2224
2225             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2226             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2227             s->me.mb_var_sum_temp    += varc;
2228         }
2229     }
2230     return 0;
2231 }
2232
2233 static void write_slice_end(MpegEncContext *s){
2234     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2235         if(s->partitioned_frame){
2236             ff_mpeg4_merge_partitions(s);
2237         }
2238
2239         ff_mpeg4_stuffing(&s->pb);
2240     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2241         ff_mjpeg_encode_stuffing(&s->pb);
2242     }
2243
2244     avpriv_align_put_bits(&s->pb);
2245     flush_put_bits(&s->pb);
2246
2247     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2248         s->misc_bits+= get_bits_diff(s);
2249 }
2250
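/* Fill in the latest 12-byte record of the AV_PKT_DATA_H263_MB_INFO side
 * data: bit offset of the macroblock within the packet (32 bits), quantizer,
 * GOB number, macroblock address within the GOB (16 bits) and the motion
 * vector predictors; the second MV pair is always zero since 4MV is not
 * written here. */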
2251 static void write_mb_info(MpegEncContext *s)
2252 {
2253     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2254     int offset = put_bits_count(&s->pb);
2255     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2256     int gobn = s->mb_y / s->gob_index;
2257     int pred_x, pred_y;
2258     if (CONFIG_H263_ENCODER)
2259         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2260     bytestream_put_le32(&ptr, offset);
2261     bytestream_put_byte(&ptr, s->qscale);
2262     bytestream_put_byte(&ptr, gobn);
2263     bytestream_put_le16(&ptr, mba);
2264     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2265     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2266     /* 4MV not implemented */
2267     bytestream_put_byte(&ptr, 0); /* hmv2 */
2268     bytestream_put_byte(&ptr, 0); /* vmv2 */
2269 }
2270
2271 static void update_mb_info(MpegEncContext *s, int startcode)
2272 {
2273     if (!s->mb_info)
2274         return;
2275     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2276         s->mb_info_size += 12;
2277         s->prev_mb_info = s->last_mb_info;
2278     }
2279     if (startcode) {
2280         s->prev_mb_info = put_bits_count(&s->pb)/8;
2281         /* This might have incremented mb_info_size above, and we return without
2282          * actually writing any info into that slot yet. But in that case,
2283          * this will be called again after the start code has been written, and
2284          * the mb info will actually be written then. */
2285         return;
2286     }
2287
2288     s->last_mb_info = put_bits_count(&s->pb)/8;
2289     if (!s->mb_info_size)
2290         s->mb_info_size += 12;
2291     write_mb_info(s);
2292 }
2293
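/* Encode the macroblock rows assigned to this slice context: writes resync
 * markers / GOB headers where needed and, for macroblocks with more than one
 * candidate type (or with QP_RD), picks the best coding via trial encodings
 * into the ping-pong bit buffers declared below. */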
2294 static int encode_thread(AVCodecContext *c, void *arg){
2295     MpegEncContext *s= *(void**)arg;
2296     int mb_x, mb_y, pdif = 0;
2297     int chr_h= 16>>s->chroma_y_shift;
2298     int i, j;
2299     MpegEncContext best_s, backup_s;
2300     uint8_t bit_buf[2][MAX_MB_BYTES];
2301     uint8_t bit_buf2[2][MAX_MB_BYTES];
2302     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2303     PutBitContext pb[2], pb2[2], tex_pb[2];
2304
2305     ff_check_alignment();
2306
2307     for(i=0; i<2; i++){
2308         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2309         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2310         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2311     }
2312
2313     s->last_bits= put_bits_count(&s->pb);
2314     s->mv_bits=0;
2315     s->misc_bits=0;
2316     s->i_tex_bits=0;
2317     s->p_tex_bits=0;
2318     s->i_count=0;
2319     s->f_count=0;
2320     s->b_count=0;
2321     s->skip_count=0;
2322
2323     for(i=0; i<3; i++){
2324         /* init last dc values */
2325         /* note: quant matrix value (8) is implied here */
2326         s->last_dc[i] = 128 << s->intra_dc_precision;
2327
2328         s->current_picture.f.error[i] = 0;
2329     }
2330     s->mb_skip_run = 0;
2331     memset(s->last_mv, 0, sizeof(s->last_mv));
2332
2333     s->last_mv_dir = 0;
2334
2335     switch(s->codec_id){
2336     case AV_CODEC_ID_H263:
2337     case AV_CODEC_ID_H263P:
2338     case AV_CODEC_ID_FLV1:
2339         if (CONFIG_H263_ENCODER)
2340             s->gob_index = ff_h263_get_gob_height(s);
2341         break;
2342     case AV_CODEC_ID_MPEG4:
2343         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2344             ff_mpeg4_init_partitions(s);
2345         break;
2346     }
2347
2348     s->resync_mb_x=0;
2349     s->resync_mb_y=0;
2350     s->first_slice_line = 1;
2351     s->ptr_lastgob = s->pb.buf;
2352     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2353         s->mb_x=0;
2354         s->mb_y= mb_y;
2355
2356         ff_set_qscale(s, s->qscale);
2357         ff_init_block_index(s);
2358
2359         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2360             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2361             int mb_type= s->mb_type[xy];
2362 //            int d;
2363             int dmin= INT_MAX;
2364             int dir;
2365
2366             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2367                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2368                 return -1;
2369             }
2370             if(s->data_partitioning){
2371                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2372                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2373                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2374                     return -1;
2375                 }
2376             }
2377
2378             s->mb_x = mb_x;
2379             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2380             ff_update_block_index(s);
2381
2382             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2383                 ff_h261_reorder_mb_index(s);
2384                 xy= s->mb_y*s->mb_stride + s->mb_x;
2385                 mb_type= s->mb_type[xy];
2386             }
2387
2388             /* write gob / video packet header  */
2389             if(s->rtp_mode){
2390                 int current_packet_size, is_gob_start;
2391
2392                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2393
2394                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2395
2396                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2397
2398                 switch(s->codec_id){
2399                 case AV_CODEC_ID_H263:
2400                 case AV_CODEC_ID_H263P:
2401                     if(!s->h263_slice_structured)
2402                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2403                     break;
2404                 case AV_CODEC_ID_MPEG2VIDEO:
2405                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
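                    /* fall through */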
2406                 case AV_CODEC_ID_MPEG1VIDEO:
2407                     if(s->mb_skip_run) is_gob_start=0;
2408                     break;
2409                 }
2410
2411                 if(is_gob_start){
2412                     if(s->start_mb_y != mb_y || mb_x!=0){
2413                         write_slice_end(s);
2414
2415                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2416                             ff_mpeg4_init_partitions(s);
2417                         }
2418                     }
2419
2420                     assert((put_bits_count(&s->pb)&7) == 0);
2421                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2422
2423                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2424                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2425                         int d= 100 / s->avctx->error_rate;
2426                         if(r % d == 0){
2427                             current_packet_size=0;
2428                             s->pb.buf_ptr= s->ptr_lastgob;
2429                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2430                         }
2431                     }
2432
2433                     if (s->avctx->rtp_callback){
2434                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2435                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2436                     }
2437                     update_mb_info(s, 1);
2438
2439                     switch(s->codec_id){
2440                     case AV_CODEC_ID_MPEG4:
2441                         if (CONFIG_MPEG4_ENCODER) {
2442                             ff_mpeg4_encode_video_packet_header(s);
2443                             ff_mpeg4_clean_buffers(s);
2444                         }
2445                     break;
2446                     case AV_CODEC_ID_MPEG1VIDEO:
2447                     case AV_CODEC_ID_MPEG2VIDEO:
2448                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2449                             ff_mpeg1_encode_slice_header(s);
2450                             ff_mpeg1_clean_buffers(s);
2451                         }
2452                     break;
2453                     case AV_CODEC_ID_H263:
2454                     case AV_CODEC_ID_H263P:
2455                         if (CONFIG_H263_ENCODER)
2456                             ff_h263_encode_gob_header(s, mb_y);
2457                     break;
2458                     }
2459
2460                     if(s->flags&CODEC_FLAG_PASS1){
2461                         int bits= put_bits_count(&s->pb);
2462                         s->misc_bits+= bits - s->last_bits;
2463                         s->last_bits= bits;
2464                     }
2465
2466                     s->ptr_lastgob += current_packet_size;
2467                     s->first_slice_line=1;
2468                     s->resync_mb_x=mb_x;
2469                     s->resync_mb_y=mb_y;
2470                 }
2471             }
2472
2473             if(  (s->resync_mb_x   == s->mb_x)
2474                && s->resync_mb_y+1 == s->mb_y){
2475                 s->first_slice_line=0;
2476             }
2477
2478             s->mb_skipped=0;
2479             s->dquant=0; //only for QP_RD
2480
2481             update_mb_info(s, 0);
2482
2483             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2484                 int next_block=0;
2485                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2486
2487                 copy_context_before_encode(&backup_s, s, -1);
2488                 backup_s.pb= s->pb;
2489                 best_s.data_partitioning= s->data_partitioning;
2490                 best_s.partitioned_frame= s->partitioned_frame;
2491                 if(s->data_partitioning){
2492                     backup_s.pb2= s->pb2;
2493                     backup_s.tex_pb= s->tex_pb;
2494                 }
2495
2496                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2497                     s->mv_dir = MV_DIR_FORWARD;
2498                     s->mv_type = MV_TYPE_16X16;
2499                     s->mb_intra= 0;
2500                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2501                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2502                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2503                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2504                 }
2505                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2506                     s->mv_dir = MV_DIR_FORWARD;
2507                     s->mv_type = MV_TYPE_FIELD;
2508                     s->mb_intra= 0;
2509                     for(i=0; i<2; i++){
2510                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2511                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2512                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2513                     }
2514                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2515                                  &dmin, &next_block, 0, 0);
2516                 }
2517                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2518                     s->mv_dir = MV_DIR_FORWARD;
2519                     s->mv_type = MV_TYPE_16X16;
2520                     s->mb_intra= 0;
2521                     s->mv[0][0][0] = 0;
2522                     s->mv[0][0][1] = 0;
2523                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2524                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2525                 }
2526                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2527                     s->mv_dir = MV_DIR_FORWARD;
2528                     s->mv_type = MV_TYPE_8X8;
2529                     s->mb_intra= 0;
2530                     for(i=0; i<4; i++){
2531                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2532                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2533                     }
2534                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2535                                  &dmin, &next_block, 0, 0);
2536                 }
2537                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2538                     s->mv_dir = MV_DIR_FORWARD;
2539                     s->mv_type = MV_TYPE_16X16;
2540                     s->mb_intra= 0;
2541                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2542                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2543                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2544                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2545                 }
2546                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2547                     s->mv_dir = MV_DIR_BACKWARD;
2548                     s->mv_type = MV_TYPE_16X16;
2549                     s->mb_intra= 0;
2550                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2551                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2552                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2553                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2554                 }
2555                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2556                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2557                     s->mv_type = MV_TYPE_16X16;
2558                     s->mb_intra= 0;
2559                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2560                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2561                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2562                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2563                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2564                                  &dmin, &next_block, 0, 0);
2565                 }
2566                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2567                     s->mv_dir = MV_DIR_FORWARD;
2568                     s->mv_type = MV_TYPE_FIELD;
2569                     s->mb_intra= 0;
2570                     for(i=0; i<2; i++){
2571                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2572                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2573                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2574                     }
2575                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2576                                  &dmin, &next_block, 0, 0);
2577                 }
2578                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2579                     s->mv_dir = MV_DIR_BACKWARD;
2580                     s->mv_type = MV_TYPE_FIELD;
2581                     s->mb_intra= 0;
2582                     for(i=0; i<2; i++){
2583                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2584                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2585                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2586                     }
2587                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2588                                  &dmin, &next_block, 0, 0);
2589                 }
2590                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2591                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2592                     s->mv_type = MV_TYPE_FIELD;
2593                     s->mb_intra= 0;
2594                     for(dir=0; dir<2; dir++){
2595                         for(i=0; i<2; i++){
2596                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2597                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2598                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2599                         }
2600                     }
2601                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2602                                  &dmin, &next_block, 0, 0);
2603                 }
2604                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2605                     s->mv_dir = 0;
2606                     s->mv_type = MV_TYPE_16X16;
2607                     s->mb_intra= 1;
2608                     s->mv[0][0][0] = 0;
2609                     s->mv[0][0][1] = 0;
2610                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2611                                  &dmin, &next_block, 0, 0);
2612                     if(s->h263_pred || s->h263_aic){
2613                         if(best_s.mb_intra)
2614                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2615                         else
2616                             ff_clean_intra_table_entries(s); //old mode?
2617                     }
2618                 }
2619
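                /* QP_RD: when a 16x16 candidate won, re-encode the macroblock
                 * with quantizer deltas of -1, +1, -2, +2 around the backup
                 * qscale (only +-2 for B-pictures) and keep whichever trial
                 * lowers the cost; the saved DC/AC prediction state is
                 * restored whenever a trial does not become the new best. */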
2620                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2621                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2622                         const int last_qp= backup_s.qscale;
2623                         int qpi, qp, dc[6];
2624                         int16_t ac[6][16];
2625                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2626                         static const int dquant_tab[4]={-1,1,-2,2};
2627
2628                         assert(backup_s.dquant == 0);
2629
2630                         //FIXME intra
2631                         s->mv_dir= best_s.mv_dir;
2632                         s->mv_type = MV_TYPE_16X16;
2633                         s->mb_intra= best_s.mb_intra;
2634                         s->mv[0][0][0] = best_s.mv[0][0][0];
2635                         s->mv[0][0][1] = best_s.mv[0][0][1];
2636                         s->mv[1][0][0] = best_s.mv[1][0][0];
2637                         s->mv[1][0][1] = best_s.mv[1][0][1];
2638
2639                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2640                         for(; qpi<4; qpi++){
2641                             int dquant= dquant_tab[qpi];
2642                             qp= last_qp + dquant;
2643                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2644                                 continue;
2645                             backup_s.dquant= dquant;
2646                             if(s->mb_intra && s->dc_val[0]){
2647                                 for(i=0; i<6; i++){
2648                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2649                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2650                                 }
2651                             }
2652
2653                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2654                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2655                             if(best_s.qscale != qp){
2656                                 if(s->mb_intra && s->dc_val[0]){
2657                                     for(i=0; i<6; i++){
2658                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2659                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2660                                     }
2661                                 }
2662                             }
2663                         }
2664                     }
2665                 }
2666                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2667                     int mx= s->b_direct_mv_table[xy][0];
2668                     int my= s->b_direct_mv_table[xy][1];
2669
2670                     backup_s.dquant = 0;
2671                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2672                     s->mb_intra= 0;
2673                     ff_mpeg4_set_direct_mv(s, mx, my);
2674                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2675                                  &dmin, &next_block, mx, my);
2676                 }
2677                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2678                     backup_s.dquant = 0;
2679                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2680                     s->mb_intra= 0;
2681                     ff_mpeg4_set_direct_mv(s, 0, 0);
2682                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2683                                  &dmin, &next_block, 0, 0);
2684                 }
2685                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2686                     int coded=0;
2687                     for(i=0; i<6; i++)
2688                         coded |= s->block_last_index[i];
2689                     if(coded){
2690                         int mx,my;
2691                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2692                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2693                             mx=my=0; //FIXME find the one we actually used
2694                             ff_mpeg4_set_direct_mv(s, mx, my);
2695                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2696                             mx= s->mv[1][0][0];
2697                             my= s->mv[1][0][1];
2698                         }else{
2699                             mx= s->mv[0][0][0];
2700                             my= s->mv[0][0][1];
2701                         }
2702
2703                         s->mv_dir= best_s.mv_dir;
2704                         s->mv_type = best_s.mv_type;
2705                         s->mb_intra= 0;
2706 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2707                         s->mv[0][0][1] = best_s.mv[0][0][1];
2708                         s->mv[1][0][0] = best_s.mv[1][0][0];
2709                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2710                         backup_s.dquant= 0;
2711                         s->skipdct=1;
2712                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2713                                         &dmin, &next_block, mx, my);
2714                         s->skipdct=0;
2715                     }
2716                 }
2717
2718                 s->current_picture.qscale_table[xy] = best_s.qscale;
2719
2720                 copy_context_after_encode(s, &best_s, -1);
2721
2722                 pb_bits_count= put_bits_count(&s->pb);
2723                 flush_put_bits(&s->pb);
2724                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2725                 s->pb= backup_s.pb;
2726
2727                 if(s->data_partitioning){
2728                     pb2_bits_count= put_bits_count(&s->pb2);
2729                     flush_put_bits(&s->pb2);
2730                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2731                     s->pb2= backup_s.pb2;
2732
2733                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2734                     flush_put_bits(&s->tex_pb);
2735                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2736                     s->tex_pb= backup_s.tex_pb;
2737                 }
2738                 s->last_bits= put_bits_count(&s->pb);
2739
2740                 if (CONFIG_H263_ENCODER &&
2741                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2742                     ff_h263_update_motion_val(s);
2743
2744                 if(next_block==0){ //FIXME 16 vs linesize16
2745                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2746                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2747                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2748                 }
2749
2750                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2751                     ff_MPV_decode_mb(s, s->block);
2752             } else {
2753                 int motion_x = 0, motion_y = 0;
2754                 s->mv_type=MV_TYPE_16X16;
2755                 // only one MB-Type possible
2756
2757                 switch(mb_type){
2758                 case CANDIDATE_MB_TYPE_INTRA:
2759                     s->mv_dir = 0;
2760                     s->mb_intra= 1;
2761                     motion_x= s->mv[0][0][0] = 0;
2762                     motion_y= s->mv[0][0][1] = 0;
2763                     break;
2764                 case CANDIDATE_MB_TYPE_INTER:
2765                     s->mv_dir = MV_DIR_FORWARD;
2766                     s->mb_intra= 0;
2767                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2768                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2769                     break;
2770                 case CANDIDATE_MB_TYPE_INTER_I:
2771                     s->mv_dir = MV_DIR_FORWARD;
2772                     s->mv_type = MV_TYPE_FIELD;
2773                     s->mb_intra= 0;
2774                     for(i=0; i<2; i++){
2775                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2776                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2777                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2778                     }
2779                     break;
2780                 case CANDIDATE_MB_TYPE_INTER4V:
2781                     s->mv_dir = MV_DIR_FORWARD;
2782                     s->mv_type = MV_TYPE_8X8;
2783                     s->mb_intra= 0;
2784                     for(i=0; i<4; i++){
2785                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2786                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2787                     }
2788                     break;
2789                 case CANDIDATE_MB_TYPE_DIRECT:
2790                     if (CONFIG_MPEG4_ENCODER) {
2791                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2792                         s->mb_intra= 0;
2793                         motion_x=s->b_direct_mv_table[xy][0];
2794                         motion_y=s->b_direct_mv_table[xy][1];
2795                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2796                     }
2797                     break;
2798                 case CANDIDATE_MB_TYPE_DIRECT0:
2799                     if (CONFIG_MPEG4_ENCODER) {
2800                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2801                         s->mb_intra= 0;
2802                         ff_mpeg4_set_direct_mv(s, 0, 0);
2803                     }
2804                     break;
2805                 case CANDIDATE_MB_TYPE_BIDIR:
2806                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2807                     s->mb_intra= 0;
2808                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2809                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2810                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2811                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2812                     break;
2813                 case CANDIDATE_MB_TYPE_BACKWARD:
2814                     s->mv_dir = MV_DIR_BACKWARD;
2815                     s->mb_intra= 0;
2816                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2817                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2818                     break;
2819                 case CANDIDATE_MB_TYPE_FORWARD:
2820                     s->mv_dir = MV_DIR_FORWARD;
2821                     s->mb_intra= 0;
2822                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2823                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2824                     break;
2825                 case CANDIDATE_MB_TYPE_FORWARD_I:
2826                     s->mv_dir = MV_DIR_FORWARD;
2827                     s->mv_type = MV_TYPE_FIELD;
2828                     s->mb_intra= 0;
2829                     for(i=0; i<2; i++){
2830                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2831                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2832                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2833                     }
2834                     break;
2835                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2836                     s->mv_dir = MV_DIR_BACKWARD;
2837                     s->mv_type = MV_TYPE_FIELD;
2838                     s->mb_intra= 0;
2839                     for(i=0; i<2; i++){
2840                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2841                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2842                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2843                     }
2844                     break;
2845                 case CANDIDATE_MB_TYPE_BIDIR_I:
2846                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2847                     s->mv_type = MV_TYPE_FIELD;
2848                     s->mb_intra= 0;
2849                     for(dir=0; dir<2; dir++){
2850                         for(i=0; i<2; i++){
2851                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2852                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2853                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2854                         }
2855                     }
2856                     break;
2857                 default:
2858                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2859                 }
2860
2861                 encode_mb(s, motion_x, motion_y);
2862
2863                 // RAL: Update last macroblock type
2864                 s->last_mv_dir = s->mv_dir;
2865
2866                 if (CONFIG_H263_ENCODER &&
2867                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2868                     ff_h263_update_motion_val(s);
2869
2870                 ff_MPV_decode_mb(s, s->block);
2871             }
2872
2873             /* clean the MV table in I-, P- and S-frames; B-frame direct mode needs the vectors of intra MBs zeroed */
2874             if(s->mb_intra /* && I,P,S_TYPE */){
2875                 s->p_mv_table[xy][0]=0;
2876                 s->p_mv_table[xy][1]=0;
2877             }
2878
2879             if(s->flags&CODEC_FLAG_PSNR){
2880                 int w= 16;
2881                 int h= 16;
2882
2883                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2884                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2885
2886                 s->current_picture.f.error[0] += sse(
2887                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2888                     s->dest[0], w, h, s->linesize);
2889                 s->current_picture.f.error[1] += sse(
2890                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2891                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2892                 s->current_picture.f.error[2] += sse(
2893                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2894                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2895             }
2896             if(s->loop_filter){
2897                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2898                     ff_h263_loop_filter(s);
2899             }
2900             av_dlog(s->avctx, "MB %d %d bits\n",
2901                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2902         }
2903     }
2904
2905     // not pretty, but this must be written before the flush below, so it has to be here
2906     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2907         ff_msmpeg4_encode_ext_header(s);
2908
2909     write_slice_end(s);
2910
2911     /* Send the last GOB if RTP */
2912     if (s->avctx->rtp_callback) {
2913         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2914         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2915         /* Call the RTP callback to send the last GOB */
2916         emms_c();
2917         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2918     }
2919
2920     return 0;
2921 }
2922
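/* MERGE() adds a counter from a slice thread context into the main context and clears it in the source; used to fold per-thread statistics back together. */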
2923 #define MERGE(field) dst->field += src->field; src->field=0
2924 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2925     MERGE(me.scene_change_score);
2926     MERGE(me.mc_mb_var_sum_temp);
2927     MERGE(me.mb_var_sum_temp);
2928 }
2929
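/* Merge bit-usage counters, error sums and noise-reduction statistics from a slice thread context and append its bitstream to the main one. */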
2930 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2931     int i;
2932
2933     MERGE(dct_count[0]); // note: the other DCT variables are not part of the context
2934     MERGE(dct_count[1]);
2935     MERGE(mv_bits);
2936     MERGE(i_tex_bits);
2937     MERGE(p_tex_bits);
2938     MERGE(i_count);
2939     MERGE(f_count);
2940     MERGE(b_count);
2941     MERGE(skip_count);
2942     MERGE(misc_bits);
2943     MERGE(er.error_count);
2944     MERGE(padding_bug_score);
2945     MERGE(current_picture.f.error[0]);
2946     MERGE(current_picture.f.error[1]);
2947     MERGE(current_picture.f.error[2]);
2948
2949     if(dst->avctx->noise_reduction){
2950         for(i=0; i<64; i++){
2951             MERGE(dct_error_sum[0][i]);
2952             MERGE(dct_error_sum[1][i]);
2953         }
2954     }
2955
2956     assert(put_bits_count(&src->pb) % 8 ==0);
2957     assert(put_bits_count(&dst->pb) % 8 ==0);
2958     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2959     flush_put_bits(&dst->pb);
2960 }
2961
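/* Pick the picture quality: use a pending next_lambda if set, otherwise ask the rate controller; with adaptive quantization also clean up the per-MB qscale table. */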
2962 static int estimate_qp(MpegEncContext *s, int dry_run){
2963     if (s->next_lambda){
2964         s->current_picture_ptr->f.quality =
2965         s->current_picture.f.quality = s->next_lambda;
2966         if(!dry_run) s->next_lambda= 0;
2967     } else if (!s->fixed_qscale) {
2968         s->current_picture_ptr->f.quality =
2969         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2970         if (s->current_picture.f.quality < 0)
2971             return -1;
2972     }
2973
2974     if(s->adaptive_quant){
2975         switch(s->codec_id){
2976         case AV_CODEC_ID_MPEG4:
2977             if (CONFIG_MPEG4_ENCODER)
2978                 ff_clean_mpeg4_qscales(s);
2979             break;
2980         case AV_CODEC_ID_H263:
2981         case AV_CODEC_ID_H263P:
2982         case AV_CODEC_ID_FLV1:
2983             if (CONFIG_H263_ENCODER)
2984                 ff_clean_h263_qscales(s);
2985             break;
2986         default:
2987             ff_init_qscale_tab(s);
2988         }
2989
2990         s->lambda= s->lambda_table[0];
2991         //FIXME broken
2992     }else
2993         s->lambda = s->current_picture.f.quality;
2994     update_qscale(s);
2995     return 0;
2996 }
2997
2998 /* must be called before writing the header */
2999 static void set_frame_distances(MpegEncContext * s){
3000     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3001     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3002
3003     if(s->pict_type==AV_PICTURE_TYPE_B){
3004         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3005         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3006     }else{
3007         s->pp_time= s->time - s->last_non_b_time;
3008         s->last_non_b_time= s->time;
3009         assert(s->picture_number==0 || s->pp_time > 0);
3010     }
3011 }
3012
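/* Encode one picture: run motion estimation (or MB variance analysis for I-frames), pick f_code/b_code and qscale, write the picture header, then run encode_thread over all slice contexts and merge the results. */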
3013 static int encode_picture(MpegEncContext *s, int picture_number)
3014 {
3015     int i, ret;
3016     int bits;
3017     int context_count = s->slice_context_count;
3018
3019     s->picture_number = picture_number;
3020
3021     /* Reset the average MB variance */
3022     s->me.mb_var_sum_temp    =
3023     s->me.mc_mb_var_sum_temp = 0;
3024
3025     /* we need to initialize some time variables before we can encode B-frames */
3026     // RAL: Condition added for MPEG1VIDEO
3027     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3028         set_frame_distances(s);
3029     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3030         ff_set_mpeg4_time(s);
3031
3032     s->me.scene_change_score=0;
3033
3034 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3035
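    /* toggle the MC rounding type between successive non-B-frames (flipflop rounding) so that rounding errors do not accumulate in one direction */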
3036     if(s->pict_type==AV_PICTURE_TYPE_I){
3037         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3038         else                        s->no_rounding=0;
3039     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3040         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3041             s->no_rounding ^= 1;
3042     }
3043
3044     if(s->flags & CODEC_FLAG_PASS2){
3045         if (estimate_qp(s,1) < 0)
3046             return -1;
3047         ff_get_2pass_fcode(s);
3048     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3049         if(s->pict_type==AV_PICTURE_TYPE_B)
3050             s->lambda= s->last_lambda_for[s->pict_type];
3051         else
3052             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3053         update_qscale(s);
3054     }
3055
3056     s->mb_intra=0; //for the rate distortion & bit compare functions
3057     for(i=1; i<context_count; i++){
3058         ret = ff_update_duplicate_context(s->thread_context[i], s);
3059         if (ret < 0)
3060             return ret;
3061     }
3062
3063     if(ff_init_me(s)<0)
3064         return -1;
3065
3066     /* Estimate motion for every MB */
3067     if(s->pict_type != AV_PICTURE_TYPE_I){
3068         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3069         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3070         if (s->pict_type != AV_PICTURE_TYPE_B) {
3071             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3072                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3073             }
3074         }
3075
3076         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3077     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3078         /* I-Frame */
3079         for(i=0; i<s->mb_stride*s->mb_height; i++)
3080             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3081
3082         if(!s->fixed_qscale){
3083             /* finding spatial complexity for I-frame rate control */
3084             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3085         }
3086     }
3087     for(i=1; i<context_count; i++){
3088         merge_context_after_me(s, s->thread_context[i]);
3089     }
3090     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3091     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
3092     emms_c();
3093
3094     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3095         s->pict_type= AV_PICTURE_TYPE_I;
3096         for(i=0; i<s->mb_stride*s->mb_height; i++)
3097             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3098         av_dlog(s->avctx, "Scene change detected, encoding as I Frame %d %d\n",
3099                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3100     }
3101
3102     if(!s->umvplus){
3103         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3104             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3105
3106             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3107                 int a,b;
3108                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3109                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3110                 s->f_code= FFMAX3(s->f_code, a, b);
3111             }
3112
3113             ff_fix_long_p_mvs(s);
3114             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3115             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3116                 int j;
3117                 for(i=0; i<2; i++){
3118                     for(j=0; j<2; j++)
3119                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3120                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3121                 }
3122             }
3123         }
3124
3125         if(s->pict_type==AV_PICTURE_TYPE_B){
3126             int a, b;
3127
3128             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3129             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3130             s->f_code = FFMAX(a, b);
3131
3132             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3133             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3134             s->b_code = FFMAX(a, b);
3135
3136             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3137             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3138             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3139             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3140             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3141                 int dir, j;
3142                 for(dir=0; dir<2; dir++){
3143                     for(i=0; i<2; i++){
3144                         for(j=0; j<2; j++){
3145                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3146                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3147                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3148                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3149                         }
3150                     }
3151                 }
3152             }
3153         }
3154     }
3155
3156     if (estimate_qp(s, 0) < 0)
3157         return -1;
3158
3159     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3160         s->qscale= 3; //reduce clipping problems
3161
3162     if (s->out_format == FMT_MJPEG) {
3163         /* for mjpeg, we do include qscale in the matrix */
3164         for(i=1;i<64;i++){
3165             int j= s->dsp.idct_permutation[i];
3166
3167             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3168         }
3169         s->y_dc_scale_table=
3170         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3171         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3172         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3173                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3174         s->qscale= 8;
3175     }
3176
3177     //FIXME var duplication
3178     s->current_picture_ptr->f.key_frame =
3179     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3180     s->current_picture_ptr->f.pict_type =
3181     s->current_picture.f.pict_type = s->pict_type;
3182
3183     if (s->current_picture.f.key_frame)
3184         s->picture_in_gop_number=0;
3185
3186     s->last_bits= put_bits_count(&s->pb);
3187     switch(s->out_format) {
3188     case FMT_MJPEG:
3189         if (CONFIG_MJPEG_ENCODER)
3190             ff_mjpeg_encode_picture_header(s);
3191         break;
3192     case FMT_H261:
3193         if (CONFIG_H261_ENCODER)
3194             ff_h261_encode_picture_header(s, picture_number);
3195         break;
3196     case FMT_H263:
3197         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3198             ff_wmv2_encode_picture_header(s, picture_number);
3199         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3200             ff_msmpeg4_encode_picture_header(s, picture_number);
3201         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3202             ff_mpeg4_encode_picture_header(s, picture_number);
3203         else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3204             ff_rv10_encode_picture_header(s, picture_number);
3205         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3206             ff_rv20_encode_picture_header(s, picture_number);
3207         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3208             ff_flv_encode_picture_header(s, picture_number);
3209         else if (CONFIG_H263_ENCODER)
3210             ff_h263_encode_picture_header(s, picture_number);
3211         break;
3212     case FMT_MPEG1:
3213         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3214             ff_mpeg1_encode_picture_header(s, picture_number);
3215         break;
3216     default:
3217         assert(0);
3218     }
3219     bits= put_bits_count(&s->pb);
3220     s->header_bits= bits - s->last_bits;
3221
3222     for(i=1; i<context_count; i++){
3223         update_duplicate_context_after_me(s->thread_context[i], s);
3224     }
3225     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3226     for(i=1; i<context_count; i++){
3227         merge_context_after_encode(s, s->thread_context[i]);
3228     }
3229     emms_c();
3230     return 0;
3231 }
3232
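/* Noise reduction: accumulate the magnitude of each DCT coefficient in dct_error_sum and shrink the coefficient towards zero by the adaptive dct_offset. */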
3233 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3234     const int intra= s->mb_intra;
3235     int i;
3236
3237     s->dct_count[intra]++;
3238
3239     for(i=0; i<64; i++){
3240         int level= block[i];
3241
3242         if(level){
3243             if(level>0){
3244                 s->dct_error_sum[intra][i] += level;
3245                 level -= s->dct_offset[intra][i];
3246                 if(level<0) level=0;
3247             }else{
3248                 s->dct_error_sum[intra][i] -= level;
3249                 level += s->dct_offset[intra][i];
3250                 if(level>0) level=0;
3251             }
3252             block[i]= level;
3253         }
3254     }
3255 }
3256
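/* Rate-distortion optimized ("trellis") quantization: keep up to two candidate levels per coefficient and search run/level combinations via a survivor list, minimizing distortion + lambda * estimated bits. */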
3257 static int dct_quantize_trellis_c(MpegEncContext *s,
3258                                   int16_t *block, int n,
3259                                   int qscale, int *overflow){
3260     const int *qmat;
3261     const uint8_t *scantable= s->intra_scantable.scantable;
3262     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3263     int max=0;
3264     unsigned int threshold1, threshold2;
3265     int bias=0;
3266     int run_tab[65];
3267     int level_tab[65];
3268     int score_tab[65];
3269     int survivor[65];
3270     int survivor_count;
3271     int last_run=0;
3272     int last_level=0;
3273     int last_score= 0;
3274     int last_i;
3275     int coeff[2][64];
3276     int coeff_count[64];
3277     int qmul, qadd, start_i, last_non_zero, i, dc;
3278     const int esc_length= s->ac_esc_length;
3279     uint8_t * length;
3280     uint8_t * last_length;
3281     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3282
3283     s->dsp.fdct (block);
3284
3285     if(s->dct_error_sum)
3286         s->denoise_dct(s, block);
3287     qmul= qscale*16;
3288     qadd= ((qscale-1)|1)*8;
3289
3290     if (s->mb_intra) {
3291         int q;
3292         if (!s->h263_aic) {
3293             if (n < 4)
3294                 q = s->y_dc_scale;
3295             else
3296                 q = s->c_dc_scale;
3297             q = q << 3;
3298         } else{
3299             /* For AIC we skip quant/dequant of INTRADC */
3300             q = 1 << 3;
3301             qadd=0;
3302         }
3303
3304         /* note: block[0] is assumed to be positive */
3305         block[0] = (block[0] + (q >> 1)) / q;
3306         start_i = 1;
3307         last_non_zero = 0;
3308         qmat = s->q_intra_matrix[qscale];
3309         if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3310             bias= 1<<(QMAT_SHIFT-1);
3311         length     = s->intra_ac_vlc_length;
3312         last_length= s->intra_ac_vlc_last_length;
3313     } else {
3314         start_i = 0;
3315         last_non_zero = -1;
3316         qmat = s->q_inter_matrix[qscale];
3317         length     = s->inter_ac_vlc_length;
3318         last_length= s->inter_ac_vlc_last_length;
3319     }
3320     last_i= start_i;
3321
3322     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3323     threshold2= (threshold1<<1);
3324
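    /* (unsigned)(level + threshold1) > threshold2 is equivalent to level > threshold1 || level < -threshold1, i.e. the coefficient quantizes to a nonzero value */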
3325     for(i=63; i>=start_i; i--) {
3326         const int j = scantable[i];
3327         int level = block[j] * qmat[j];
3328
3329         if(((unsigned)(level+threshold1))>threshold2){
3330             last_non_zero = i;
3331             break;
3332         }
3333     }
3334
3335     for(i=start_i; i<=last_non_zero; i++) {
3336         const int j = scantable[i];
3337         int level = block[j] * qmat[j];
3338
3339 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3340 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3341         if(((unsigned)(level+threshold1))>threshold2){
3342             if(level>0){
3343                 level= (bias + level)>>QMAT_SHIFT;
3344                 coeff[0][i]= level;
3345                 coeff[1][i]= level-1;
3346 //                coeff[2][k]= level-2;
3347             }else{
3348                 level= (bias - level)>>QMAT_SHIFT;
3349                 coeff[0][i]= -level;
3350                 coeff[1][i]= -level+1;
3351 //                coeff[2][k]= -level+2;
3352             }
3353             coeff_count[i]= FFMIN(level, 2);
3354             assert(coeff_count[i]);
3355             max |=level;
3356         }else{
3357             coeff[0][i]= (level>>31)|1;
3358             coeff_count[i]= 1;
3359         }
3360     }
3361
3362     *overflow= s->max_qcoeff < max; //overflow might have happened
3363
3364     if(last_non_zero < start_i){
3365         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3366         return last_non_zero;
3367     }
3368
3369     score_tab[start_i]= 0;
3370     survivor[0]= start_i;
3371     survivor_count= 1;
3372
3373     for(i=start_i; i<=last_non_zero; i++){
3374         int level_index, j, zero_distortion;
3375         int dct_coeff= FFABS(block[ scantable[i] ]);
3376         int best_score=256*256*256*120;
3377
3378         if (s->dsp.fdct == ff_fdct_ifast)
3379             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3380         zero_distortion= dct_coeff*dct_coeff;
3381
3382         for(level_index=0; level_index < coeff_count[i]; level_index++){
3383             int distortion;
3384             int level= coeff[level_index][i];
3385             const int alevel= FFABS(level);
3386             int unquant_coeff;
3387
3388             assert(level);
3389
3390             if(s->out_format == FMT_H263){
3391                 unquant_coeff= alevel*qmul + qadd;
3392             }else{ //MPEG1
3393                 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3394                 if(s->mb_intra){
3395                         unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
3396                         unquant_coeff =   (unquant_coeff - 1) | 1;
3397                 }else{
3398                         unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3399                         unquant_coeff =   (unquant_coeff - 1) | 1;
3400                 }
3401                 unquant_coeff<<= 3;
3402             }
3403
3404             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3405             level+=64;
3406             if((level&(~127)) == 0){
3407                 for(j=survivor_count-1; j>=0; j--){
3408                     int run= i - survivor[j];
3409                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3410                     score += score_tab[i-run];
3411
3412                     if(score < best_score){
3413                         best_score= score;
3414                         run_tab[i+1]= run;
3415                         level_tab[i+1]= level-64;
3416                     }
3417                 }
3418
3419                 if(s->out_format == FMT_H263){
3420                     for(j=survivor_count-1; j>=0; j--){
3421                         int run= i - survivor[j];
3422                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3423                         score += score_tab[i-run];
3424                         if(score < last_score){
3425                             last_score= score;
3426                             last_run= run;
3427                             last_level= level-64;
3428                             last_i= i+1;
3429                         }
3430                     }
3431                 }
3432             }else{
3433                 distortion += esc_length*lambda;
3434                 for(j=survivor_count-1; j>=0; j--){
3435                     int run= i - survivor[j];
3436                     int score= distortion + score_tab[i-run];
3437
3438                     if(score < best_score){
3439                         best_score= score;
3440                         run_tab[i+1]= run;
3441                         level_tab[i+1]= level-64;
3442                     }
3443                 }
3444
3445                 if(s->out_format == FMT_H263){
3446                     for(j=survivor_count-1; j>=0; j--){
3447                         int run= i - survivor[j];
3448                         int score= distortion + score_tab[i-run];
3449                         if(score < last_score){
3450                             last_score= score;
3451                             last_run= run;
3452                             last_level= level-64;
3453                             last_i= i+1;
3454                         }
3455                     }
3456                 }
3457             }
3458         }
3459
3460         score_tab[i+1]= best_score;
3461
3462         //Note: there is a VLC code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
3463         if(last_non_zero <= 27){
3464             for(; survivor_count; survivor_count--){
3465                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3466                     break;
3467             }
3468         }else{
3469             for(; survivor_count; survivor_count--){
3470                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3471                     break;
3472             }
3473         }
3474
3475         survivor[ survivor_count++ ]= i+1;
3476     }
3477
3478     if(s->out_format != FMT_H263){
3479         last_score= 256*256*256*120;
3480         for(i= survivor[0]; i<=last_non_zero + 1; i++){
3481             int score= score_tab[i];
3482             if(i) score += lambda*2; //FIXME be more exact?
3483
3484             if(score < last_score){
3485                 last_score= score;
3486                 last_i= i;
3487                 last_level= level_tab[i];
3488                 last_run= run_tab[i];
3489             }
3490         }
3491     }
3492
3493     s->coded_score[n] = last_score;
3494
3495     dc= FFABS(block[0]);
3496     last_non_zero= last_i - 1;
3497     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3498
3499     if(last_non_zero < start_i)
3500         return last_non_zero;
3501
3502     if(last_non_zero == 0 && start_i == 0){
3503         int best_level= 0;
3504         int best_score= dc * dc;
3505
3506         for(i=0; i<coeff_count[0]; i++){
3507             int level= coeff[i][0];
3508             int alevel= FFABS(level);
3509             int unquant_coeff, score, distortion;
3510
3511             if(s->out_format == FMT_H263){
3512                     unquant_coeff= (alevel*qmul + qadd)>>3;
3513             }else{ //MPEG1
3514                     unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3515                     unquant_coeff =   (unquant_coeff - 1) | 1;
3516             }
3517             unquant_coeff = (unquant_coeff + 4) >> 3;
3518             unquant_coeff<<= 3 + 3;
3519
3520             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3521             level+=64;
3522             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3523             else                    score= distortion + esc_length*lambda;
3524
3525             if(score < best_score){
3526                 best_score= score;
3527                 best_level= level - 64;
3528             }
3529         }
3530         block[0]= best_level;
3531         s->coded_score[n] = best_score - dc*dc;
3532         if(best_level == 0) return -1;
3533         else                return last_non_zero;
3534     }
3535
3536     i= last_i;
3537     assert(last_level);
3538
3539     block[ perm_scantable[last_non_zero] ]= last_level;
3540     i -= last_run + 1;
3541
3542     for(; i>start_i; i -= run_tab[i] + 1){
3543         block[ perm_scantable[i-1] ]= level_tab[i];
3544     }
3545
3546     return last_non_zero;
3547 }
3548
3549 //#define REFINE_STATS 1
3550 static int16_t basis[64][64];
3551
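/* Precompute the 8x8 DCT basis functions, scaled by BASIS_SHIFT and stored in IDCT permutation order, for use by dct_quantize_refine(). */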
3552 static void build_basis(uint8_t *perm){
3553     int i, j, x, y;
3554     emms_c();
3555     for(i=0; i<8; i++){
3556         for(j=0; j<8; j++){
3557             for(y=0; y<8; y++){
3558                 for(x=0; x<8; x++){
3559                     double s= 0.25*(1<<BASIS_SHIFT);
3560                     int index= 8*i + j;
3561                     int perm_index= perm[index];
3562                     if(i==0) s*= sqrt(0.5);
3563                     if(j==0) s*= sqrt(0.5);
3564                     basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3565                 }
3566             }
3567         }
3568     }
3569 }
3570
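/* Quantizer noise shaping: starting from the quantized block, repeatedly try +-1 changes on single coefficients and keep the change that most reduces the weighted spatial-domain error plus the estimated VLC bit cost, until no change helps. */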
3571 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3572                         int16_t *block, int16_t *weight, int16_t *orig,
3573                         int n, int qscale){
3574     int16_t rem[64];
3575     LOCAL_ALIGNED_16(int16_t, d1, [64]);
3576     const uint8_t *scantable= s->intra_scantable.scantable;
3577     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3578 //    unsigned int threshold1, threshold2;
3579 //    int bias=0;
3580     int run_tab[65];
3581     int prev_run=0;
3582     int prev_level=0;
3583     int qmul, qadd, start_i, last_non_zero, i, dc;
3584     uint8_t * length;
3585     uint8_t * last_length;
3586     int lambda;
3587     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3588 #ifdef REFINE_STATS
3589 static int count=0;
3590 static int after_last=0;
3591 static int to_zero=0;
3592 static int from_zero=0;
3593 static int raise=0;
3594 static int lower=0;
3595 static int messed_sign=0;
3596 #endif
3597
3598     if(basis[0][0] == 0)
3599         build_basis(s->dsp.idct_permutation);
3600
3601     qmul= qscale*2;
3602     qadd= (qscale-1)|1;
3603     if (s->mb_intra) {
3604         if (!s->h263_aic) {
3605             if (n < 4)
3606                 q = s->y_dc_scale;
3607             else
3608                 q = s->c_dc_scale;
3609         } else{
3610             /* For AIC we skip quant/dequant of INTRADC */
3611             q = 1;
3612             qadd=0;
3613         }
3614         q <<= RECON_SHIFT-3;
3615         /* note: block[0] is assumed to be positive */
3616         dc= block[0]*q;
3617 //        block[0] = (block[0] + (q >> 1)) / q;
3618         start_i = 1;
3619 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3620 //            bias= 1<<(QMAT_SHIFT-1);
3621         length     = s->intra_ac_vlc_length;
3622         last_length= s->intra_ac_vlc_last_length;
3623     } else {
3624         dc= 0;
3625         start_i = 0;
3626         length     = s->inter_ac_vlc_length;
3627         last_length= s->inter_ac_vlc_last_length;
3628     }
3629     last_non_zero = s->block_last_index[n];
3630
3631 #ifdef REFINE_STATS
3632 {START_TIMER
3633 #endif
3634     dc += (1<<(RECON_SHIFT-1));
3635     for(i=0; i<64; i++){
3636         rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
3637     }
3638 #ifdef REFINE_STATS
3639 STOP_TIMER("memset rem[]")}
3640 #endif
3641     sum=0;
3642     for(i=0; i<64; i++){
3643         int one= 36;
3644         int qns=4;
3645         int w;
3646
3647         w= FFABS(weight[i]) + qns*one;
3648         w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3649
3650         weight[i] = w;
3651 //        w=weight[i] = (63*qns + (w/2)) / w;
3652
3653         assert(w>0);
3654         assert(w<(1<<6));
3655         sum += w*w;
3656     }
3657     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
3658 #ifdef REFINE_STATS
3659 {START_TIMER
3660 #endif
3661     run=0;
3662     rle_index=0;
3663     for(i=start_i; i<=last_non_zero; i++){
3664         int j= perm_scantable[i];
3665         const int level= block[j];
3666         int coeff;
3667
3668         if(level){
3669             if(level<0) coeff= qmul*level - qadd;
3670             else        coeff= qmul*level + qadd;
3671             run_tab[rle_index++]=run;
3672             run=0;
3673
3674             s->dsp.add_8x8basis(rem, basis[j], coeff);
3675         }else{
3676             run++;
3677         }
3678     }
3679 #ifdef REFINE_STATS
3680 if(last_non_zero>0){
3681 STOP_TIMER("init rem[]")
3682 }
3683 }
3684
3685 {START_TIMER
3686 #endif
3687     for(;;){
3688         int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3689         int best_coeff=0;
3690         int best_change=0;
3691         int run2, best_unquant_change=0, analyze_gradient;
3692 #ifdef REFINE_STATS
3693 {START_TIMER
3694 #endif
3695         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3696
3697         if(analyze_gradient){
3698 #ifdef REFINE_STATS
3699 {START_TIMER
3700 #endif
3701             for(i=0; i<64; i++){
3702                 int w= weight[i];
3703
3704                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3705             }
3706 #ifdef REFINE_STATS
3707 STOP_TIMER("rem*w*w")}
3708 {START_TIMER
3709 #endif
3710             s->dsp.fdct(d1);
3711 #ifdef REFINE_STATS
3712 STOP_TIMER("dct")}
3713 #endif
3714         }
3715
3716         if(start_i){
3717             const int level= block[0];
3718             int change, old_coeff;
3719
3720             assert(s->mb_intra);
3721
3722             old_coeff= q*level;
3723
3724             for(change=-1; change<=1; change+=2){
3725                 int new_level= level + change;
3726                 int score, new_coeff;
3727
3728                 new_coeff= q*new_level;
3729                 if(new_coeff >= 2048 || new_coeff < 0)
3730                     continue;
3731
3732                 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3733                 if(score<best_score){
3734                     best_score= score;
3735                     best_coeff= 0;
3736                     best_change= change;
3737                     best_unquant_change= new_coeff - old_coeff;
3738                 }
3739             }
3740         }
3741
3742         run=0;
3743         rle_index=0;
3744         run2= run_tab[rle_index++];
3745         prev_level=0;
3746         prev_run=0;
3747
3748         for(i=start_i; i<64; i++){
3749             int j= perm_scantable[i];
3750             const int level= block[j];
3751             int change, old_coeff;
3752
3753             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3754                 break;
3755
3756             if(level){
3757                 if(level<0) old_coeff= qmul*level - qadd;
3758                 else        old_coeff= qmul*level + qadd;
3759                 run2= run_tab[rle_index++]; //FIXME: might advance past the last entry
3760             }else{
3761                 old_coeff=0;
3762                 run2--;
3763                 assert(run2>=0 || i >= last_non_zero );
3764             }
3765
3766             for(change=-1; change<=1; change+=2){
3767                 int new_level= level + change;
3768                 int score, new_coeff, unquant_change;
3769
3770                 score=0;
3771                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3772                    continue;
3773
3774                 if(new_level){
3775                     if(new_level<0) new_coeff= qmul*new_level - qadd;
3776                     else            new_coeff= qmul*new_level + qadd;
3777                     if(new_coeff >= 2048 || new_coeff <= -2048)
3778                         continue;
3779                     //FIXME check for overflow
3780
3781                     if(level){
3782                         if(level < 63 && level > -63){
3783                             if(i < last_non_zero)
3784                                 score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
3785                                          - length[UNI_AC_ENC_INDEX(run, level+64)];
3786                             else
3787                                 score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3788                                          - last_length[UNI_AC_ENC_INDEX(run, level+64)];
3789                         }
3790                     }else{
3791                         assert(FFABS(new_level)==1);
3792
3793                         if(analyze_gradient){
3794                             int g= d1[ scantable[i] ];
3795                             if(g && (g^new_level) >= 0)
3796                                 continue;
3797                         }
3798
3799                         if(i < last_non_zero){
3800                             int next_i= i + run2 + 1;
3801                             int next_level= block[ perm_scantable[next_i] ] + 64;
3802
3803                             if(next_level&(~127))
3804                                 next_level= 0;
3805
3806                             if(next_i < last_non_zero)
3807                                 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
3808                                          + length[UNI_AC_ENC_INDEX(run2, next_level)]
3809                                          - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3810                             else
3811                                 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
3812                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3813                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3814                         }else{
3815                             score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3816                             if(prev_level){
3817                                 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3818                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3819                             }
3820                         }
3821                     }
3822                 }else{
3823                     new_coeff=0;
3824                     assert(FFABS(level)==1);
3825
3826                     if(i < last_non_zero){
3827                         int next_i= i + run2 + 1;
3828                         int next_level= block[ perm_scantable[next_i] ] + 64;
3829
3830                         if(next_level&(~127))
3831                             next_level= 0;
3832
3833                         if(next_i < last_non_zero)
3834                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3835                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
3836                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3837                         else
3838                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3839                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3840                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3841                     }else{
3842                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3843                         if(prev_level){
3844                             score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3845                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3846                         }
3847                     }
3848                 }
3849
3850                 score *= lambda;
3851
3852                 unquant_change= new_coeff - old_coeff;
3853                 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3854
3855                 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3856                 if(score<best_score){
3857                     best_score= score;
3858                     best_coeff= i;
3859                     best_change= change;
3860                     best_unquant_change= unquant_change;
3861                 }
3862             }
3863             if(level){
3864                 prev_level= level + 64;
3865                 if(prev_level&(~127))
3866                     prev_level= 0;
3867                 prev_run= run;
3868                 run=0;
3869             }else{
3870                 run++;
3871             }
3872         }
3873 #ifdef REFINE_STATS
3874 STOP_TIMER("iterative step")}
3875 #endif
3876
3877         if(best_change){
3878             int j= perm_scantable[ best_coeff ];
3879
3880             block[j] += best_change;
3881
3882             if(best_coeff > last_non_zero){
3883                 last_non_zero= best_coeff;
3884                 assert(block[j]);
3885 #ifdef REFINE_STATS
3886 after_last++;
3887 #endif
3888             }else{
3889 #ifdef REFINE_STATS
3890 if(block[j]){
3891     if(block[j] - best_change){
3892         if(FFABS(block[j]) > FFABS(block[j] - best_change)){
3893             raise++;
3894         }else{
3895             lower++;
3896         }
3897     }else{
3898         from_zero++;
3899     }
3900 }else{
3901     to_zero++;
3902 }
3903 #endif
3904                 for(; last_non_zero>=start_i; last_non_zero--){
3905                     if(block[perm_scantable[last_non_zero]])
3906                         break;
3907                 }
3908             }
3909 #ifdef REFINE_STATS
3910 count++;
3911 if(256*256*256*64 % count == 0){
3912     printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
3913 }
3914 #endif
3915             run=0;
3916             rle_index=0;
3917             for(i=start_i; i<=last_non_zero; i++){
3918                 int j= perm_scantable[i];
3919                 const int level= block[j];
3920
3921                  if(level){
3922                      run_tab[rle_index++]=run;
3923                      run=0;
3924                  }else{
3925                      run++;
3926                  }
3927             }
3928
3929             s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3930         }else{
3931             break;
3932         }
3933     }
3934 #ifdef REFINE_STATS
3935 if(last_non_zero>0){
3936 STOP_TIMER("iterative search")
3937 }
3938 }
3939 #endif
3940
3941     return last_non_zero;
3942 }
3943
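/* Default (non-trellis) quantization: forward DCT, optional denoising, then scale each coefficient by the quantization matrix with a rounding bias and zero those below the threshold. */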
3944 int ff_dct_quantize_c(MpegEncContext *s,
3945                         int16_t *block, int n,
3946                         int qscale, int *overflow)
3947 {
3948     int i, j, level, last_non_zero, q, start_i;
3949     const int *qmat;
3950     const uint8_t *scantable= s->intra_scantable.scantable;
3951     int bias;
3952     int max=0;
3953     unsigned int threshold1, threshold2;
3954
3955     s->dsp.fdct (block);
3956
3957     if(s->dct_error_sum)
3958         s->denoise_dct(s, block);
3959
3960     if (s->mb_intra) {
3961         if (!s->h263_aic) {
3962             if (n < 4)
3963                 q = s->y_dc_scale;
3964             else
3965                 q = s->c_dc_scale;
3966             q = q << 3;
3967         } else
3968             /* For AIC we skip quant/dequant of INTRADC */
3969             q = 1 << 3;
3970
3971         /* note: block[0] is assumed to be positive */
3972         block[0] = (block[0] + (q >> 1)) / q;
3973         start_i = 1;
3974         last_non_zero = 0;
3975         qmat = s->q_intra_matrix[qscale];
3976         bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3977     } else {
3978         start_i = 0;
3979         last_non_zero = -1;
3980         qmat = s->q_inter_matrix[qscale];
3981         bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3982     }
3983     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3984     threshold2= (threshold1<<1);
3985     for(i=63;i>=start_i;i--) {
3986         j = scantable[i];
3987         level = block[j] * qmat[j];
3988
3989         if(((unsigned)(level+threshold1))>threshold2){
3990             last_non_zero = i;
3991             break;
3992         }else{
3993             block[j]=0;
3994         }
3995     }
3996     for(i=start_i; i<=last_non_zero; i++) {
3997         j = scantable[i];
3998         level = block[j] * qmat[j];
3999
4000 //        if(   bias+level >= (1<<QMAT_SHIFT)
4001 //           || bias-level >= (1<<QMAT_SHIFT)){
4002         if(((unsigned)(level+threshold1))>threshold2){
4003             if(level>0){
4004                 level= (bias + level)>>QMAT_SHIFT;
4005                 block[j]= level;
4006             }else{
4007                 level= (bias - level)>>QMAT_SHIFT;
4008                 block[j]= -level;
4009             }
4010             max |=level;
4011         }else{
4012             block[j]=0;
4013         }
4014     }
4015     *overflow= s->max_qcoeff < max; //overflow might have happened
4016
4017     /* we need this permutation so that we correct the IDCT; we only permute the nonzero (!=0) elements */
4018     if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4019         ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4020
4021     return last_non_zero;
4022 }
4023
4024 #define OFFSET(x) offsetof(MpegEncContext, x)
4025 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4026 static const AVOption h263_options[] = {
4027     { "obmc",         "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4028     { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4029     { "mb_info",      "Emit macroblock info for RFC 2190 packetization; the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4030     FF_MPV_COMMON_OPTS
4031     { NULL },
4032 };
4033
4034 static const AVClass h263_class = {
4035     .class_name = "H.263 encoder",
4036     .item_name  = av_default_item_name,
4037     .option     = h263_options,
4038     .version    = LIBAVUTIL_VERSION_INT,
4039 };
4040
4041 AVCodec ff_h263_encoder = {
4042     .name           = "h263",
4043     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4044     .type           = AVMEDIA_TYPE_VIDEO,
4045     .id             = AV_CODEC_ID_H263,
4046     .priv_data_size = sizeof(MpegEncContext),
4047     .init           = ff_MPV_encode_init,
4048     .encode2        = ff_MPV_encode_picture,
4049     .close          = ff_MPV_encode_end,
4050     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4051     .priv_class     = &h263_class,
4052 };
4053
4054 static const AVOption h263p_options[] = {
4055     { "umv",        "Use unlimited motion vectors.",    OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4056     { "aiv",        "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4057     { "obmc",       "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4058     { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4059     FF_MPV_COMMON_OPTS
4060     { NULL },
4061 };
4062 static const AVClass h263p_class = {
4063     .class_name = "H.263p encoder",
4064     .item_name  = av_default_item_name,
4065     .option     = h263p_options,
4066     .version    = LIBAVUTIL_VERSION_INT,
4067 };
4068
4069 AVCodec ff_h263p_encoder = {
4070     .name           = "h263p",
4071     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4072     .type           = AVMEDIA_TYPE_VIDEO,
4073     .id             = AV_CODEC_ID_H263P,
4074     .priv_data_size = sizeof(MpegEncContext),
4075     .init           = ff_MPV_encode_init,
4076     .encode2        = ff_MPV_encode_picture,
4077     .close          = ff_MPV_encode_end,
4078     .capabilities   = CODEC_CAP_SLICE_THREADS,
4079     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4080     .priv_class     = &h263p_class,
4081 };
4082
4083 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4084
4085 AVCodec ff_msmpeg4v2_encoder = {
4086     .name           = "msmpeg4v2",
4087     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4088     .type           = AVMEDIA_TYPE_VIDEO,
4089     .id             = AV_CODEC_ID_MSMPEG4V2,
4090     .priv_data_size = sizeof(MpegEncContext),
4091     .init           = ff_MPV_encode_init,
4092     .encode2        = ff_MPV_encode_picture,
4093     .close          = ff_MPV_encode_end,
4094     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4095     .priv_class     = &msmpeg4v2_class,
4096 };
4097
4098 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4099
4100 AVCodec ff_msmpeg4v3_encoder = {
4101     .name           = "msmpeg4",
4102     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4103     .type           = AVMEDIA_TYPE_VIDEO,
4104     .id             = AV_CODEC_ID_MSMPEG4V3,
4105     .priv_data_size = sizeof(MpegEncContext),
4106     .init           = ff_MPV_encode_init,
4107     .encode2        = ff_MPV_encode_picture,
4108     .close          = ff_MPV_encode_end,
4109     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4110     .priv_class     = &msmpeg4v3_class,
4111 };
4112
4113 FF_MPV_GENERIC_CLASS(wmv1)
4114
4115 AVCodec ff_wmv1_encoder = {
4116     .name           = "wmv1",
4117     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4118     .type           = AVMEDIA_TYPE_VIDEO,
4119     .id             = AV_CODEC_ID_WMV1,
4120     .priv_data_size = sizeof(MpegEncContext),
4121     .init           = ff_MPV_encode_init,
4122     .encode2        = ff_MPV_encode_picture,
4123     .close          = ff_MPV_encode_end,
4124     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4125     .priv_class     = &wmv1_class,
4126 };