1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
35 #include "avcodec.h"
36 #include "dct.h"
37 #include "dsputil.h"
38 #include "mpeg12.h"
39 #include "mpegvideo.h"
40 #include "h261.h"
41 #include "h263.h"
42 #include "mathops.h"
43 #include "mjpegenc.h"
44 #include "msmpeg4.h"
45 #include "faandct.h"
46 #include "thread.h"
47 #include "aandcttab.h"
48 #include "flv.h"
49 #include "mpeg4video.h"
50 #include "internal.h"
51 #include "bytestream.h"
52 #include <limits.h>
53
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
59
60 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
61 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
62
63 const AVOption ff_mpv_generic_options[] = {
64     FF_MPV_COMMON_OPTS
65     { NULL },
66 };
67
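/**
 * Build the per-qscale quantization multiplier tables from a quantization
 * matrix. qmat holds the 32-bit factors used by the C quantizers, qmat16
 * holds the 16-bit factor/bias pairs used by the MMX quantizer
 * (QMAT_SHIFT_MMX). For the ifast DCT the ff_aanscales factors are folded
 * into the multipliers, since that DCT leaves them in its output.
 */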
68 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
69                        uint16_t (*qmat16)[2][64],
70                        const uint16_t *quant_matrix,
71                        int bias, int qmin, int qmax, int intra)
72 {
73     int qscale;
74     int shift = 0;
75
76     for (qscale = qmin; qscale <= qmax; qscale++) {
77         int i;
78         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
79             dsp->fdct == ff_jpeg_fdct_islow_10 ||
80             dsp->fdct == ff_faandct) {
81             for (i = 0; i < 64; i++) {
82                 const int j = dsp->idct_permutation[i];
83                 /* 16 <= qscale * quant_matrix[i] <= 7905
84                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
85                  *             19952 <=              x  <= 249205026
86                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
87                  *           3444240 >= (1 << 36) / (x) >= 275 */
88
89                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90                                         (qscale * quant_matrix[j]));
91             }
92         } else if (dsp->fdct == ff_fdct_ifast) {
93             for (i = 0; i < 64; i++) {
94                 const int j = dsp->idct_permutation[i];
95                 /* 16 <= qscale * quant_matrix[i] <= 7905
96                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
97                  *             19952 <=              x  <= 249205026
98                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
99                  *           3444240 >= (1 << 36) / (x) >= 275 */
100
101                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
102                                         (ff_aanscales[i] * qscale *
103                                          quant_matrix[j]));
104             }
105         } else {
106             for (i = 0; i < 64; i++) {
107                 const int j = dsp->idct_permutation[i];
108                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109                  * Assume x = qscale * quant_matrix[i]
110                  * So             16 <=              x  <= 7905
111                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112                  * so          32768 >= (1 << 19) / (x) >= 67 */
113                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114                                         (qscale * quant_matrix[j]));
115                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116                 //                    (qscale * quant_matrix[i]);
117                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118                                        (qscale * quant_matrix[j]);
119
120                 if (qmat16[qscale][0][i] == 0 ||
121                     qmat16[qscale][0][i] == 128 * 256)
122                     qmat16[qscale][0][i] = 128 * 256 - 1;
123                 qmat16[qscale][1][i] =
124                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125                                 qmat16[qscale][0][i]);
126             }
127         }
128
129         for (i = intra; i < 64; i++) {
130             int64_t max = 8191;
131             if (dsp->fdct == ff_fdct_ifast) {
132                 max = (8191LL * ff_aanscales[i]) >> 14;
133             }
134             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
135                 shift++;
136             }
137         }
138     }
139     if (shift) {
140         av_log(NULL, AV_LOG_INFO,
141                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
142                QMAT_SHIFT - shift);
143     }
144 }
145
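/**
 * Derive the quantizer scale from the current Lagrange multiplier (lambda),
 * clip it to the configured qmin/qmax range and keep lambda2 (the squared,
 * rescaled multiplier used for rate-distortion decisions) in sync.
 */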
146 static inline void update_qscale(MpegEncContext *s)
147 {
148     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149                 (FF_LAMBDA_SHIFT + 7);
150     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
151
152     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
153                  FF_LAMBDA_SHIFT;
154 }
155
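/**
 * Write an optional custom quantization matrix to the bitstream: a 1-bit
 * load flag followed, if set, by the 64 matrix entries in zigzag order.
 */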
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
157 {
158     int i;
159
160     if (matrix) {
161         put_bits(pb, 1, 1);
162         for (i = 0; i < 64; i++) {
163             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
164         }
165     } else
166         put_bits(pb, 1, 0);
167 }
168
169 /**
170  * init s->current_picture.qscale_table from s->lambda_table
171  */
172 void ff_init_qscale_tab(MpegEncContext *s)
173 {
174     int8_t * const qscale_table = s->current_picture.qscale_table;
175     int i;
176
177     for (i = 0; i < s->mb_num; i++) {
178         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
181                                                   s->avctx->qmax);
182     }
183 }
184
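/**
 * Copy the per-frame state that the duplicate (slice threading) contexts
 * need from the main context once motion estimation has been done.
 */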
185 static void update_duplicate_context_after_me(MpegEncContext *dst,
186                                               MpegEncContext *src)
187 {
188 #define COPY(a) dst->a= src->a
189     COPY(pict_type);
190     COPY(current_picture);
191     COPY(f_code);
192     COPY(b_code);
193     COPY(qscale);
194     COPY(lambda);
195     COPY(lambda2);
196     COPY(picture_in_gop_number);
197     COPY(gop_picture_number);
198     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199     COPY(progressive_frame);    // FIXME don't set in encode_header
200     COPY(partitioned_frame);    // FIXME don't set in encode_header
201 #undef COPY
202 }
203
204 /**
205  * Set the given MpegEncContext to defaults for encoding.
206  * The changed fields will not depend upon the prior state of the MpegEncContext.
207  */
208 static void MPV_encode_defaults(MpegEncContext *s)
209 {
210     int i;
211     ff_MPV_common_defaults(s);
212
213     for (i = -16; i < 16; i++) {
214         default_fcode_tab[i + MAX_MV] = 1;
215     }
216     s->me.mv_penalty = default_mv_penalty;
217     s->fcode_tab     = default_fcode_tab;
218 }
219
220 /* init video encoder */
221 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
222 {
223     MpegEncContext *s = avctx->priv_data;
224     int i;
225     int chroma_h_shift, chroma_v_shift;
226
227     MPV_encode_defaults(s);
228
229     switch (avctx->codec_id) {
230     case AV_CODEC_ID_MPEG2VIDEO:
231         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
232             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
233             av_log(avctx, AV_LOG_ERROR,
234                    "only YUV420 and YUV422 are supported\n");
235             return -1;
236         }
237         break;
238     case AV_CODEC_ID_LJPEG:
239         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
240             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
241             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
242             avctx->pix_fmt != AV_PIX_FMT_BGRA     &&
243             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
244               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
245               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
246              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
247             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
248             return -1;
249         }
250         break;
251     case AV_CODEC_ID_MJPEG:
252         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
253             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
254             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
255               avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
256              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
257             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
258             return -1;
259         }
260         break;
261     default:
262         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
263             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
264             return -1;
265         }
266     }
267
268     switch (avctx->pix_fmt) {
269     case AV_PIX_FMT_YUVJ422P:
270     case AV_PIX_FMT_YUV422P:
271         s->chroma_format = CHROMA_422;
272         break;
273     case AV_PIX_FMT_YUVJ420P:
274     case AV_PIX_FMT_YUV420P:
275     default:
276         s->chroma_format = CHROMA_420;
277         break;
278     }
279
280     s->bit_rate = avctx->bit_rate;
281     s->width    = avctx->width;
282     s->height   = avctx->height;
283     if (avctx->gop_size > 600 &&
284         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
285         av_log(avctx, AV_LOG_ERROR,
286                "Warning keyframe interval too large! reducing it ...\n");
287         avctx->gop_size = 600;
288     }
289     s->gop_size     = avctx->gop_size;
290     s->avctx        = avctx;
291     s->flags        = avctx->flags;
292     s->flags2       = avctx->flags2;
293     s->max_b_frames = avctx->max_b_frames;
294     s->codec_id     = avctx->codec->id;
295     s->strict_std_compliance = avctx->strict_std_compliance;
296     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
297     s->mpeg_quant         = avctx->mpeg_quant;
298     s->rtp_mode           = !!avctx->rtp_payload_size;
299     s->intra_dc_precision = avctx->intra_dc_precision;
300     s->user_specified_pts = AV_NOPTS_VALUE;
301
302     if (s->gop_size <= 1) {
303         s->intra_only = 1;
304         s->gop_size   = 12;
305     } else {
306         s->intra_only = 0;
307     }
308
309     s->me_method = avctx->me_method;
310
311     /* Fixed QSCALE */
312     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
313
314     s->adaptive_quant = (s->avctx->lumi_masking ||
315                          s->avctx->dark_masking ||
316                          s->avctx->temporal_cplx_masking ||
317                          s->avctx->spatial_cplx_masking  ||
318                          s->avctx->p_masking      ||
319                          s->avctx->border_masking ||
320                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
321                         !s->fixed_qscale;
322
323     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
324
325     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
326         av_log(avctx, AV_LOG_ERROR,
327                "a vbv buffer size is needed "
328                "for encoding with a maximum bitrate\n");
329         return -1;
330     }
331
332     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
333         av_log(avctx, AV_LOG_INFO,
334                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
335     }
336
337     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
338         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
339         return -1;
340     }
341
342     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
343         av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
344         return -1;
345     }
346
347     if (avctx->rc_max_rate &&
348         avctx->rc_max_rate == avctx->bit_rate &&
349         avctx->rc_max_rate != avctx->rc_min_rate) {
350         av_log(avctx, AV_LOG_INFO,
351                "impossible bitrate constraints, this will fail\n");
352     }
353
354     if (avctx->rc_buffer_size &&
355         avctx->bit_rate * (int64_t)avctx->time_base.num >
356             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
357         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
358         return -1;
359     }
360
361     if (!s->fixed_qscale &&
362         avctx->bit_rate * av_q2d(avctx->time_base) >
363             avctx->bit_rate_tolerance) {
364         av_log(avctx, AV_LOG_ERROR,
365                "bitrate tolerance too small for bitrate\n");
366         return -1;
367     }
368
369     if (s->avctx->rc_max_rate &&
370         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
371         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
372          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
373         90000LL * (avctx->rc_buffer_size - 1) >
374             s->avctx->rc_max_rate * 0xFFFFLL) {
375         av_log(avctx, AV_LOG_INFO,
376                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
377                "specified vbv buffer is too large for the given bitrate!\n");
378     }
379
380     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != AV_CODEC_ID_MPEG4 &&
381         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
382         s->codec_id != AV_CODEC_ID_FLV1) {
383         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
384         return -1;
385     }
386
387     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
388         av_log(avctx, AV_LOG_ERROR,
389                "OBMC is only supported with simple mb decision\n");
390         return -1;
391     }
392
393     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
394         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
395         return -1;
396     }
397
398     if (s->max_b_frames                    &&
399         s->codec_id != AV_CODEC_ID_MPEG4      &&
400         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
401         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
402         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
403         return -1;
404     }
405
406     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
407          s->codec_id == AV_CODEC_ID_H263  ||
408          s->codec_id == AV_CODEC_ID_H263P) &&
409         (avctx->sample_aspect_ratio.num > 255 ||
410          avctx->sample_aspect_ratio.den > 255)) {
411         av_log(avctx, AV_LOG_ERROR,
412                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
413                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
414         return -1;
415     }
416
417     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
418         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
419         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
420         return -1;
421     }
422
423     // FIXME mpeg2 uses that too
424     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
425         av_log(avctx, AV_LOG_ERROR,
426                "mpeg2 style quantization not supported by codec\n");
427         return -1;
428     }
429
430     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
431         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
432         return -1;
433     }
434
435     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
436         s->avctx->mb_decision != FF_MB_DECISION_RD) {
437         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
438         return -1;
439     }
440
441     if (s->avctx->scenechange_threshold < 1000000000 &&
442         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
443         av_log(avctx, AV_LOG_ERROR,
444                "closed gop with scene change detection is not supported yet, "
445                "set threshold to 1000000000\n");
446         return -1;
447     }
448
449     if (s->flags & CODEC_FLAG_LOW_DELAY) {
450         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
451             av_log(avctx, AV_LOG_ERROR,
452                   "low delay forcing is only available for mpeg2\n");
453             return -1;
454         }
455         if (s->max_b_frames != 0) {
456             av_log(avctx, AV_LOG_ERROR,
457                    "b frames cannot be used with low delay\n");
458             return -1;
459         }
460     }
461
462     if (s->q_scale_type == 1) {
463         if (avctx->qmax > 12) {
464             av_log(avctx, AV_LOG_ERROR,
465                    "non linear quant only supports qmax <= 12 currently\n");
466             return -1;
467         }
468     }
469
470     if (s->avctx->thread_count > 1         &&
471         s->codec_id != AV_CODEC_ID_MPEG4      &&
472         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
473         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
474         (s->codec_id != AV_CODEC_ID_H263P)) {
475         av_log(avctx, AV_LOG_ERROR,
476                "multi threaded encoding not supported by codec\n");
477         return -1;
478     }
479
480     if (s->avctx->thread_count < 1) {
481         av_log(avctx, AV_LOG_ERROR,
482                "automatic thread number detection not supported by codec, "
483                "patch welcome\n");
484         return -1;
485     }
486
487     if (s->avctx->thread_count > 1)
488         s->rtp_mode = 1;
489
490     if (!avctx->time_base.den || !avctx->time_base.num) {
491         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
492         return -1;
493     }
494
495     i = (INT_MAX / 2 + 128) >> 8;
496     if (avctx->mb_threshold >= i) {
497         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
498                i - 1);
499         return -1;
500     }
501
502     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
503         av_log(avctx, AV_LOG_INFO,
504                "notice: b_frame_strategy only affects the first pass\n");
505         avctx->b_frame_strategy = 0;
506     }
507
508     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
509     if (i > 1) {
510         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
511         avctx->time_base.den /= i;
512         avctx->time_base.num /= i;
513         //return -1;
514     }
515
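    /* Default quantizer rounding bias: for MPEG-1/2, MJPEG and MPEG-quant
     * MPEG-4, intra coefficients are biased up by 3/8 and inter coefficients
     * are not biased; the H.263 family instead biases inter coefficients
     * towards zero by 1/4. Explicit avctx biases override these below. */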
516     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
517         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
518         // (a + x * 3 / 8) / x
519         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
520         s->inter_quant_bias = 0;
521     } else {
522         s->intra_quant_bias = 0;
523         // (a - x / 4) / x
524         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
525     }
526
527     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
528         s->intra_quant_bias = avctx->intra_quant_bias;
529     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
530         s->inter_quant_bias = avctx->inter_quant_bias;
531
532     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
533                                      &chroma_v_shift);
534
535     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
536         s->avctx->time_base.den > (1 << 16) - 1) {
537         av_log(avctx, AV_LOG_ERROR,
538                "timebase %d/%d not supported by MPEG 4 standard, "
539                "the maximum admitted value for the timebase denominator "
540                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
541                (1 << 16) - 1);
542         return -1;
543     }
544     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
545
546     switch (avctx->codec->id) {
547     case AV_CODEC_ID_MPEG1VIDEO:
548         s->out_format = FMT_MPEG1;
549         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
550         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
551         break;
552     case AV_CODEC_ID_MPEG2VIDEO:
553         s->out_format = FMT_MPEG1;
554         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
555         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
556         s->rtp_mode   = 1;
557         break;
558     case AV_CODEC_ID_LJPEG:
559     case AV_CODEC_ID_MJPEG:
560         s->out_format = FMT_MJPEG;
561         s->intra_only = 1; /* force intra only for jpeg */
562         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
563             avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
564             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
565             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
566             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
567         } else {
568             s->mjpeg_vsample[0] = 2;
569             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
570             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
571             s->mjpeg_hsample[0] = 2;
572             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
573             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
574         }
575         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
576             ff_mjpeg_encode_init(s) < 0)
577             return -1;
578         avctx->delay = 0;
579         s->low_delay = 1;
580         break;
581     case AV_CODEC_ID_H261:
582         if (!CONFIG_H261_ENCODER)
583             return -1;
584         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
585             av_log(avctx, AV_LOG_ERROR,
586                    "The specified picture size of %dx%d is not valid for the "
587                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
588                     s->width, s->height);
589             return -1;
590         }
591         s->out_format = FMT_H261;
592         avctx->delay  = 0;
593         s->low_delay  = 1;
594         break;
595     case AV_CODEC_ID_H263:
596         if (!CONFIG_H263_ENCODER)
597             return -1;
598         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
599                              s->width, s->height) == 8) {
600             av_log(avctx, AV_LOG_INFO,
601                    "The specified picture size of %dx%d is not valid for "
602                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
603                    "352x288, 704x576, and 1408x1152. "
604                    "Try H.263+.\n", s->width, s->height);
605             return -1;
606         }
607         s->out_format = FMT_H263;
608         avctx->delay  = 0;
609         s->low_delay  = 1;
610         break;
611     case AV_CODEC_ID_H263P:
612         s->out_format = FMT_H263;
613         s->h263_plus  = 1;
614         /* Fx */
615         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
616         s->modified_quant  = s->h263_aic;
617         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
618         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
619
620         /* /Fx */
621         /* These are just to be sure */
622         avctx->delay = 0;
623         s->low_delay = 1;
624         break;
625     case AV_CODEC_ID_FLV1:
626         s->out_format      = FMT_H263;
627         s->h263_flv        = 2; /* format = 1; 11-bit codes */
628         s->unrestricted_mv = 1;
629         s->rtp_mode  = 0; /* don't allow GOB */
630         avctx->delay = 0;
631         s->low_delay = 1;
632         break;
633     case AV_CODEC_ID_RV10:
634         s->out_format = FMT_H263;
635         avctx->delay  = 0;
636         s->low_delay  = 1;
637         break;
638     case AV_CODEC_ID_RV20:
639         s->out_format      = FMT_H263;
640         avctx->delay       = 0;
641         s->low_delay       = 1;
642         s->modified_quant  = 1;
643         s->h263_aic        = 1;
644         s->h263_plus       = 1;
645         s->loop_filter     = 1;
646         s->unrestricted_mv = 0;
647         break;
648     case AV_CODEC_ID_MPEG4:
649         s->out_format      = FMT_H263;
650         s->h263_pred       = 1;
651         s->unrestricted_mv = 1;
652         s->low_delay       = s->max_b_frames ? 0 : 1;
653         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
654         break;
655     case AV_CODEC_ID_MSMPEG4V2:
656         s->out_format      = FMT_H263;
657         s->h263_pred       = 1;
658         s->unrestricted_mv = 1;
659         s->msmpeg4_version = 2;
660         avctx->delay       = 0;
661         s->low_delay       = 1;
662         break;
663     case AV_CODEC_ID_MSMPEG4V3:
664         s->out_format        = FMT_H263;
665         s->h263_pred         = 1;
666         s->unrestricted_mv   = 1;
667         s->msmpeg4_version   = 3;
668         s->flipflop_rounding = 1;
669         avctx->delay         = 0;
670         s->low_delay         = 1;
671         break;
672     case AV_CODEC_ID_WMV1:
673         s->out_format        = FMT_H263;
674         s->h263_pred         = 1;
675         s->unrestricted_mv   = 1;
676         s->msmpeg4_version   = 4;
677         s->flipflop_rounding = 1;
678         avctx->delay         = 0;
679         s->low_delay         = 1;
680         break;
681     case AV_CODEC_ID_WMV2:
682         s->out_format        = FMT_H263;
683         s->h263_pred         = 1;
684         s->unrestricted_mv   = 1;
685         s->msmpeg4_version   = 5;
686         s->flipflop_rounding = 1;
687         avctx->delay         = 0;
688         s->low_delay         = 1;
689         break;
690     default:
691         return -1;
692     }
693
694     avctx->has_b_frames = !s->low_delay;
695
696     s->encoding = 1;
697
698     s->progressive_frame    =
699     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
700                                                 CODEC_FLAG_INTERLACED_ME) ||
701                                 s->alternate_scan);
702
703     /* init */
704     if (ff_MPV_common_init(s) < 0)
705         return -1;
706
707     if (ARCH_X86)
708         ff_MPV_encode_init_x86(s);
709
710     ff_h263dsp_init(&s->h263dsp);
711     if (!s->dct_quantize)
712         s->dct_quantize = ff_dct_quantize_c;
713     if (!s->denoise_dct)
714         s->denoise_dct  = denoise_dct_c;
715     s->fast_dct_quantize = s->dct_quantize;
716     if (avctx->trellis)
717         s->dct_quantize  = dct_quantize_trellis_c;
718
719     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
720         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
721
722     s->quant_precision = 5;
723
724     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
725     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
726
727     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
728         ff_h261_encode_init(s);
729     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
730         ff_h263_encode_init(s);
731     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
732         ff_msmpeg4_encode_init(s);
733     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
734         && s->out_format == FMT_MPEG1)
735         ff_mpeg1_encode_init(s);
736
737     /* init q matrix */
738     for (i = 0; i < 64; i++) {
739         int j = s->dsp.idct_permutation[i];
740         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
741             s->mpeg_quant) {
742             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
743             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
744         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
745             s->intra_matrix[j] =
746             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
747         } else {
748             /* mpeg1/2 */
749             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
750             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
751         }
752         if (s->avctx->intra_matrix)
753             s->intra_matrix[j] = s->avctx->intra_matrix[i];
754         if (s->avctx->inter_matrix)
755             s->inter_matrix[j] = s->avctx->inter_matrix[i];
756     }
757
758     /* precompute matrix */
759     /* for mjpeg, we do include qscale in the matrix */
760     if (s->out_format != FMT_MJPEG) {
761         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
762                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
763                           31, 1);
764         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
765                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
766                           31, 0);
767     }
768
769     if (ff_rate_control_init(s) < 0)
770         return -1;
771
772     return 0;
773 }
774
775 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
776 {
777     MpegEncContext *s = avctx->priv_data;
778
779     ff_rate_control_uninit(s);
780
781     ff_MPV_common_end(s);
782     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
783         s->out_format == FMT_MJPEG)
784         ff_mjpeg_encode_close(s);
785
786     av_freep(&avctx->extradata);
787
788     return 0;
789 }
790
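/**
 * Sum of absolute differences between a 16x16 block and a constant
 * reference value (typically the block mean).
 */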
791 static int get_sae(uint8_t *src, int ref, int stride)
792 {
793     int x,y;
794     int acc = 0;
795
796     for (y = 0; y < 16; y++) {
797         for (x = 0; x < 16; x++) {
798             acc += FFABS(src[x + y * stride] - ref);
799         }
800     }
801
802     return acc;
803 }
804
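/**
 * Count the 16x16 blocks for which coding against the block mean (an
 * intra-like measure) is clearly cheaper than the SAD against the reference
 * frame; used for scoring candidate B-frames with b_frame_strategy == 1.
 */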
805 static int get_intra_count(MpegEncContext *s, uint8_t *src,
806                            uint8_t *ref, int stride)
807 {
808     int x, y, w, h;
809     int acc = 0;
810
811     w = s->width  & ~15;
812     h = s->height & ~15;
813
814     for (y = 0; y < h; y += 16) {
815         for (x = 0; x < w; x += 16) {
816             int offset = x + y * stride;
817             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
818                                      16);
819             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
820             int sae  = get_sae(src + offset, mean, stride);
821
822             acc += sae + 500 < sad;
823         }
824     }
825     return acc;
826 }
827
828
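/**
 * Queue one user-supplied frame for encoding: validate and, if needed, guess
 * its pts, either reference the frame directly (when the strides match) or
 * copy it into an internal picture, and append it to the input_picture FIFO
 * that feeds the reordering logic.
 */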
829 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
830 {
831     Picture *pic = NULL;
832     int64_t pts;
833     int i, display_picture_number = 0, ret;
834     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
835                                                  (s->low_delay ? 0 : 1);
836     int direct = 1;
837
838     if (pic_arg) {
839         pts = pic_arg->pts;
840         display_picture_number = s->input_picture_number++;
841
842         if (pts != AV_NOPTS_VALUE) {
843             if (s->user_specified_pts != AV_NOPTS_VALUE) {
844                 int64_t time = pts;
845                 int64_t last = s->user_specified_pts;
846
847                 if (time <= last) {
848                     av_log(s->avctx, AV_LOG_ERROR,
849                            "Error, Invalid timestamp=%"PRId64", "
850                            "last=%"PRId64"\n", pts, s->user_specified_pts);
851                     return -1;
852                 }
853
854                 if (!s->low_delay && display_picture_number == 1)
855                     s->dts_delta = time - last;
856             }
857             s->user_specified_pts = pts;
858         } else {
859             if (s->user_specified_pts != AV_NOPTS_VALUE) {
860                 s->user_specified_pts =
861                 pts = s->user_specified_pts + 1;
862                 av_log(s->avctx, AV_LOG_INFO,
863                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
864                        pts);
865             } else {
866                 pts = display_picture_number;
867             }
868         }
869     }
870
871     if (pic_arg) {
872         if (!pic_arg->buf[0])
873             direct = 0;
874         if (pic_arg->linesize[0] != s->linesize)
875             direct = 0;
876         if (pic_arg->linesize[1] != s->uvlinesize)
877             direct = 0;
878         if (pic_arg->linesize[2] != s->uvlinesize)
879             direct = 0;
880
881         av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
882                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
883
884         if (direct) {
885             i = ff_find_unused_picture(s, 1);
886             if (i < 0)
887                 return i;
888
889             pic = &s->picture[i];
890             pic->reference = 3;
891
892             if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
893                 return ret;
894             if (ff_alloc_picture(s, pic, 1) < 0) {
895                 return -1;
896             }
897         } else {
898             i = ff_find_unused_picture(s, 0);
899             if (i < 0)
900                 return i;
901
902             pic = &s->picture[i];
903             pic->reference = 3;
904
905             if (ff_alloc_picture(s, pic, 0) < 0) {
906                 return -1;
907             }
908
909             if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
910                 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
911                 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
912                 // empty
913             } else {
914                 int h_chroma_shift, v_chroma_shift;
915                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
916                                                  &h_chroma_shift,
917                                                  &v_chroma_shift);
918
919                 for (i = 0; i < 3; i++) {
920                     int src_stride = pic_arg->linesize[i];
921                     int dst_stride = i ? s->uvlinesize : s->linesize;
922                     int h_shift = i ? h_chroma_shift : 0;
923                     int v_shift = i ? v_chroma_shift : 0;
924                     int w = s->width  >> h_shift;
925                     int h = s->height >> v_shift;
926                     uint8_t *src = pic_arg->data[i];
927                     uint8_t *dst = pic->f.data[i];
928
929                     if (!s->avctx->rc_buffer_size)
930                         dst += INPLACE_OFFSET;
931
932                     if (src_stride == dst_stride)
933                         memcpy(dst, src, src_stride * h);
934                     else {
935                         while (h--) {
936                             memcpy(dst, src, w);
937                             dst += dst_stride;
938                             src += src_stride;
939                         }
940                     }
941                 }
942             }
943         }
944         ret = av_frame_copy_props(&pic->f, pic_arg);
945         if (ret < 0)
946             return ret;
947
948         pic->f.display_picture_number = display_picture_number;
949         pic->f.pts = pts; // we set this here to avoid modifying pic_arg
950     }
951
952     /* shift buffer entries */
953     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
954         s->input_picture[i - 1] = s->input_picture[i];
955
956     s->input_picture[encoding_delay] = (Picture*) pic;
957
958     return 0;
959 }
960
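/**
 * Decide whether the candidate frame is close enough to the reference to be
 * skipped entirely: per-block differences are accumulated with the metric
 * selected by frame_skip_exp and compared against frame_skip_threshold and
 * frame_skip_factor.
 */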
961 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
962 {
963     int x, y, plane;
964     int score = 0;
965     int64_t score64 = 0;
966
967     for (plane = 0; plane < 3; plane++) {
968         const int stride = p->f.linesize[plane];
969         const int bw = plane ? 1 : 2;
970         for (y = 0; y < s->mb_height * bw; y++) {
971             for (x = 0; x < s->mb_width * bw; x++) {
972                 int off = p->shared ? 0 : 16;
973                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
974                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
975                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
976
977                 switch (s->avctx->frame_skip_exp) {
978                 case 0: score    =  FFMAX(score, v);          break;
979                 case 1: score   += FFABS(v);                  break;
980                 case 2: score   += v * v;                     break;
981                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
982                 case 4: score64 += v * v * (int64_t)(v * v);  break;
983                 }
984             }
985         }
986     }
987
988     if (score)
989         score64 = score;
990
991     if (score64 < s->avctx->frame_skip_threshold)
992         return 1;
993     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
994         return 1;
995     return 0;
996 }
997
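/**
 * Helper for estimate_best_b_count(): encode one frame with the scratch
 * encoder context and return the size of the resulting packet in bytes.
 */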
998 static int encode_frame(AVCodecContext *c, AVFrame *frame)
999 {
1000     AVPacket pkt = { 0 };
1001     int ret, got_output;
1002
1003     av_init_packet(&pkt);
1004     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1005     if (ret < 0)
1006         return ret;
1007
1008     ret = pkt.size;
1009     av_free_packet(&pkt);
1010     return ret;
1011 }
1012
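/**
 * b_frame_strategy == 2: encode the queued input pictures at reduced
 * resolution with every possible B-frame run length and return the length
 * with the lowest rate-distortion cost.
 */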
1013 static int estimate_best_b_count(MpegEncContext *s)
1014 {
1015     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1016     AVCodecContext *c = avcodec_alloc_context3(NULL);
1017     AVFrame input[FF_MAX_B_FRAMES + 2];
1018     const int scale = s->avctx->brd_scale;
1019     int i, j, out_size, p_lambda, b_lambda, lambda2;
1020     int64_t best_rd  = INT64_MAX;
1021     int best_b_count = -1;
1022
1023     assert(scale >= 0 && scale <= 3);
1024
1025     //emms_c();
1026     //s->next_picture_ptr->quality;
1027     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1028     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1029     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1030     if (!b_lambda) // FIXME we should do this somewhere else
1031         b_lambda = p_lambda;
1032     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1033                FF_LAMBDA_SHIFT;
1034
1035     c->width        = s->width  >> scale;
1036     c->height       = s->height >> scale;
1037     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1038                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1039     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1040     c->mb_decision  = s->avctx->mb_decision;
1041     c->me_cmp       = s->avctx->me_cmp;
1042     c->mb_cmp       = s->avctx->mb_cmp;
1043     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1044     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1045     c->time_base    = s->avctx->time_base;
1046     c->max_b_frames = s->max_b_frames;
1047
1048     if (avcodec_open2(c, codec, NULL) < 0)
1049         return -1;
1050
1051     for (i = 0; i < s->max_b_frames + 2; i++) {
1052         int ysize = c->width * c->height;
1053         int csize = (c->width / 2) * (c->height / 2);
1054         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1055                                                 s->next_picture_ptr;
1056
1057         avcodec_get_frame_defaults(&input[i]);
1058         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1059         input[i].data[1]     = input[i].data[0] + ysize;
1060         input[i].data[2]     = input[i].data[1] + csize;
1061         input[i].linesize[0] = c->width;
1062         input[i].linesize[1] =
1063         input[i].linesize[2] = c->width / 2;
1064
1065         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1066             pre_input = *pre_input_ptr;
1067
1068             if (!pre_input.shared && i) {
1069                 pre_input.f.data[0] += INPLACE_OFFSET;
1070                 pre_input.f.data[1] += INPLACE_OFFSET;
1071                 pre_input.f.data[2] += INPLACE_OFFSET;
1072             }
1073
1074             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1075                                  pre_input.f.data[0], pre_input.f.linesize[0],
1076                                  c->width,      c->height);
1077             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1078                                  pre_input.f.data[1], pre_input.f.linesize[1],
1079                                  c->width >> 1, c->height >> 1);
1080             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1081                                  pre_input.f.data[2], pre_input.f.linesize[2],
1082                                  c->width >> 1, c->height >> 1);
1083         }
1084     }
1085
1086     for (j = 0; j < s->max_b_frames + 1; j++) {
1087         int64_t rd = 0;
1088
1089         if (!s->input_picture[j])
1090             break;
1091
1092         c->error[0] = c->error[1] = c->error[2] = 0;
1093
1094         input[0].pict_type = AV_PICTURE_TYPE_I;
1095         input[0].quality   = 1 * FF_QP2LAMBDA;
1096
1097         out_size = encode_frame(c, &input[0]);
1098
1099         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1100
1101         for (i = 0; i < s->max_b_frames + 1; i++) {
1102             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1103
1104             input[i + 1].pict_type = is_p ?
1105                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1106             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1107
1108             out_size = encode_frame(c, &input[i + 1]);
1109
1110             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1111         }
1112
1113         /* get the delayed frames */
1114         while (out_size) {
1115             out_size = encode_frame(c, NULL);
1116             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1117         }
1118
1119         rd += c->error[0] + c->error[1] + c->error[2];
1120
1121         if (rd < best_rd) {
1122             best_rd = rd;
1123             best_b_count = j;
1124         }
1125     }
1126
1127     avcodec_close(c);
1128     av_freep(&c);
1129
1130     for (i = 0; i < s->max_b_frames + 2; i++) {
1131         av_freep(&input[i].data[0]);
1132     }
1133
1134     return best_b_count;
1135 }
1136
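/**
 * Choose the next picture to be coded: run the frame-skip check, decide how
 * many B-frames precede the next reference picture (according to
 * b_frame_strategy), assign picture types and coded_picture_number, and set
 * up new_picture / current_picture_ptr accordingly.
 */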
1137 static int select_input_picture(MpegEncContext *s)
1138 {
1139     int i, ret;
1140
1141     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1142         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1143     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1144
1145     /* set next picture type & ordering */
1146     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1147         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1148             s->next_picture_ptr == NULL || s->intra_only) {
1149             s->reordered_input_picture[0] = s->input_picture[0];
1150             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1151             s->reordered_input_picture[0]->f.coded_picture_number =
1152                 s->coded_picture_number++;
1153         } else {
1154             int b_frames;
1155
1156             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1157                 if (s->picture_in_gop_number < s->gop_size &&
1158                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1159                     // FIXME check that the gop check above is +-1 correct
1160                     av_frame_unref(&s->input_picture[0]->f);
1161
1162                     emms_c();
1163                     ff_vbv_update(s, 0);
1164
1165                     goto no_output_pic;
1166                 }
1167             }
1168
1169             if (s->flags & CODEC_FLAG_PASS2) {
1170                 for (i = 0; i < s->max_b_frames + 1; i++) {
1171                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1172
1173                     if (pict_num >= s->rc_context.num_entries)
1174                         break;
1175                     if (!s->input_picture[i]) {
1176                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1177                         break;
1178                     }
1179
1180                     s->input_picture[i]->f.pict_type =
1181                         s->rc_context.entry[pict_num].new_pict_type;
1182                 }
1183             }
1184
1185             if (s->avctx->b_frame_strategy == 0) {
1186                 b_frames = s->max_b_frames;
1187                 while (b_frames && !s->input_picture[b_frames])
1188                     b_frames--;
1189             } else if (s->avctx->b_frame_strategy == 1) {
1190                 for (i = 1; i < s->max_b_frames + 1; i++) {
1191                     if (s->input_picture[i] &&
1192                         s->input_picture[i]->b_frame_score == 0) {
1193                         s->input_picture[i]->b_frame_score =
1194                             get_intra_count(s,
1195                                             s->input_picture[i    ]->f.data[0],
1196                                             s->input_picture[i - 1]->f.data[0],
1197                                             s->linesize) + 1;
1198                     }
1199                 }
1200                 for (i = 0; i < s->max_b_frames + 1; i++) {
1201                     if (s->input_picture[i] == NULL ||
1202                         s->input_picture[i]->b_frame_score - 1 >
1203                             s->mb_num / s->avctx->b_sensitivity)
1204                         break;
1205                 }
1206
1207                 b_frames = FFMAX(0, i - 1);
1208
1209                 /* reset scores */
1210                 for (i = 0; i < b_frames + 1; i++) {
1211                     s->input_picture[i]->b_frame_score = 0;
1212                 }
1213             } else if (s->avctx->b_frame_strategy == 2) {
1214                 b_frames = estimate_best_b_count(s);
1215             } else {
1216                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1217                 b_frames = 0;
1218             }
1219
1220             emms_c();
1221
1222             for (i = b_frames - 1; i >= 0; i--) {
1223                 int type = s->input_picture[i]->f.pict_type;
1224                 if (type && type != AV_PICTURE_TYPE_B)
1225                     b_frames = i;
1226             }
1227             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1228                 b_frames == s->max_b_frames) {
1229                 av_log(s->avctx, AV_LOG_ERROR,
1230                        "warning, too many b frames in a row\n");
1231             }
1232
1233             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1234                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1235                     s->gop_size > s->picture_in_gop_number) {
1236                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1237                 } else {
1238                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1239                         b_frames = 0;
1240                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1241                 }
1242             }
1243
1244             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1245                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1246                 b_frames--;
1247
1248             s->reordered_input_picture[0] = s->input_picture[b_frames];
1249             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1250                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1251             s->reordered_input_picture[0]->f.coded_picture_number =
1252                 s->coded_picture_number++;
1253             for (i = 0; i < b_frames; i++) {
1254                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1255                 s->reordered_input_picture[i + 1]->f.pict_type =
1256                     AV_PICTURE_TYPE_B;
1257                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1258                     s->coded_picture_number++;
1259             }
1260         }
1261     }
1262 no_output_pic:
1263     if (s->reordered_input_picture[0]) {
1264         s->reordered_input_picture[0]->reference =
1265            s->reordered_input_picture[0]->f.pict_type !=
1266                AV_PICTURE_TYPE_B ? 3 : 0;
1267
1268         ff_mpeg_unref_picture(s, &s->new_picture);
1269         if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1270             return ret;
1271
1272         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1273             // input is a shared pix, so we can't modify it -> alloc a new
1274             // one & ensure that the shared one is reusable
1275
1276             Picture *pic;
1277             int i = ff_find_unused_picture(s, 0);
1278             if (i < 0)
1279                 return i;
1280             pic = &s->picture[i];
1281
1282             pic->reference = s->reordered_input_picture[0]->reference;
1283             if (ff_alloc_picture(s, pic, 0) < 0) {
1284                 return -1;
1285             }
1286
1287             ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1288             if (ret < 0)
1289                 return ret;
1290
1291             /* mark us unused / free shared pic */
1292             av_frame_unref(&s->reordered_input_picture[0]->f);
1293             s->reordered_input_picture[0]->shared = 0;
1294
1295             s->current_picture_ptr = pic;
1296         } else {
1297             // input is not a shared pix -> reuse buffer for current_pix
1298             s->current_picture_ptr = s->reordered_input_picture[0];
1299             for (i = 0; i < 4; i++) {
1300                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1301             }
1302         }
1303         ff_mpeg_unref_picture(s, &s->current_picture);
1304         if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1305                                        s->current_picture_ptr)) < 0)
1306             return ret;
1307
1308         s->picture_number = s->new_picture.f.display_picture_number;
1309     } else {
1310         ff_mpeg_unref_picture(s, &s->new_picture);
1311     }
1312     return 0;
1313 }
1314
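/**
 * Main encoding entry point: buffer the incoming frame, pick the picture to
 * code, encode it (retrying with a larger quantizer if the VBV buffer would
 * overflow), add stuffing and the MPEG-1/2 vbv_delay for CBR, and fill in
 * the output packet.
 */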
1315 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1316                           const AVFrame *pic_arg, int *got_packet)
1317 {
1318     MpegEncContext *s = avctx->priv_data;
1319     int i, stuffing_count, ret;
1320     int context_count = s->slice_context_count;
1321
1322     s->picture_in_gop_number++;
1323
1324     if (load_input_picture(s, pic_arg) < 0)
1325         return -1;
1326
1327     if (select_input_picture(s) < 0) {
1328         return -1;
1329     }
1330
1331     /* output? */
1332     if (s->new_picture.f.data[0]) {
1333         if (!pkt->data &&
1334             (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
1335             return ret;
1336         if (s->mb_info) {
1337             s->mb_info_ptr = av_packet_new_side_data(pkt,
1338                                  AV_PKT_DATA_H263_MB_INFO,
1339                                  s->mb_width*s->mb_height*12);
1340             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1341         }
1342
1343         for (i = 0; i < context_count; i++) {
1344             int start_y = s->thread_context[i]->start_mb_y;
1345             int   end_y = s->thread_context[i]->  end_mb_y;
1346             int h       = s->mb_height;
1347             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1348             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1349
1350             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1351         }
1352
1353         s->pict_type = s->new_picture.f.pict_type;
1354         //emms_c();
1355         ff_MPV_frame_start(s, avctx);
1356 vbv_retry:
1357         if (encode_picture(s, s->picture_number) < 0)
1358             return -1;
1359
1360         avctx->header_bits = s->header_bits;
1361         avctx->mv_bits     = s->mv_bits;
1362         avctx->misc_bits   = s->misc_bits;
1363         avctx->i_tex_bits  = s->i_tex_bits;
1364         avctx->p_tex_bits  = s->p_tex_bits;
1365         avctx->i_count     = s->i_count;
1366         // FIXME f/b_count in avctx
1367         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1368         avctx->skip_count  = s->skip_count;
1369
1370         ff_MPV_frame_end(s);
1371
1372         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1373             ff_mjpeg_encode_picture_trailer(s);
1374
1375         if (avctx->rc_buffer_size) {
1376             RateControlContext *rcc = &s->rc_context;
1377             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1378
1379             if (put_bits_count(&s->pb) > max_size &&
1380                 s->lambda < s->avctx->lmax) {
1381                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1382                                        (s->qscale + 1) / s->qscale);
1383                 if (s->adaptive_quant) {
1384                     int i;
1385                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1386                         s->lambda_table[i] =
1387                             FFMAX(s->lambda_table[i] + 1,
1388                                   s->lambda_table[i] * (s->qscale + 1) /
1389                                   s->qscale);
1390                 }
1391                 s->mb_skipped = 0;        // done in MPV_frame_start()
1392                 // done in encode_picture() so we must undo it
1393                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1394                     if (s->flipflop_rounding          ||
1395                         s->codec_id == AV_CODEC_ID_H263P ||
1396                         s->codec_id == AV_CODEC_ID_MPEG4)
1397                         s->no_rounding ^= 1;
1398                 }
1399                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1400                     s->time_base       = s->last_time_base;
1401                     s->last_non_b_time = s->time - s->pp_time;
1402                 }
1403                 for (i = 0; i < context_count; i++) {
1404                     PutBitContext *pb = &s->thread_context[i]->pb;
1405                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1406                 }
1407                 goto vbv_retry;
1408             }
1409
1410             assert(s->avctx->rc_max_rate);
1411         }
1412
1413         if (s->flags & CODEC_FLAG_PASS1)
1414             ff_write_pass1_stats(s);
1415
1416         for (i = 0; i < 4; i++) {
1417             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1418             avctx->error[i] += s->current_picture_ptr->f.error[i];
1419         }
1420
1421         if (s->flags & CODEC_FLAG_PASS1)
1422             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1423                    avctx->i_tex_bits + avctx->p_tex_bits ==
1424                        put_bits_count(&s->pb));
1425         flush_put_bits(&s->pb);
1426         s->frame_bits  = put_bits_count(&s->pb);
1427
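        /* ff_vbv_update() returns the number of stuffing bytes needed to keep the
         * bitrate up and avoid a VBV buffer overflow.  MPEG-1/2 pad with zero
         * bytes; MPEG-4 writes a stuffing start code (0x000001C3) followed by
         * 0xFF bytes. */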
1428         stuffing_count = ff_vbv_update(s, s->frame_bits);
1429         if (stuffing_count) {
1430             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1431                     stuffing_count + 50) {
1432                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1433                 return -1;
1434             }
1435
1436             switch (s->codec_id) {
1437             case AV_CODEC_ID_MPEG1VIDEO:
1438             case AV_CODEC_ID_MPEG2VIDEO:
1439                 while (stuffing_count--) {
1440                     put_bits(&s->pb, 8, 0);
1441                 }
1442             break;
1443             case AV_CODEC_ID_MPEG4:
1444                 put_bits(&s->pb, 16, 0);
1445                 put_bits(&s->pb, 16, 0x1C3);
1446                 stuffing_count -= 4;
1447                 while (stuffing_count--) {
1448                     put_bits(&s->pb, 8, 0xFF);
1449                 }
1450             break;
1451             default:
1452                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1453             }
1454             flush_put_bits(&s->pb);
1455             s->frame_bits  = put_bits_count(&s->pb);
1456         }
1457
1458         /* update mpeg1/2 vbv_delay for CBR */
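        /* The delay is expressed in 90 kHz ticks: roughly the bits currently in
         * the buffer times 90000 / rc_max_rate, clamped so this frame's own bits
         * can still drain in time, then patched into the 16-bit vbv_delay field
         * that spans three bytes at vbv_delay_ptr (3 + 8 + 5 bits). */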
1459         if (s->avctx->rc_max_rate                          &&
1460             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1461             s->out_format == FMT_MPEG1                     &&
1462             90000LL * (avctx->rc_buffer_size - 1) <=
1463                 s->avctx->rc_max_rate * 0xFFFFLL) {
1464             int vbv_delay, min_delay;
1465             double inbits  = s->avctx->rc_max_rate *
1466                              av_q2d(s->avctx->time_base);
1467             int    minbits = s->frame_bits - 8 *
1468                              (s->vbv_delay_ptr - s->pb.buf - 1);
1469             double bits    = s->rc_context.buffer_index + minbits - inbits;
1470
1471             if (bits < 0)
1472                 av_log(s->avctx, AV_LOG_ERROR,
1473                        "Internal error, negative bits\n");
1474
1475             assert(s->repeat_first_field == 0);
1476
1477             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1478             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1479                         s->avctx->rc_max_rate;
1480
1481             vbv_delay = FFMAX(vbv_delay, min_delay);
1482
1483             assert(vbv_delay < 0xFFFF);
1484
1485             s->vbv_delay_ptr[0] &= 0xF8;
1486             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1487             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1488             s->vbv_delay_ptr[2] &= 0x07;
1489             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1490             avctx->vbv_delay     = vbv_delay * 300;
1491         }
1492         s->total_bits     += s->frame_bits;
1493         avctx->frame_bits  = s->frame_bits;
1494
1495         pkt->pts = s->current_picture.f.pts;
1496         if (!s->low_delay) {
1497             if (!s->current_picture.f.coded_picture_number)
1498                 pkt->dts = pkt->pts - s->dts_delta;
1499             else
1500                 pkt->dts = s->reordered_pts;
1501             s->reordered_pts = s->input_picture[0]->f.pts;
1502         } else
1503             pkt->dts = pkt->pts;
1504         if (s->current_picture.f.key_frame)
1505             pkt->flags |= AV_PKT_FLAG_KEY;
1506         if (s->mb_info)
1507             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1508     } else {
1509         s->frame_bits = 0;
1510     }
1511     assert((s->frame_bits & 7) == 0);
1512
1513     pkt->size = s->frame_bits / 8;
1514     *got_packet = !!pkt->size;
1515     return 0;
1516 }
1517
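/* Zero out a block whose only non-zero coefficients are a few isolated +-1s:
 * each such coefficient adds tab[run] to a score, and if the total stays below
 * the threshold the block is cheaper to drop than to code.  Any coefficient
 * with magnitude > 1 keeps the block; a negative threshold additionally allows
 * the DC coefficient to be eliminated. */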
1518 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1519                                                 int n, int threshold)
1520 {
1521     static const char tab[64] = {
1522         3, 2, 2, 1, 1, 1, 1, 1,
1523         1, 1, 1, 1, 1, 1, 1, 1,
1524         1, 1, 1, 1, 1, 1, 1, 1,
1525         0, 0, 0, 0, 0, 0, 0, 0,
1526         0, 0, 0, 0, 0, 0, 0, 0,
1527         0, 0, 0, 0, 0, 0, 0, 0,
1528         0, 0, 0, 0, 0, 0, 0, 0,
1529         0, 0, 0, 0, 0, 0, 0, 0
1530     };
1531     int score = 0;
1532     int run = 0;
1533     int i;
1534     int16_t *block = s->block[n];
1535     const int last_index = s->block_last_index[n];
1536     int skip_dc;
1537
1538     if (threshold < 0) {
1539         skip_dc = 0;
1540         threshold = -threshold;
1541     } else
1542         skip_dc = 1;
1543
1544     /* Is everything we are allowed to zero out already zero? */
1545     if (last_index <= skip_dc - 1)
1546         return;
1547
1548     for (i = 0; i <= last_index; i++) {
1549         const int j = s->intra_scantable.permutated[i];
1550         const int level = FFABS(block[j]);
1551         if (level == 1) {
1552             if (skip_dc && i == 0)
1553                 continue;
1554             score += tab[run];
1555             run = 0;
1556         } else if (level > 1) {
1557             return;
1558         } else {
1559             run++;
1560         }
1561     }
1562     if (score >= threshold)
1563         return;
1564     for (i = skip_dc; i <= last_index; i++) {
1565         const int j = s->intra_scantable.permutated[i];
1566         block[j] = 0;
1567     }
1568     if (block[0])
1569         s->block_last_index[n] = 0;
1570     else
1571         s->block_last_index[n] = -1;
1572 }
1573
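/* Clamp quantized coefficients to the range the entropy coder can represent
 * ([min_qcoeff, max_qcoeff]) and log a warning when clipping happens with
 * FF_MB_DECISION_SIMPLE. */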
1574 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1575                                int last_index)
1576 {
1577     int i;
1578     const int maxlevel = s->max_qcoeff;
1579     const int minlevel = s->min_qcoeff;
1580     int overflow = 0;
1581
1582     if (s->mb_intra) {
1583         i = 1; // skip clipping of intra dc
1584     } else
1585         i = 0;
1586
1587     for (; i <= last_index; i++) {
1588         const int j = s->intra_scantable.permutated[i];
1589         int level = block[j];
1590
1591         if (level > maxlevel) {
1592             level = maxlevel;
1593             overflow++;
1594         } else if (level < minlevel) {
1595             level = minlevel;
1596             overflow++;
1597         }
1598
1599         block[j] = level;
1600     }
1601
1602     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1603         av_log(s->avctx, AV_LOG_INFO,
1604                "warning, clipping %d dct coefficients to %d..%d\n",
1605                overflow, minlevel, maxlevel);
1606 }
1607
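/* Per-pixel visual weight used by quantizer noise shaping: roughly 36 times
 * the standard deviation of the 3x3 (border-clipped) neighbourhood, i.e. a
 * measure of local activity. */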
1608 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1609 {
1610     int x, y;
1611     // FIXME optimize
1612     for (y = 0; y < 8; y++) {
1613         for (x = 0; x < 8; x++) {
1614             int x2, y2;
1615             int sum = 0;
1616             int sqr = 0;
1617             int count = 0;
1618
1619             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1620                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1621                     int v = ptr[x2 + y2 * stride];
1622                     sum += v;
1623                     sqr += v * v;
1624                     count++;
1625                 }
1626             }
1627             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1628         }
1629     }
1630 }
1631
1632 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1633                                                 int motion_x, int motion_y,
1634                                                 int mb_block_height,
1635                                                 int mb_block_count)
1636 {
1637     int16_t weight[8][64];
1638     int16_t orig[8][64];
1639     const int mb_x = s->mb_x;
1640     const int mb_y = s->mb_y;
1641     int i;
1642     int skip_dct[8];
1643     int dct_offset = s->linesize * 8; // default for progressive frames
1644     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1645     ptrdiff_t wrap_y, wrap_c;
1646
1647     for (i = 0; i < mb_block_count; i++)
1648         skip_dct[i] = s->skipdct;
1649
1650     if (s->adaptive_quant) {
1651         const int last_qp = s->qscale;
1652         const int mb_xy = mb_x + mb_y * s->mb_stride;
1653
1654         s->lambda = s->lambda_table[mb_xy];
1655         update_qscale(s);
1656
1657         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1658             s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1659             s->dquant = s->qscale - last_qp;
1660
1661             if (s->out_format == FMT_H263) {
1662                 s->dquant = av_clip(s->dquant, -2, 2);
1663
1664                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1665                     if (!s->mb_intra) {
1666                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1667                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1668                                 s->dquant = 0;
1669                         }
1670                         if (s->mv_type == MV_TYPE_8X8)
1671                             s->dquant = 0;
1672                     }
1673                 }
1674             }
1675         }
1676         ff_set_qscale(s, last_qp + s->dquant);
1677     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1678         ff_set_qscale(s, s->qscale + s->dquant);
1679
1680     wrap_y = s->linesize;
1681     wrap_c = s->uvlinesize;
1682     ptr_y  = s->new_picture.f.data[0] +
1683              (mb_y * 16 * wrap_y)              + mb_x * 16;
1684     ptr_cb = s->new_picture.f.data[1] +
1685              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1686     ptr_cr = s->new_picture.f.data[2] +
1687              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1688
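    /* Macroblocks that extend past the right or bottom picture edge are copied
     * into the edge-emulation buffer with replicated borders, so the DCT code
     * below only ever reads valid pixels. */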
1689     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1690         uint8_t *ebuf = s->edge_emu_buffer + 32;
1691         s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1692                                  mb_y * 16, s->width, s->height);
1693         ptr_y = ebuf;
1694         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1695                                  mb_block_height, mb_x * 8, mb_y * 8,
1696                                  s->width >> 1, s->height >> 1);
1697         ptr_cb = ebuf + 18 * wrap_y;
1698         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1699                                  mb_block_height, mb_x * 8, mb_y * 8,
1700                                  s->width >> 1, s->height >> 1);
1701         ptr_cr = ebuf + 18 * wrap_y + 8;
1702     }
1703
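    /* Both the intra and the inter path below may pick a field (interlaced)
     * DCT: ildct_cmp scores the frame and field orderings, with a 400-point
     * bias towards progressive, and choosing field DCT adjusts dct_offset and
     * wrap_y so blocks 0/1 take one field's lines and blocks 2/3 the other's. */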
1704     if (s->mb_intra) {
1705         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1706             int progressive_score, interlaced_score;
1707
1708             s->interlaced_dct = 0;
1709             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1710                                                     NULL, wrap_y, 8) +
1711                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1712                                                     NULL, wrap_y, 8) - 400;
1713
1714             if (progressive_score > 0) {
1715                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1716                                                        NULL, wrap_y * 2, 8) +
1717                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1718                                                        NULL, wrap_y * 2, 8);
1719                 if (progressive_score > interlaced_score) {
1720                     s->interlaced_dct = 1;
1721
1722                     dct_offset = wrap_y;
1723                     wrap_y <<= 1;
1724                     if (s->chroma_format == CHROMA_422)
1725                         wrap_c <<= 1;
1726                 }
1727             }
1728         }
1729
1730         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1731         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1732         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1733         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1734
1735         if (s->flags & CODEC_FLAG_GRAY) {
1736             skip_dct[4] = 1;
1737             skip_dct[5] = 1;
1738         } else {
1739             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1740             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1741             if (!s->chroma_y_shift) { /* 422 */
1742                 s->dsp.get_pixels(s->block[6],
1743                                   ptr_cb + (dct_offset >> 1), wrap_c);
1744                 s->dsp.get_pixels(s->block[7],
1745                                   ptr_cr + (dct_offset >> 1), wrap_c);
1746             }
1747         }
1748     } else {
1749         op_pixels_func (*op_pix)[4];
1750         qpel_mc_func (*op_qpix)[16];
1751         uint8_t *dest_y, *dest_cb, *dest_cr;
1752
1753         dest_y  = s->dest[0];
1754         dest_cb = s->dest[1];
1755         dest_cr = s->dest[2];
1756
1757         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1758             op_pix  = s->hdsp.put_pixels_tab;
1759             op_qpix = s->dsp.put_qpel_pixels_tab;
1760         } else {
1761             op_pix  = s->hdsp.put_no_rnd_pixels_tab;
1762             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1763         }
1764
1765         if (s->mv_dir & MV_DIR_FORWARD) {
1766             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1767                           s->last_picture.f.data,
1768                           op_pix, op_qpix);
1769             op_pix  = s->hdsp.avg_pixels_tab;
1770             op_qpix = s->dsp.avg_qpel_pixels_tab;
1771         }
1772         if (s->mv_dir & MV_DIR_BACKWARD) {
1773             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1774                           s->next_picture.f.data,
1775                           op_pix, op_qpix);
1776         }
1777
1778         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1779             int progressive_score, interlaced_score;
1780
1781             s->interlaced_dct = 0;
1782             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1783                                                     ptr_y,              wrap_y,
1784                                                     8) +
1785                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1786                                                     ptr_y + wrap_y * 8, wrap_y,
1787                                                     8) - 400;
1788
1789             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1790                 progressive_score -= 400;
1791
1792             if (progressive_score > 0) {
1793                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1794                                                        ptr_y,
1795                                                        wrap_y * 2, 8) +
1796                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1797                                                        ptr_y + wrap_y,
1798                                                        wrap_y * 2, 8);
1799
1800                 if (progressive_score > interlaced_score) {
1801                     s->interlaced_dct = 1;
1802
1803                     dct_offset = wrap_y;
1804                     wrap_y <<= 1;
1805                     if (s->chroma_format == CHROMA_422)
1806                         wrap_c <<= 1;
1807                 }
1808             }
1809         }
1810
1811         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1812         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1813         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1814                            dest_y + dct_offset, wrap_y);
1815         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1816                            dest_y + dct_offset + 8, wrap_y);
1817
1818         if (s->flags & CODEC_FLAG_GRAY) {
1819             skip_dct[4] = 1;
1820             skip_dct[5] = 1;
1821         } else {
1822             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1823             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1824             if (!s->chroma_y_shift) { /* 422 */
1825                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1826                                    dest_cb + (dct_offset >> 1), wrap_c);
1827                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1828                                    dest_cr + (dct_offset >> 1), wrap_c);
1829             }
1830         }
1831         /* pre quantization */
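        /* If this MB's motion-compensated variance is already small, skip the
         * DCT for any 8x8 block whose SAD against the prediction is below
         * 20 * qscale; such blocks end up coded as empty. */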
1832         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1833                 2 * s->qscale * s->qscale) {
1834             // FIXME optimize
1835             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1836                               wrap_y, 8) < 20 * s->qscale)
1837                 skip_dct[0] = 1;
1838             if (s->dsp.sad[1](NULL, ptr_y + 8,
1839                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1840                 skip_dct[1] = 1;
1841             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1842                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1843                 skip_dct[2] = 1;
1844             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1845                               dest_y + dct_offset + 8,
1846                               wrap_y, 8) < 20 * s->qscale)
1847                 skip_dct[3] = 1;
1848             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1849                               wrap_c, 8) < 20 * s->qscale)
1850                 skip_dct[4] = 1;
1851             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1852                               wrap_c, 8) < 20 * s->qscale)
1853                 skip_dct[5] = 1;
1854             if (!s->chroma_y_shift) { /* 422 */
1855                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1856                                   dest_cb + (dct_offset >> 1),
1857                                   wrap_c, 8) < 20 * s->qscale)
1858                     skip_dct[6] = 1;
1859                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1860                                   dest_cr + (dct_offset >> 1),
1861                                   wrap_c, 8) < 20 * s->qscale)
1862                     skip_dct[7] = 1;
1863             }
1864         }
1865     }
1866
1867     if (s->quantizer_noise_shaping) {
1868         if (!skip_dct[0])
1869             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1870         if (!skip_dct[1])
1871             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1872         if (!skip_dct[2])
1873             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1874         if (!skip_dct[3])
1875             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1876         if (!skip_dct[4])
1877             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1878         if (!skip_dct[5])
1879             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1880         if (!s->chroma_y_shift) { /* 422 */
1881             if (!skip_dct[6])
1882                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1883                                   wrap_c);
1884             if (!skip_dct[7])
1885                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1886                                   wrap_c);
1887         }
1888         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1889     }
1890
1891     /* DCT & quantize */
1892     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1893     {
1894         for (i = 0; i < mb_block_count; i++) {
1895             if (!skip_dct[i]) {
1896                 int overflow;
1897                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1898                 // FIXME we could decide to change to quantizer instead of
1899                 // clipping
1900                 // JS: I don't think that would be a good idea it could lower
1901                 //     quality instead of improve it. Just INTRADC clipping
1902                 //     deserves changes in quantizer
1903                 if (overflow)
1904                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1905             } else
1906                 s->block_last_index[i] = -1;
1907         }
1908         if (s->quantizer_noise_shaping) {
1909             for (i = 0; i < mb_block_count; i++) {
1910                 if (!skip_dct[i]) {
1911                     s->block_last_index[i] =
1912                         dct_quantize_refine(s, s->block[i], weight[i],
1913                                             orig[i], i, s->qscale);
1914                 }
1915             }
1916         }
1917
1918         if (s->luma_elim_threshold && !s->mb_intra)
1919             for (i = 0; i < 4; i++)
1920                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1921         if (s->chroma_elim_threshold && !s->mb_intra)
1922             for (i = 4; i < mb_block_count; i++)
1923                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1924
1925         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1926             for (i = 0; i < mb_block_count; i++) {
1927                 if (s->block_last_index[i] == -1)
1928                     s->coded_score[i] = INT_MAX / 256;
1929             }
1930         }
1931     }
1932
1933     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1934         s->block_last_index[4] =
1935         s->block_last_index[5] = 0;
1936         s->block[4][0] =
1937         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1938     }
1939
1940     // FIXME: the non-C quantize code returns an incorrect block_last_index
1941     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1942         for (i = 0; i < mb_block_count; i++) {
1943             int j;
1944             if (s->block_last_index[i] > 0) {
1945                 for (j = 63; j > 0; j--) {
1946                     if (s->block[i][s->intra_scantable.permutated[j]])
1947                         break;
1948                 }
1949                 s->block_last_index[i] = j;
1950             }
1951         }
1952     }
1953
1954     /* huffman encode */
1955     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
1956     case AV_CODEC_ID_MPEG1VIDEO:
1957     case AV_CODEC_ID_MPEG2VIDEO:
1958         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1959             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1960         break;
1961     case AV_CODEC_ID_MPEG4:
1962         if (CONFIG_MPEG4_ENCODER)
1963             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1964         break;
1965     case AV_CODEC_ID_MSMPEG4V2:
1966     case AV_CODEC_ID_MSMPEG4V3:
1967     case AV_CODEC_ID_WMV1:
1968         if (CONFIG_MSMPEG4_ENCODER)
1969             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1970         break;
1971     case AV_CODEC_ID_WMV2:
1972         if (CONFIG_WMV2_ENCODER)
1973             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1974         break;
1975     case AV_CODEC_ID_H261:
1976         if (CONFIG_H261_ENCODER)
1977             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1978         break;
1979     case AV_CODEC_ID_H263:
1980     case AV_CODEC_ID_H263P:
1981     case AV_CODEC_ID_FLV1:
1982     case AV_CODEC_ID_RV10:
1983     case AV_CODEC_ID_RV20:
1984         if (CONFIG_H263_ENCODER)
1985             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
1986         break;
1987     case AV_CODEC_ID_MJPEG:
1988         if (CONFIG_MJPEG_ENCODER)
1989             ff_mjpeg_encode_mb(s, s->block);
1990         break;
1991     default:
1992         assert(0);
1993     }
1994 }
1995
1996 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
1997 {
1998     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
1999     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
2000 }
2001
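/* The two helpers below snapshot and restore the parts of MpegEncContext that
 * encoding one macroblock mutates, so that encode_mb_hq() can trial-encode
 * several candidate macroblock types and keep only the best one. */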
2002 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2003     int i;
2004
2005     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2006
2007     /* mpeg1 */
2008     d->mb_skip_run= s->mb_skip_run;
2009     for(i=0; i<3; i++)
2010         d->last_dc[i] = s->last_dc[i];
2011
2012     /* statistics */
2013     d->mv_bits= s->mv_bits;
2014     d->i_tex_bits= s->i_tex_bits;
2015     d->p_tex_bits= s->p_tex_bits;
2016     d->i_count= s->i_count;
2017     d->f_count= s->f_count;
2018     d->b_count= s->b_count;
2019     d->skip_count= s->skip_count;
2020     d->misc_bits= s->misc_bits;
2021     d->last_bits= 0;
2022
2023     d->mb_skipped= 0;
2024     d->qscale= s->qscale;
2025     d->dquant= s->dquant;
2026
2027     d->esc3_level_length= s->esc3_level_length;
2028 }
2029
2030 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2031     int i;
2032
2033     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2034     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2035
2036     /* mpeg1 */
2037     d->mb_skip_run= s->mb_skip_run;
2038     for(i=0; i<3; i++)
2039         d->last_dc[i] = s->last_dc[i];
2040
2041     /* statistics */
2042     d->mv_bits= s->mv_bits;
2043     d->i_tex_bits= s->i_tex_bits;
2044     d->p_tex_bits= s->p_tex_bits;
2045     d->i_count= s->i_count;
2046     d->f_count= s->f_count;
2047     d->b_count= s->b_count;
2048     d->skip_count= s->skip_count;
2049     d->misc_bits= s->misc_bits;
2050
2051     d->mb_intra= s->mb_intra;
2052     d->mb_skipped= s->mb_skipped;
2053     d->mv_type= s->mv_type;
2054     d->mv_dir= s->mv_dir;
2055     d->pb= s->pb;
2056     if(s->data_partitioning){
2057         d->pb2= s->pb2;
2058         d->tex_pb= s->tex_pb;
2059     }
2060     d->block= s->block;
2061     for(i=0; i<8; i++)
2062         d->block_last_index[i]= s->block_last_index[i];
2063     d->interlaced_dct= s->interlaced_dct;
2064     d->qscale= s->qscale;
2065
2066     d->esc3_level_length= s->esc3_level_length;
2067 }
2068
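/* Trial-encode one macroblock candidate into one of two ping-pong bit buffers.
 * The cost is the number of bits produced; with FF_MB_DECISION_RD the MB is
 * also reconstructed and the cost becomes bits * lambda2 + (SSE << FF_LAMBDA_SHIFT).
 * If the candidate beats *dmin its context is copied into 'best'. */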
2069 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2070                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2071                            int *dmin, int *next_block, int motion_x, int motion_y)
2072 {
2073     int score;
2074     uint8_t *dest_backup[3];
2075
2076     copy_context_before_encode(s, backup, type);
2077
2078     s->block= s->blocks[*next_block];
2079     s->pb= pb[*next_block];
2080     if(s->data_partitioning){
2081         s->pb2   = pb2   [*next_block];
2082         s->tex_pb= tex_pb[*next_block];
2083     }
2084
2085     if(*next_block){
2086         memcpy(dest_backup, s->dest, sizeof(s->dest));
2087         s->dest[0] = s->rd_scratchpad;
2088         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2089         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2090         assert(s->linesize >= 32); //FIXME
2091     }
2092
2093     encode_mb(s, motion_x, motion_y);
2094
2095     score= put_bits_count(&s->pb);
2096     if(s->data_partitioning){
2097         score+= put_bits_count(&s->pb2);
2098         score+= put_bits_count(&s->tex_pb);
2099     }
2100
2101     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2102         ff_MPV_decode_mb(s, s->block);
2103
2104         score *= s->lambda2;
2105         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2106     }
2107
2108     if(*next_block){
2109         memcpy(s->dest, dest_backup, sizeof(s->dest));
2110     }
2111
2112     if(score<*dmin){
2113         *dmin= score;
2114         *next_block^=1;
2115
2116         copy_context_after_encode(best, s, type);
2117     }
2118 }
2119
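/* Sum of squared errors between source and reconstruction; 16x16 and 8x8 use
 * the optimized dsp routines, everything else falls back to the scalar loop
 * (needed for the partial macroblocks at the right/bottom picture edge). */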
2120 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2121     uint32_t *sq = ff_squareTbl + 256;
2122     int acc=0;
2123     int x,y;
2124
2125     if(w==16 && h==16)
2126         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2127     else if(w==8 && h==8)
2128         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2129
2130     for(y=0; y<h; y++){
2131         for(x=0; x<w; x++){
2132             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2133         }
2134     }
2135
2136     assert(acc>=0);
2137
2138     return acc;
2139 }
2140
2141 static int sse_mb(MpegEncContext *s){
2142     int w= 16;
2143     int h= 16;
2144
2145     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2146     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2147
2148     if(w==16 && h==16)
2149       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2150         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2151                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2152                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2153       }else{
2154         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2155                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2156                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2157       }
2158     else
2159         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2160                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2161                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2162 }
2163
2164 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2165     MpegEncContext *s= *(void**)arg;
2166
2167
2168     s->me.pre_pass=1;
2169     s->me.dia_size= s->avctx->pre_dia_size;
2170     s->first_slice_line=1;
2171     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2172         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2173             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2174         }
2175         s->first_slice_line=0;
2176     }
2177
2178     s->me.pre_pass=0;
2179
2180     return 0;
2181 }
2182
2183 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2184     MpegEncContext *s= *(void**)arg;
2185
2186     ff_check_alignment();
2187
2188     s->me.dia_size= s->avctx->dia_size;
2189     s->first_slice_line=1;
2190     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2191         s->mb_x=0; //for block init below
2192         ff_init_block_index(s);
2193         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2194             s->block_index[0]+=2;
2195             s->block_index[1]+=2;
2196             s->block_index[2]+=2;
2197             s->block_index[3]+=2;
2198
2199             /* compute motion vector & mb_type and store in context */
2200             if(s->pict_type==AV_PICTURE_TYPE_B)
2201                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2202             else
2203                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2204         }
2205         s->first_slice_line=0;
2206     }
2207     return 0;
2208 }
2209
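/* Per-macroblock spatial statistics for rate control and adaptive quantization:
 * mb_var is approximately the luma variance of the 16x16 block and mb_mean its
 * average luma value. */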
2210 static int mb_var_thread(AVCodecContext *c, void *arg){
2211     MpegEncContext *s= *(void**)arg;
2212     int mb_x, mb_y;
2213
2214     ff_check_alignment();
2215
2216     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2217         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2218             int xx = mb_x * 16;
2219             int yy = mb_y * 16;
2220             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2221             int varc;
2222             int sum = s->dsp.pix_sum(pix, s->linesize);
2223
2224             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2225
2226             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2227             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2228             s->me.mb_var_sum_temp    += varc;
2229         }
2230     }
2231     return 0;
2232 }
2233
2234 static void write_slice_end(MpegEncContext *s){
2235     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2236         if(s->partitioned_frame){
2237             ff_mpeg4_merge_partitions(s);
2238         }
2239
2240         ff_mpeg4_stuffing(&s->pb);
2241     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2242         ff_mjpeg_encode_stuffing(&s->pb);
2243     }
2244
2245     avpriv_align_put_bits(&s->pb);
2246     flush_put_bits(&s->pb);
2247
2248     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2249         s->misc_bits+= get_bits_diff(s);
2250 }
2251
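/* Each AV_PKT_DATA_H263_MB_INFO record is 12 bytes: the bit offset of the MB
 * in the packet (le32), qscale, GOB number, MB address within the GOB (le16)
 * and the motion-vector predictors hmv1/vmv1 (hmv2/vmv2 stay zero because 4MV
 * is not implemented). */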
2252 static void write_mb_info(MpegEncContext *s)
2253 {
2254     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2255     int offset = put_bits_count(&s->pb);
2256     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2257     int gobn = s->mb_y / s->gob_index;
2258     int pred_x, pred_y;
2259     if (CONFIG_H263_ENCODER)
2260         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2261     bytestream_put_le32(&ptr, offset);
2262     bytestream_put_byte(&ptr, s->qscale);
2263     bytestream_put_byte(&ptr, gobn);
2264     bytestream_put_le16(&ptr, mba);
2265     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2266     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2267     /* 4MV not implemented */
2268     bytestream_put_byte(&ptr, 0); /* hmv2 */
2269     bytestream_put_byte(&ptr, 0); /* vmv2 */
2270 }
2271
2272 static void update_mb_info(MpegEncContext *s, int startcode)
2273 {
2274     if (!s->mb_info)
2275         return;
2276     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2277         s->mb_info_size += 12;
2278         s->prev_mb_info = s->last_mb_info;
2279     }
2280     if (startcode) {
2281         s->prev_mb_info = put_bits_count(&s->pb)/8;
2282         /* This might have incremented mb_info_size above, and we return without
2283          * actually writing any info into that slot yet. But in that case,
2284          * this will be called again after the start code has been written, and
2285          * the mb info will be written at that point. */
2286         return;
2287     }
2288
2289     s->last_mb_info = put_bits_count(&s->pb)/8;
2290     if (!s->mb_info_size)
2291         s->mb_info_size += 12;
2292     write_mb_info(s);
2293 }
2294
2295 static int encode_thread(AVCodecContext *c, void *arg){
2296     MpegEncContext *s= *(void**)arg;
2297     int mb_x, mb_y, pdif = 0;
2298     int chr_h= 16>>s->chroma_y_shift;
2299     int i, j;
2300     MpegEncContext best_s, backup_s;
2301     uint8_t bit_buf[2][MAX_MB_BYTES];
2302     uint8_t bit_buf2[2][MAX_MB_BYTES];
2303     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2304     PutBitContext pb[2], pb2[2], tex_pb[2];
2305
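    /* Two sets of scratch bitstream writers: encode_mb_hq() ping-pongs between
     * them so the best candidate encoded so far always survives in the other
     * buffer until it is copied into the real bitstream. */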
2306     ff_check_alignment();
2307
2308     for(i=0; i<2; i++){
2309         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2310         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2311         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2312     }
2313
2314     s->last_bits= put_bits_count(&s->pb);
2315     s->mv_bits=0;
2316     s->misc_bits=0;
2317     s->i_tex_bits=0;
2318     s->p_tex_bits=0;
2319     s->i_count=0;
2320     s->f_count=0;
2321     s->b_count=0;
2322     s->skip_count=0;
2323
2324     for(i=0; i<3; i++){
2325         /* init last dc values */
2326         /* note: quant matrix value (8) is implied here */
2327         s->last_dc[i] = 128 << s->intra_dc_precision;
2328
2329         s->current_picture.f.error[i] = 0;
2330     }
2331     s->mb_skip_run = 0;
2332     memset(s->last_mv, 0, sizeof(s->last_mv));
2333
2334     s->last_mv_dir = 0;
2335
2336     switch(s->codec_id){
2337     case AV_CODEC_ID_H263:
2338     case AV_CODEC_ID_H263P:
2339     case AV_CODEC_ID_FLV1:
2340         if (CONFIG_H263_ENCODER)
2341             s->gob_index = ff_h263_get_gob_height(s);
2342         break;
2343     case AV_CODEC_ID_MPEG4:
2344         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2345             ff_mpeg4_init_partitions(s);
2346         break;
2347     }
2348
2349     s->resync_mb_x=0;
2350     s->resync_mb_y=0;
2351     s->first_slice_line = 1;
2352     s->ptr_lastgob = s->pb.buf;
2353     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2354         s->mb_x=0;
2355         s->mb_y= mb_y;
2356
2357         ff_set_qscale(s, s->qscale);
2358         ff_init_block_index(s);
2359
2360         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2361             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2362             int mb_type= s->mb_type[xy];
2363 //            int d;
2364             int dmin= INT_MAX;
2365             int dir;
2366
2367             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2368                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2369                 return -1;
2370             }
2371             if(s->data_partitioning){
2372                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2373                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2374                     av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2375                     return -1;
2376                 }
2377             }
2378
2379             s->mb_x = mb_x;
2380             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2381             ff_update_block_index(s);
2382
2383             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2384                 ff_h261_reorder_mb_index(s);
2385                 xy= s->mb_y*s->mb_stride + s->mb_x;
2386                 mb_type= s->mb_type[xy];
2387             }
2388
2389             /* write gob / video packet header  */
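            /* A new GOB / video packet / slice starts once the current packet
             * reaches rtp_payload_size bytes (or, for MPEG-2, at the start of
             * each new MB row); write_slice_end() flushes the previous one
             * before the codec-specific resync header is written below. */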
2390             if(s->rtp_mode){
2391                 int current_packet_size, is_gob_start;
2392
2393                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2394
2395                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2396
2397                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2398
2399                 switch(s->codec_id){
2400                 case AV_CODEC_ID_H263:
2401                 case AV_CODEC_ID_H263P:
2402                     if(!s->h263_slice_structured)
2403                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2404                     break;
2405                 case AV_CODEC_ID_MPEG2VIDEO:
2406                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
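                    /* fall through: MPEG-2 also honours the mb_skip_run check below */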
2407                 case AV_CODEC_ID_MPEG1VIDEO:
2408                     if(s->mb_skip_run) is_gob_start=0;
2409                     break;
2410                 }
2411
2412                 if(is_gob_start){
2413                     if(s->start_mb_y != mb_y || mb_x!=0){
2414                         write_slice_end(s);
2415
2416                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2417                             ff_mpeg4_init_partitions(s);
2418                         }
2419                     }
2420
2421                     assert((put_bits_count(&s->pb)&7) == 0);
2422                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2423
2424                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2425                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2426                         int d= 100 / s->avctx->error_rate;
2427                         if(r % d == 0){
2428                             current_packet_size=0;
2429                             s->pb.buf_ptr= s->ptr_lastgob;
2430                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2431                         }
2432                     }
2433
2434                     if (s->avctx->rtp_callback){
2435                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2436                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2437                     }
2438                     update_mb_info(s, 1);
2439
2440                     switch(s->codec_id){
2441                     case AV_CODEC_ID_MPEG4:
2442                         if (CONFIG_MPEG4_ENCODER) {
2443                             ff_mpeg4_encode_video_packet_header(s);
2444                             ff_mpeg4_clean_buffers(s);
2445                         }
2446                     break;
2447                     case AV_CODEC_ID_MPEG1VIDEO:
2448                     case AV_CODEC_ID_MPEG2VIDEO:
2449                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2450                             ff_mpeg1_encode_slice_header(s);
2451                             ff_mpeg1_clean_buffers(s);
2452                         }
2453                     break;
2454                     case AV_CODEC_ID_H263:
2455                     case AV_CODEC_ID_H263P:
2456                         if (CONFIG_H263_ENCODER)
2457                             ff_h263_encode_gob_header(s, mb_y);
2458                     break;
2459                     }
2460
2461                     if(s->flags&CODEC_FLAG_PASS1){
2462                         int bits= put_bits_count(&s->pb);
2463                         s->misc_bits+= bits - s->last_bits;
2464                         s->last_bits= bits;
2465                     }
2466
2467                     s->ptr_lastgob += current_packet_size;
2468                     s->first_slice_line=1;
2469                     s->resync_mb_x=mb_x;
2470                     s->resync_mb_y=mb_y;
2471                 }
2472             }
2473
2474             if(  (s->resync_mb_x   == s->mb_x)
2475                && s->resync_mb_y+1 == s->mb_y){
2476                 s->first_slice_line=0;
2477             }
2478
2479             s->mb_skipped=0;
2480             s->dquant=0; //only for QP_RD
2481
2482             update_mb_info(s, 0);
2483
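            /* More than one candidate MB type is possible (or QP_RD is on):
             * trial-encode every candidate with encode_mb_hq() and keep the
             * cheapest; the else branch below handles the single-type case
             * directly. */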
2484             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2485                 int next_block=0;
2486                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2487
2488                 copy_context_before_encode(&backup_s, s, -1);
2489                 backup_s.pb= s->pb;
2490                 best_s.data_partitioning= s->data_partitioning;
2491                 best_s.partitioned_frame= s->partitioned_frame;
2492                 if(s->data_partitioning){
2493                     backup_s.pb2= s->pb2;
2494                     backup_s.tex_pb= s->tex_pb;
2495                 }
2496
2497                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2498                     s->mv_dir = MV_DIR_FORWARD;
2499                     s->mv_type = MV_TYPE_16X16;
2500                     s->mb_intra= 0;
2501                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2502                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2503                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2504                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2505                 }
2506                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2507                     s->mv_dir = MV_DIR_FORWARD;
2508                     s->mv_type = MV_TYPE_FIELD;
2509                     s->mb_intra= 0;
2510                     for(i=0; i<2; i++){
2511                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2512                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2513                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2514                     }
2515                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2516                                  &dmin, &next_block, 0, 0);
2517                 }
2518                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2519                     s->mv_dir = MV_DIR_FORWARD;
2520                     s->mv_type = MV_TYPE_16X16;
2521                     s->mb_intra= 0;
2522                     s->mv[0][0][0] = 0;
2523                     s->mv[0][0][1] = 0;
2524                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2525                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2526                 }
2527                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2528                     s->mv_dir = MV_DIR_FORWARD;
2529                     s->mv_type = MV_TYPE_8X8;
2530                     s->mb_intra= 0;
2531                     for(i=0; i<4; i++){
2532                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2533                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2534                     }
2535                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2536                                  &dmin, &next_block, 0, 0);
2537                 }
2538                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2539                     s->mv_dir = MV_DIR_FORWARD;
2540                     s->mv_type = MV_TYPE_16X16;
2541                     s->mb_intra= 0;
2542                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2543                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2544                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2545                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2546                 }
2547                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2548                     s->mv_dir = MV_DIR_BACKWARD;
2549                     s->mv_type = MV_TYPE_16X16;
2550                     s->mb_intra= 0;
2551                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2552                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2553                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2554                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2555                 }
2556                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2557                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2558                     s->mv_type = MV_TYPE_16X16;
2559                     s->mb_intra= 0;
2560                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2561                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2562                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2563                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2564                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2565                                  &dmin, &next_block, 0, 0);
2566                 }
2567                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2568                     s->mv_dir = MV_DIR_FORWARD;
2569                     s->mv_type = MV_TYPE_FIELD;
2570                     s->mb_intra= 0;
2571                     for(i=0; i<2; i++){
2572                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2573                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2574                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2575                     }
2576                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2577                                  &dmin, &next_block, 0, 0);
2578                 }
2579                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2580                     s->mv_dir = MV_DIR_BACKWARD;
2581                     s->mv_type = MV_TYPE_FIELD;
2582                     s->mb_intra= 0;
2583                     for(i=0; i<2; i++){
2584                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2585                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2586                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2587                     }
2588                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2589                                  &dmin, &next_block, 0, 0);
2590                 }
2591                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2592                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2593                     s->mv_type = MV_TYPE_FIELD;
2594                     s->mb_intra= 0;
2595                     for(dir=0; dir<2; dir++){
2596                         for(i=0; i<2; i++){
2597                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2598                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2599                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2600                         }
2601                     }
2602                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2603                                  &dmin, &next_block, 0, 0);
2604                 }
2605                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2606                     s->mv_dir = 0;
2607                     s->mv_type = MV_TYPE_16X16;
2608                     s->mb_intra= 1;
2609                     s->mv[0][0][0] = 0;
2610                     s->mv[0][0][1] = 0;
2611                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2612                                  &dmin, &next_block, 0, 0);
2613                     if(s->h263_pred || s->h263_aic){
2614                         if(best_s.mb_intra)
2615                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2616                         else
2617                             ff_clean_intra_table_entries(s); //old mode?
2618                     }
2619                 }
2620
2621                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2622                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2623                         const int last_qp= backup_s.qscale;
2624                         int qpi, qp, dc[6];
2625                         int16_t ac[6][16];
2626                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2627                         static const int dquant_tab[4]={-1,1,-2,2};
2628
2629                         assert(backup_s.dquant == 0);
2630
2631                         //FIXME intra
2632                         s->mv_dir= best_s.mv_dir;
2633                         s->mv_type = MV_TYPE_16X16;
2634                         s->mb_intra= best_s.mb_intra;
2635                         s->mv[0][0][0] = best_s.mv[0][0][0];
2636                         s->mv[0][0][1] = best_s.mv[0][0][1];
2637                         s->mv[1][0][0] = best_s.mv[1][0][0];
2638                         s->mv[1][0][1] = best_s.mv[1][0][1];
2639
2640                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2641                         for(; qpi<4; qpi++){
2642                             int dquant= dquant_tab[qpi];
2643                             qp= last_qp + dquant;
2644                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2645                                 continue;
2646                             backup_s.dquant= dquant;
2647                             if(s->mb_intra && s->dc_val[0]){
2648                                 for(i=0; i<6; i++){
2649                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2650                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2651                                 }
2652                             }
2653
2654                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2655                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2656                             if(best_s.qscale != qp){
2657                                 if(s->mb_intra && s->dc_val[0]){
2658                                     for(i=0; i<6; i++){
2659                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2660                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2661                                     }
2662                                 }
2663                             }
2664                         }
2665                     }
2666                 }
2667                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2668                     int mx= s->b_direct_mv_table[xy][0];
2669                     int my= s->b_direct_mv_table[xy][1];
2670
2671                     backup_s.dquant = 0;
2672                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2673                     s->mb_intra= 0;
2674                     ff_mpeg4_set_direct_mv(s, mx, my);
2675                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2676                                  &dmin, &next_block, mx, my);
2677                 }
2678                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2679                     backup_s.dquant = 0;
2680                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2681                     s->mb_intra= 0;
2682                     ff_mpeg4_set_direct_mv(s, 0, 0);
2683                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2684                                  &dmin, &next_block, 0, 0);
2685                 }
2686                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2687                     int coded=0;
2688                     for(i=0; i<6; i++)
2689                         coded |= s->block_last_index[i];
2690                     if(coded){
2691                         int mx,my;
2692                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2693                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2694                             mx=my=0; //FIXME find the one we actually used
2695                             ff_mpeg4_set_direct_mv(s, mx, my);
2696                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2697                             mx= s->mv[1][0][0];
2698                             my= s->mv[1][0][1];
2699                         }else{
2700                             mx= s->mv[0][0][0];
2701                             my= s->mv[0][0][1];
2702                         }
2703
2704                         s->mv_dir= best_s.mv_dir;
2705                         s->mv_type = best_s.mv_type;
2706                         s->mb_intra= 0;
2707 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2708                         s->mv[0][0][1] = best_s.mv[0][0][1];
2709                         s->mv[1][0][0] = best_s.mv[1][0][0];
2710                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2711                         backup_s.dquant= 0;
2712                         s->skipdct=1;
2713                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2714                                         &dmin, &next_block, mx, my);
2715                         s->skipdct=0;
2716                     }
2717                 }
2718
2719                 s->current_picture.qscale_table[xy] = best_s.qscale;
2720
2721                 copy_context_after_encode(s, &best_s, -1);
2722
2723                 pb_bits_count= put_bits_count(&s->pb);
2724                 flush_put_bits(&s->pb);
2725                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2726                 s->pb= backup_s.pb;
2727
2728                 if(s->data_partitioning){
2729                     pb2_bits_count= put_bits_count(&s->pb2);
2730                     flush_put_bits(&s->pb2);
2731                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2732                     s->pb2= backup_s.pb2;
2733
2734                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2735                     flush_put_bits(&s->tex_pb);
2736                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2737                     s->tex_pb= backup_s.tex_pb;
2738                 }
2739                 s->last_bits= put_bits_count(&s->pb);
2740
2741                 if (CONFIG_H263_ENCODER &&
2742                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2743                     ff_h263_update_motion_val(s);
2744
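                     /* next_block == 0 here means the best candidate was reconstructed into the
                        RD scratchpad (encode_mb_hq toggles the buffer whenever a new best is
                        found), so the scratchpad content has to be copied back into the picture. */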
2745                 if(next_block==0){ //FIXME 16 vs linesize16
2746                     s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2747                     s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2748                     s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2749                 }
2750
2751                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2752                     ff_MPV_decode_mb(s, s->block);
2753             } else {
2754                 int motion_x = 0, motion_y = 0;
2755                 s->mv_type=MV_TYPE_16X16;
2756                 // only one MB-Type possible
2757
2758                 switch(mb_type){
2759                 case CANDIDATE_MB_TYPE_INTRA:
2760                     s->mv_dir = 0;
2761                     s->mb_intra= 1;
2762                     motion_x= s->mv[0][0][0] = 0;
2763                     motion_y= s->mv[0][0][1] = 0;
2764                     break;
2765                 case CANDIDATE_MB_TYPE_INTER:
2766                     s->mv_dir = MV_DIR_FORWARD;
2767                     s->mb_intra= 0;
2768                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2769                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2770                     break;
2771                 case CANDIDATE_MB_TYPE_INTER_I:
2772                     s->mv_dir = MV_DIR_FORWARD;
2773                     s->mv_type = MV_TYPE_FIELD;
2774                     s->mb_intra= 0;
2775                     for(i=0; i<2; i++){
2776                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2777                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2778                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2779                     }
2780                     break;
2781                 case CANDIDATE_MB_TYPE_INTER4V:
2782                     s->mv_dir = MV_DIR_FORWARD;
2783                     s->mv_type = MV_TYPE_8X8;
2784                     s->mb_intra= 0;
2785                     for(i=0; i<4; i++){
2786                         s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2787                         s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2788                     }
2789                     break;
2790                 case CANDIDATE_MB_TYPE_DIRECT:
2791                     if (CONFIG_MPEG4_ENCODER) {
2792                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2793                         s->mb_intra= 0;
2794                         motion_x=s->b_direct_mv_table[xy][0];
2795                         motion_y=s->b_direct_mv_table[xy][1];
2796                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2797                     }
2798                     break;
2799                 case CANDIDATE_MB_TYPE_DIRECT0:
2800                     if (CONFIG_MPEG4_ENCODER) {
2801                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2802                         s->mb_intra= 0;
2803                         ff_mpeg4_set_direct_mv(s, 0, 0);
2804                     }
2805                     break;
2806                 case CANDIDATE_MB_TYPE_BIDIR:
2807                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2808                     s->mb_intra= 0;
2809                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2810                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2811                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2812                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2813                     break;
2814                 case CANDIDATE_MB_TYPE_BACKWARD:
2815                     s->mv_dir = MV_DIR_BACKWARD;
2816                     s->mb_intra= 0;
2817                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2818                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2819                     break;
2820                 case CANDIDATE_MB_TYPE_FORWARD:
2821                     s->mv_dir = MV_DIR_FORWARD;
2822                     s->mb_intra= 0;
2823                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2824                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2825                     break;
2826                 case CANDIDATE_MB_TYPE_FORWARD_I:
2827                     s->mv_dir = MV_DIR_FORWARD;
2828                     s->mv_type = MV_TYPE_FIELD;
2829                     s->mb_intra= 0;
2830                     for(i=0; i<2; i++){
2831                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2832                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2833                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2834                     }
2835                     break;
2836                 case CANDIDATE_MB_TYPE_BACKWARD_I:
2837                     s->mv_dir = MV_DIR_BACKWARD;
2838                     s->mv_type = MV_TYPE_FIELD;
2839                     s->mb_intra= 0;
2840                     for(i=0; i<2; i++){
2841                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2842                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2843                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2844                     }
2845                     break;
2846                 case CANDIDATE_MB_TYPE_BIDIR_I:
2847                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2848                     s->mv_type = MV_TYPE_FIELD;
2849                     s->mb_intra= 0;
2850                     for(dir=0; dir<2; dir++){
2851                         for(i=0; i<2; i++){
2852                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2853                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2854                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2855                         }
2856                     }
2857                     break;
2858                 default:
2859                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2860                 }
2861
2862                 encode_mb(s, motion_x, motion_y);
2863
2864                 // RAL: Update last macroblock type
2865                 s->last_mv_dir = s->mv_dir;
2866
2867                 if (CONFIG_H263_ENCODER &&
2868                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2869                     ff_h263_update_motion_val(s);
2870
2871                 ff_MPV_decode_mb(s, s->block);
2872             }
2873
2874             /* clear the MV table entries of intra MBs (in I/P/S frames); they are referenced by direct mode in B-frames */
2875             if(s->mb_intra /* && I,P,S_TYPE */){
2876                 s->p_mv_table[xy][0]=0;
2877                 s->p_mv_table[xy][1]=0;
2878             }
2879
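             /* With CODEC_FLAG_PSNR, accumulate the per-plane squared error against the
                source picture; w and h clip the macroblock at the right/bottom border. */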
2880             if(s->flags&CODEC_FLAG_PSNR){
2881                 int w= 16;
2882                 int h= 16;
2883
2884                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2885                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2886
2887                 s->current_picture.f.error[0] += sse(
2888                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2889                     s->dest[0], w, h, s->linesize);
2890                 s->current_picture.f.error[1] += sse(
2891                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2892                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2893                 s->current_picture.f.error[2] += sse(
2894                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
2895                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2896             }
2897             if(s->loop_filter){
2898                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2899                     ff_h263_loop_filter(s);
2900             }
2901             av_dlog(s->avctx, "MB %d %d bits\n",
2902                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2903         }
2904     }
2905
2906     // not pretty, but the extension header must be written before the flush below, so it has to go here
2907     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2908         ff_msmpeg4_encode_ext_header(s);
2909
2910     write_slice_end(s);
2911
2912     /* Send the last GOB if RTP */
2913     if (s->avctx->rtp_callback) {
2914         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2915         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2916         /* Call the RTP callback to send the last GOB */
2917         emms_c();
2918         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2919     }
2920
2921     return 0;
2922 }
2923
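     /* Helpers for slice-threaded encoding: every slice context gathers its own
      * statistics, which are merged back into the main context after motion
      * estimation and after encoding. MERGE() adds src's field into dst and
      * clears it in src. */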
2924 #define MERGE(field) dst->field += src->field; src->field=0
2925 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2926     MERGE(me.scene_change_score);
2927     MERGE(me.mc_mb_var_sum_temp);
2928     MERGE(me.mb_var_sum_temp);
2929 }
2930
2931 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2932     int i;
2933
2934     MERGE(dct_count[0]); //note: the other DCT vars are not part of the context
2935     MERGE(dct_count[1]);
2936     MERGE(mv_bits);
2937     MERGE(i_tex_bits);
2938     MERGE(p_tex_bits);
2939     MERGE(i_count);
2940     MERGE(f_count);
2941     MERGE(b_count);
2942     MERGE(skip_count);
2943     MERGE(misc_bits);
2944     MERGE(er.error_count);
2945     MERGE(padding_bug_score);
2946     MERGE(current_picture.f.error[0]);
2947     MERGE(current_picture.f.error[1]);
2948     MERGE(current_picture.f.error[2]);
2949
2950     if(dst->avctx->noise_reduction){
2951         for(i=0; i<64; i++){
2952             MERGE(dct_error_sum[0][i]);
2953             MERGE(dct_error_sum[1][i]);
2954         }
2955     }
2956
2957     assert(put_bits_count(&src->pb) % 8 ==0);
2958     assert(put_bits_count(&dst->pb) % 8 ==0);
2959     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2960     flush_put_bits(&dst->pb);
2961 }
2962
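     /* Choose the quality (lambda/qscale) for the current picture: either a queued
      * next_lambda, or the rate-control estimate when the qscale is not fixed.
      * With adaptive quantization, the per-MB qscale table is additionally cleaned
      * up to respect the codec's allowed qscale deltas. */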
2963 static int estimate_qp(MpegEncContext *s, int dry_run){
2964     if (s->next_lambda){
2965         s->current_picture_ptr->f.quality =
2966         s->current_picture.f.quality = s->next_lambda;
2967         if(!dry_run) s->next_lambda= 0;
2968     } else if (!s->fixed_qscale) {
2969         s->current_picture_ptr->f.quality =
2970         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2971         if (s->current_picture.f.quality < 0)
2972             return -1;
2973     }
2974
2975     if(s->adaptive_quant){
2976         switch(s->codec_id){
2977         case AV_CODEC_ID_MPEG4:
2978             if (CONFIG_MPEG4_ENCODER)
2979                 ff_clean_mpeg4_qscales(s);
2980             break;
2981         case AV_CODEC_ID_H263:
2982         case AV_CODEC_ID_H263P:
2983         case AV_CODEC_ID_FLV1:
2984             if (CONFIG_H263_ENCODER)
2985                 ff_clean_h263_qscales(s);
2986             break;
2987         default:
2988             ff_init_qscale_tab(s);
2989         }
2990
2991         s->lambda= s->lambda_table[0];
2992         //FIXME broken
2993     }else
2994         s->lambda = s->current_picture.f.quality;
2995     update_qscale(s);
2996     return 0;
2997 }
2998
2999 /* must be called before writing the header */
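     /* pp_time is the distance between the two surrounding non-B reference pictures,
      * pb_time the distance from the previous reference to the current B picture;
      * both are later used e.g. for direct-mode MV scaling. */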
3000 static void set_frame_distances(MpegEncContext * s){
3001     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3002     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3003
3004     if(s->pict_type==AV_PICTURE_TYPE_B){
3005         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3006         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3007     }else{
3008         s->pp_time= s->time - s->last_non_b_time;
3009         s->last_non_b_time= s->time;
3010         assert(s->picture_number==0 || s->pp_time > 0);
3011     }
3012 }
3013
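     /* Encode one picture: run motion estimation over all slice contexts, pick
      * f_code/b_code and clip overlong MVs, estimate the qscale, write the picture
      * header for the active output format, and finally encode all macroblocks via
      * encode_thread(), merging the per-slice statistics afterwards. */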
3014 static int encode_picture(MpegEncContext *s, int picture_number)
3015 {
3016     int i, ret;
3017     int bits;
3018     int context_count = s->slice_context_count;
3019
3020     s->picture_number = picture_number;
3021
3022     /* Reset the average MB variance */
3023     s->me.mb_var_sum_temp    =
3024     s->me.mc_mb_var_sum_temp = 0;
3025
3026     /* we need to initialize some time vars before we can encode b-frames */
3027     // RAL: Condition added for MPEG1VIDEO
3028     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3029         set_frame_distances(s);
3030     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3031         ff_set_mpeg4_time(s);
3032
3033     s->me.scene_change_score=0;
3034
3035 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3036
3037     if(s->pict_type==AV_PICTURE_TYPE_I){
3038         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3039         else                        s->no_rounding=0;
3040     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3041         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3042             s->no_rounding ^= 1;
3043     }
3044
3045     if(s->flags & CODEC_FLAG_PASS2){
3046         if (estimate_qp(s,1) < 0)
3047             return -1;
3048         ff_get_2pass_fcode(s);
3049     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3050         if(s->pict_type==AV_PICTURE_TYPE_B)
3051             s->lambda= s->last_lambda_for[s->pict_type];
3052         else
3053             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3054         update_qscale(s);
3055     }
3056
3057     s->mb_intra=0; //for the rate distortion & bit compare functions
3058     for(i=1; i<context_count; i++){
3059         ret = ff_update_duplicate_context(s->thread_context[i], s);
3060         if (ret < 0)
3061             return ret;
3062     }
3063
3064     if(ff_init_me(s)<0)
3065         return -1;
3066
3067     /* Estimate motion for every MB */
3068     if(s->pict_type != AV_PICTURE_TYPE_I){
3069         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3070         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3071         if (s->pict_type != AV_PICTURE_TYPE_B) {
3072             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3073                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3074             }
3075         }
3076
3077         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3078     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3079         /* I-Frame */
3080         for(i=0; i<s->mb_stride*s->mb_height; i++)
3081             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3082
3083         if(!s->fixed_qscale){
3084             /* finding spatial complexity for I-frame rate control */
3085             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3086         }
3087     }
3088     for(i=1; i<context_count; i++){
3089         merge_context_after_me(s, s->thread_context[i]);
3090     }
3091     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3092     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
3093     emms_c();
3094
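         /* A scene-change score above the threshold turns the P-frame into an I-frame
            and resets every macroblock candidate to intra. */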
3095     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3096         s->pict_type= AV_PICTURE_TYPE_I;
3097         for(i=0; i<s->mb_stride*s->mb_height; i++)
3098             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3099         av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3100                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3101     }
3102
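     /* Pick the smallest f_code/b_code that can represent the estimated motion
      * vectors, then clamp any vectors that still do not fit into that range. */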
3103     if(!s->umvplus){
3104         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3105             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3106
3107             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3108                 int a,b;
3109                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3110                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3111                 s->f_code= FFMAX3(s->f_code, a, b);
3112             }
3113
3114             ff_fix_long_p_mvs(s);
3115             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3116             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3117                 int j;
3118                 for(i=0; i<2; i++){
3119                     for(j=0; j<2; j++)
3120                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3121                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3122                 }
3123             }
3124         }
3125
3126         if(s->pict_type==AV_PICTURE_TYPE_B){
3127             int a, b;
3128
3129             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3130             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3131             s->f_code = FFMAX(a, b);
3132
3133             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3134             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3135             s->b_code = FFMAX(a, b);
3136
3137             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3138             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3139             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3140             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3141             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3142                 int dir, j;
3143                 for(dir=0; dir<2; dir++){
3144                     for(i=0; i<2; i++){
3145                         for(j=0; j<2; j++){
3146                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3147                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3148                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3149                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3150                         }
3151                     }
3152                 }
3153             }
3154         }
3155     }
3156
3157     if (estimate_qp(s, 0) < 0)
3158         return -1;
3159
3160     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3161         s->qscale= 3; //reduce clipping problems
3162
3163     if (s->out_format == FMT_MJPEG) {
3164         /* for mjpeg, we do include qscale in the matrix */
3165         for(i=1;i<64;i++){
3166             int j= s->dsp.idct_permutation[i];
3167
3168             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3169         }
3170         s->y_dc_scale_table=
3171         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3172         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3173         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3174                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3175         s->qscale= 8;
3176     }
3177
3178     //FIXME var duplication
3179     s->current_picture_ptr->f.key_frame =
3180     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3181     s->current_picture_ptr->f.pict_type =
3182     s->current_picture.f.pict_type = s->pict_type;
3183
3184     if (s->current_picture.f.key_frame)
3185         s->picture_in_gop_number=0;
3186
3187     s->last_bits= put_bits_count(&s->pb);
3188     switch(s->out_format) {
3189     case FMT_MJPEG:
3190         if (CONFIG_MJPEG_ENCODER)
3191             ff_mjpeg_encode_picture_header(s);
3192         break;
3193     case FMT_H261:
3194         if (CONFIG_H261_ENCODER)
3195             ff_h261_encode_picture_header(s, picture_number);
3196         break;
3197     case FMT_H263:
3198         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3199             ff_wmv2_encode_picture_header(s, picture_number);
3200         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3201             ff_msmpeg4_encode_picture_header(s, picture_number);
3202         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3203             ff_mpeg4_encode_picture_header(s, picture_number);
3204         else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3205             ff_rv10_encode_picture_header(s, picture_number);
3206         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3207             ff_rv20_encode_picture_header(s, picture_number);
3208         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3209             ff_flv_encode_picture_header(s, picture_number);
3210         else if (CONFIG_H263_ENCODER)
3211             ff_h263_encode_picture_header(s, picture_number);
3212         break;
3213     case FMT_MPEG1:
3214         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3215             ff_mpeg1_encode_picture_header(s, picture_number);
3216         break;
3217     default:
3218         assert(0);
3219     }
3220     bits= put_bits_count(&s->pb);
3221     s->header_bits= bits - s->last_bits;
3222
3223     for(i=1; i<context_count; i++){
3224         update_duplicate_context_after_me(s->thread_context[i], s);
3225     }
3226     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3227     for(i=1; i<context_count; i++){
3228         merge_context_after_encode(s, s->thread_context[i]);
3229     }
3230     emms_c();
3231     return 0;
3232 }
3233
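     /* Simple DCT-domain noise reduction: update the per-coefficient error sums and
      * pull every nonzero coefficient towards zero by the running dct_offset of the
      * current block type (intra/inter), clamping at zero so the sign never flips. */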
3234 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3235     const int intra= s->mb_intra;
3236     int i;
3237
3238     s->dct_count[intra]++;
3239
3240     for(i=0; i<64; i++){
3241         int level= block[i];
3242
3243         if(level){
3244             if(level>0){
3245                 s->dct_error_sum[intra][i] += level;
3246                 level -= s->dct_offset[intra][i];
3247                 if(level<0) level=0;
3248             }else{
3249                 s->dct_error_sum[intra][i] -= level;
3250                 level += s->dct_offset[intra][i];
3251                 if(level>0) level=0;
3252             }
3253             block[i]= level;
3254         }
3255     }
3256 }
3257
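     /* Rate-distortion ("trellis") quantization: after the forward DCT, run/level
      * decisions are searched with a Viterbi-like pass. score_tab[i] holds the best
      * rate+distortion cost (distortion + bits*lambda) of coding all coefficients up
      * to scan position i, and survivor[] lists the end positions still worth
      * extending. */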
3258 static int dct_quantize_trellis_c(MpegEncContext *s,
3259                                   int16_t *block, int n,
3260                                   int qscale, int *overflow){
3261     const int *qmat;
3262     const uint8_t *scantable= s->intra_scantable.scantable;
3263     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3264     int max=0;
3265     unsigned int threshold1, threshold2;
3266     int bias=0;
3267     int run_tab[65];
3268     int level_tab[65];
3269     int score_tab[65];
3270     int survivor[65];
3271     int survivor_count;
3272     int last_run=0;
3273     int last_level=0;
3274     int last_score= 0;
3275     int last_i;
3276     int coeff[2][64];
3277     int coeff_count[64];
3278     int qmul, qadd, start_i, last_non_zero, i, dc;
3279     const int esc_length= s->ac_esc_length;
3280     uint8_t * length;
3281     uint8_t * last_length;
3282     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3283
3284     s->dsp.fdct (block);
3285
3286     if(s->dct_error_sum)
3287         s->denoise_dct(s, block);
3288     qmul= qscale*16;
3289     qadd= ((qscale-1)|1)*8;
3290
3291     if (s->mb_intra) {
3292         int q;
3293         if (!s->h263_aic) {
3294             if (n < 4)
3295                 q = s->y_dc_scale;
3296             else
3297                 q = s->c_dc_scale;
3298             q = q << 3;
3299         } else{
3300             /* For AIC we skip quant/dequant of INTRADC */
3301             q = 1 << 3;
3302             qadd=0;
3303         }
3304
3305         /* note: block[0] is assumed to be positive */
3306         block[0] = (block[0] + (q >> 1)) / q;
3307         start_i = 1;
3308         last_non_zero = 0;
3309         qmat = s->q_intra_matrix[qscale];
3310         if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3311             bias= 1<<(QMAT_SHIFT-1);
3312         length     = s->intra_ac_vlc_length;
3313         last_length= s->intra_ac_vlc_last_length;
3314     } else {
3315         start_i = 0;
3316         last_non_zero = -1;
3317         qmat = s->q_inter_matrix[qscale];
3318         length     = s->inter_ac_vlc_length;
3319         last_length= s->inter_ac_vlc_last_length;
3320     }
3321     last_i= start_i;
3322
3323     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3324     threshold2= (threshold1<<1);
3325
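         /* Scan backwards for the last coefficient that survives quantization;
            everything after it will stay zero. */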
3326     for(i=63; i>=start_i; i--) {
3327         const int j = scantable[i];
3328         int level = block[j] * qmat[j];
3329
3330         if(((unsigned)(level+threshold1))>threshold2){
3331             last_non_zero = i;
3332             break;
3333         }
3334     }
3335
3336     for(i=start_i; i<=last_non_zero; i++) {
3337         const int j = scantable[i];
3338         int level = block[j] * qmat[j];
3339
3340 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3341 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3342         if(((unsigned)(level+threshold1))>threshold2){
3343             if(level>0){
3344                 level= (bias + level)>>QMAT_SHIFT;
3345                 coeff[0][i]= level;
3346                 coeff[1][i]= level-1;
3347 //                coeff[2][k]= level-2;
3348             }else{
3349                 level= (bias - level)>>QMAT_SHIFT;
3350                 coeff[0][i]= -level;
3351                 coeff[1][i]= -level+1;
3352 //                coeff[2][k]= -level+2;
3353             }
3354             coeff_count[i]= FFMIN(level, 2);
3355             assert(coeff_count[i]);
3356             max |=level;
3357         }else{
3358             coeff[0][i]= (level>>31)|1;
3359             coeff_count[i]= 1;
3360         }
3361     }
3362
3363     *overflow= s->max_qcoeff < max; //overflow might have happened
3364
3365     if(last_non_zero < start_i){
3366         memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3367         return last_non_zero;
3368     }
3369
3370     score_tab[start_i]= 0;
3371     survivor[0]= start_i;
3372     survivor_count= 1;
3373
3374     for(i=start_i; i<=last_non_zero; i++){
3375         int level_index, j, zero_distortion;
3376         int dct_coeff= FFABS(block[ scantable[i] ]);
3377         int best_score=256*256*256*120;
3378
3379         if (s->dsp.fdct == ff_fdct_ifast)
3380             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3381         zero_distortion= dct_coeff*dct_coeff;
3382
3383         for(level_index=0; level_index < coeff_count[i]; level_index++){
3384             int distortion;
3385             int level= coeff[level_index][i];
3386             const int alevel= FFABS(level);
3387             int unquant_coeff;
3388
3389             assert(level);
3390
3391             if(s->out_format == FMT_H263){
3392                 unquant_coeff= alevel*qmul + qadd;
3393             }else{ //MPEG1
3394                 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3395                 if(s->mb_intra){
3396                         unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
3397                         unquant_coeff =   (unquant_coeff - 1) | 1;
3398                 }else{
3399                         unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3400                         unquant_coeff =   (unquant_coeff - 1) | 1;
3401                 }
3402                 unquant_coeff<<= 3;
3403             }
3404
3405             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3406             level+=64;
3407             if((level&(~127)) == 0){
3408                 for(j=survivor_count-1; j>=0; j--){
3409                     int run= i - survivor[j];
3410                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3411                     score += score_tab[i-run];
3412
3413                     if(score < best_score){
3414                         best_score= score;
3415                         run_tab[i+1]= run;
3416                         level_tab[i+1]= level-64;
3417                     }
3418                 }
3419
3420                 if(s->out_format == FMT_H263){
3421                     for(j=survivor_count-1; j>=0; j--){
3422                         int run= i - survivor[j];
3423                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3424                         score += score_tab[i-run];
3425                         if(score < last_score){
3426                             last_score= score;
3427                             last_run= run;
3428                             last_level= level-64;
3429                             last_i= i+1;
3430                         }
3431                     }
3432                 }
3433             }else{
3434                 distortion += esc_length*lambda;
3435                 for(j=survivor_count-1; j>=0; j--){
3436                     int run= i - survivor[j];
3437                     int score= distortion + score_tab[i-run];
3438
3439                     if(score < best_score){
3440                         best_score= score;
3441                         run_tab[i+1]= run;
3442                         level_tab[i+1]= level-64;
3443                     }
3444                 }
3445
3446                 if(s->out_format == FMT_H263){
3447                     for(j=survivor_count-1; j>=0; j--){
3448                         int run= i - survivor[j];
3449                         int score= distortion + score_tab[i-run];
3450                         if(score < last_score){
3451                             last_score= score;
3452                             last_run= run;
3453                             last_level= level-64;
3454                             last_i= i+1;
3455                         }
3456                     }
3457                 }
3458             }
3459         }
3460
3461         score_tab[i+1]= best_score;
3462
3463                 //Note: there is a VLC code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
3464         if(last_non_zero <= 27){
3465             for(; survivor_count; survivor_count--){
3466                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3467                     break;
3468             }
3469         }else{
3470             for(; survivor_count; survivor_count--){
3471                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3472                     break;
3473             }
3474         }
3475
3476         survivor[ survivor_count++ ]= i+1;
3477     }
3478
3479     if(s->out_format != FMT_H263){
3480         last_score= 256*256*256*120;
3481         for(i= survivor[0]; i<=last_non_zero + 1; i++){
3482             int score= score_tab[i];
3483             if(i) score += lambda*2; //FIXME be more exact?
3484
3485             if(score < last_score){
3486                 last_score= score;
3487                 last_i= i;
3488                 last_level= level_tab[i];
3489                 last_run= run_tab[i];
3490             }
3491         }
3492     }
3493
3494     s->coded_score[n] = last_score;
3495
3496     dc= FFABS(block[0]);
3497     last_non_zero= last_i - 1;
3498     memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3499
3500     if(last_non_zero < start_i)
3501         return last_non_zero;
3502
3503     if(last_non_zero == 0 && start_i == 0){
3504         int best_level= 0;
3505         int best_score= dc * dc;
3506
3507         for(i=0; i<coeff_count[0]; i++){
3508             int level= coeff[i][0];
3509             int alevel= FFABS(level);
3510             int unquant_coeff, score, distortion;
3511
3512             if(s->out_format == FMT_H263){
3513                     unquant_coeff= (alevel*qmul + qadd)>>3;
3514             }else{ //MPEG1
3515                     unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3516                     unquant_coeff =   (unquant_coeff - 1) | 1;
3517             }
3518             unquant_coeff = (unquant_coeff + 4) >> 3;
3519             unquant_coeff<<= 3 + 3;
3520
3521             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3522             level+=64;
3523             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3524             else                    score= distortion + esc_length*lambda;
3525
3526             if(score < best_score){
3527                 best_score= score;
3528                 best_level= level - 64;
3529             }
3530         }
3531         block[0]= best_level;
3532         s->coded_score[n] = best_score - dc*dc;
3533         if(best_level == 0) return -1;
3534         else                return last_non_zero;
3535     }
3536
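         /* Walk the chosen path backwards from the last coded coefficient and write
            the surviving run/level pairs back into the block. */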
3537     i= last_i;
3538     assert(last_level);
3539
3540     block[ perm_scantable[last_non_zero] ]= last_level;
3541     i -= last_run + 1;
3542
3543     for(; i>start_i; i -= run_tab[i] + 1){
3544         block[ perm_scantable[i-1] ]= level_tab[i];
3545     }
3546
3547     return last_non_zero;
3548 }
3549
3550 //#define REFINE_STATS 1
3551 static int16_t basis[64][64];
3552
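     /* Fill basis[] with the (IDCT-permuted) 8x8 DCT-II basis functions, i.e.
      *   basis[perm[8*i+j]][8*x+y] =
      *       round((1<<BASIS_SHIFT) * C(i)*C(j)/4 * cos((2x+1)*i*pi/16) * cos((2y+1)*j*pi/16))
      * with C(0) = 1/sqrt(2) and C(k) = 1 otherwise. dct_quantize_refine() uses them
      * to evaluate coefficient changes directly in the spatial domain. */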
3553 static void build_basis(uint8_t *perm){
3554     int i, j, x, y;
3555     emms_c();
3556     for(i=0; i<8; i++){
3557         for(j=0; j<8; j++){
3558             for(y=0; y<8; y++){
3559                 for(x=0; x<8; x++){
3560                     double s= 0.25*(1<<BASIS_SHIFT);
3561                     int index= 8*i + j;
3562                     int perm_index= perm[index];
3563                     if(i==0) s*= sqrt(0.5);
3564                     if(j==0) s*= sqrt(0.5);
3565                     basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3566                 }
3567             }
3568         }
3569     }
3570 }
3571
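     /* Second-pass refinement of an already quantized block: repeatedly try +/-1
      * changes on each coefficient and keep the single change that improves the
      * combined bit-cost / reconstruction-error score (measured via try_8x8basis on
      * the residual in rem[]) the most, until no change helps. Used by the
      * quantizer-noise-shaping path. */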
3572 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3573                         int16_t *block, int16_t *weight, int16_t *orig,
3574                         int n, int qscale){
3575     int16_t rem[64];
3576     LOCAL_ALIGNED_16(int16_t, d1, [64]);
3577     const uint8_t *scantable= s->intra_scantable.scantable;
3578     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3579 //    unsigned int threshold1, threshold2;
3580 //    int bias=0;
3581     int run_tab[65];
3582     int prev_run=0;
3583     int prev_level=0;
3584     int qmul, qadd, start_i, last_non_zero, i, dc;
3585     uint8_t * length;
3586     uint8_t * last_length;
3587     int lambda;
3588     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3589 #ifdef REFINE_STATS
3590 static int count=0;
3591 static int after_last=0;
3592 static int to_zero=0;
3593 static int from_zero=0;
3594 static int raise=0;
3595 static int lower=0;
3596 static int messed_sign=0;
3597 #endif
3598
3599     if(basis[0][0] == 0)
3600         build_basis(s->dsp.idct_permutation);
3601
3602     qmul= qscale*2;
3603     qadd= (qscale-1)|1;
3604     if (s->mb_intra) {
3605         if (!s->h263_aic) {
3606             if (n < 4)
3607                 q = s->y_dc_scale;
3608             else
3609                 q = s->c_dc_scale;
3610         } else{
3611             /* For AIC we skip quant/dequant of INTRADC */
3612             q = 1;
3613             qadd=0;
3614         }
3615         q <<= RECON_SHIFT-3;
3616         /* note: block[0] is assumed to be positive */
3617         dc= block[0]*q;
3618 //        block[0] = (block[0] + (q >> 1)) / q;
3619         start_i = 1;
3620 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3621 //            bias= 1<<(QMAT_SHIFT-1);
3622         length     = s->intra_ac_vlc_length;
3623         last_length= s->intra_ac_vlc_last_length;
3624     } else {
3625         dc= 0;
3626         start_i = 0;
3627         length     = s->inter_ac_vlc_length;
3628         last_length= s->inter_ac_vlc_last_length;
3629     }
3630     last_non_zero = s->block_last_index[n];
3631
3632 #ifdef REFINE_STATS
3633 {START_TIMER
3634 #endif
3635     dc += (1<<(RECON_SHIFT-1));
3636     for(i=0; i<64; i++){
3637         rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
3638     }
3639 #ifdef REFINE_STATS
3640 STOP_TIMER("memset rem[]")}
3641 #endif
3642     sum=0;
3643     for(i=0; i<64; i++){
3644         int one= 36;
3645         int qns=4;
3646         int w;
3647
3648         w= FFABS(weight[i]) + qns*one;
3649         w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3650
3651         weight[i] = w;
3652 //        w=weight[i] = (63*qns + (w/2)) / w;
3653
3654         assert(w>0);
3655         assert(w<(1<<6));
3656         sum += w*w;
3657     }
3658     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
3659 #ifdef REFINE_STATS
3660 {START_TIMER
3661 #endif
3662     run=0;
3663     rle_index=0;
3664     for(i=start_i; i<=last_non_zero; i++){
3665         int j= perm_scantable[i];
3666         const int level= block[j];
3667         int coeff;
3668
3669         if(level){
3670             if(level<0) coeff= qmul*level - qadd;
3671             else        coeff= qmul*level + qadd;
3672             run_tab[rle_index++]=run;
3673             run=0;
3674
3675             s->dsp.add_8x8basis(rem, basis[j], coeff);
3676         }else{
3677             run++;
3678         }
3679     }
3680 #ifdef REFINE_STATS
3681 if(last_non_zero>0){
3682 STOP_TIMER("init rem[]")
3683 }
3684 }
3685
3686 {START_TIMER
3687 #endif
3688     for(;;){
3689         int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3690         int best_coeff=0;
3691         int best_change=0;
3692         int run2, best_unquant_change=0, analyze_gradient;
3693 #ifdef REFINE_STATS
3694 {START_TIMER
3695 #endif
3696         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3697
3698         if(analyze_gradient){
3699 #ifdef REFINE_STATS
3700 {START_TIMER
3701 #endif
3702             for(i=0; i<64; i++){
3703                 int w= weight[i];
3704
3705                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3706             }
3707 #ifdef REFINE_STATS
3708 STOP_TIMER("rem*w*w")}
3709 {START_TIMER
3710 #endif
3711             s->dsp.fdct(d1);
3712 #ifdef REFINE_STATS
3713 STOP_TIMER("dct")}
3714 #endif
3715         }
3716
3717         if(start_i){
3718             const int level= block[0];
3719             int change, old_coeff;
3720
3721             assert(s->mb_intra);
3722
3723             old_coeff= q*level;
3724
3725             for(change=-1; change<=1; change+=2){
3726                 int new_level= level + change;
3727                 int score, new_coeff;
3728
3729                 new_coeff= q*new_level;
3730                 if(new_coeff >= 2048 || new_coeff < 0)
3731                     continue;
3732
3733                 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3734                 if(score<best_score){
3735                     best_score= score;
3736                     best_coeff= 0;
3737                     best_change= change;
3738                     best_unquant_change= new_coeff - old_coeff;
3739                 }
3740             }
3741         }
3742
3743         run=0;
3744         rle_index=0;
3745         run2= run_tab[rle_index++];
3746         prev_level=0;
3747         prev_run=0;
3748
3749         for(i=start_i; i<64; i++){
3750             int j= perm_scantable[i];
3751             const int level= block[j];
3752             int change, old_coeff;
3753
3754             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3755                 break;
3756
3757             if(level){
3758                 if(level<0) old_coeff= qmul*level - qadd;
3759                 else        old_coeff= qmul*level + qadd;
3760                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3761             }else{
3762                 old_coeff=0;
3763                 run2--;
3764                 assert(run2>=0 || i >= last_non_zero );
3765             }
3766
3767             for(change=-1; change<=1; change+=2){
3768                 int new_level= level + change;
3769                 int score, new_coeff, unquant_change;
3770
3771                 score=0;
3772                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3773                    continue;
3774
3775                 if(new_level){
3776                     if(new_level<0) new_coeff= qmul*new_level - qadd;
3777                     else            new_coeff= qmul*new_level + qadd;
3778                     if(new_coeff >= 2048 || new_coeff <= -2048)
3779                         continue;
3780                     //FIXME check for overflow
3781
3782                     if(level){
3783                         if(level < 63 && level > -63){
3784                             if(i < last_non_zero)
3785                                 score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
3786                                          - length[UNI_AC_ENC_INDEX(run, level+64)];
3787                             else
3788                                 score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3789                                          - last_length[UNI_AC_ENC_INDEX(run, level+64)];
3790                         }
3791                     }else{
3792                         assert(FFABS(new_level)==1);
3793
3794                         if(analyze_gradient){
3795                             int g= d1[ scantable[i] ];
3796                             if(g && (g^new_level) >= 0)
3797                                 continue;
3798                         }
3799
3800                         if(i < last_non_zero){
3801                             int next_i= i + run2 + 1;
3802                             int next_level= block[ perm_scantable[next_i] ] + 64;
3803
3804                             if(next_level&(~127))
3805                                 next_level= 0;
3806
3807                             if(next_i < last_non_zero)
3808                                 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
3809                                          + length[UNI_AC_ENC_INDEX(run2, next_level)]
3810                                          - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3811                             else
3812                                 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
3813                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3814                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3815                         }else{
3816                             score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3817                             if(prev_level){
3818                                 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3819                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3820                             }
3821                         }
3822                     }
3823                 }else{
3824                     new_coeff=0;
3825                     assert(FFABS(level)==1);
3826
3827                     if(i < last_non_zero){
3828                         int next_i= i + run2 + 1;
3829                         int next_level= block[ perm_scantable[next_i] ] + 64;
3830
3831                         if(next_level&(~127))
3832                             next_level= 0;
3833
3834                         if(next_i < last_non_zero)
3835                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3836                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
3837                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3838                         else
3839                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3840                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3841                                      - length[UNI_AC_ENC_INDEX(run, 65)];
3842                     }else{
3843                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3844                         if(prev_level){
3845                             score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3846                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3847                         }
3848                     }
3849                 }
3850
3851                 score *= lambda;
3852
3853                 unquant_change= new_coeff - old_coeff;
3854                 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3855
3856                 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3857                 if(score<best_score){
3858                     best_score= score;
3859                     best_coeff= i;
3860                     best_change= change;
3861                     best_unquant_change= unquant_change;
3862                 }
3863             }
3864             if(level){
3865                 prev_level= level + 64;
3866                 if(prev_level&(~127))
3867                     prev_level= 0;
3868                 prev_run= run;
3869                 run=0;
3870             }else{
3871                 run++;
3872             }
3873         }
3874 #ifdef REFINE_STATS
3875 STOP_TIMER("iterative step")}
3876 #endif
3877
3878         if(best_change){
3879             int j= perm_scantable[ best_coeff ];
3880
3881             block[j] += best_change;
3882
3883             if(best_coeff > last_non_zero){
3884                 last_non_zero= best_coeff;
3885                 assert(block[j]);
3886 #ifdef REFINE_STATS
3887 after_last++;
3888 #endif
3889             }else{
3890 #ifdef REFINE_STATS
3891 if(block[j]){
3892     if(block[j] - best_change){
3893         if(FFABS(block[j]) > FFABS(block[j] - best_change)){
3894             raise++;
3895         }else{
3896             lower++;
3897         }
3898     }else{
3899         from_zero++;
3900     }
3901 }else{
3902     to_zero++;
3903 }
3904 #endif
3905                 for(; last_non_zero>=start_i; last_non_zero--){
3906                     if(block[perm_scantable[last_non_zero]])
3907                         break;
3908                 }
3909             }
3910 #ifdef REFINE_STATS
3911 count++;
3912 if(256*256*256*64 % count == 0){
3913     printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
3914 }
3915 #endif
3916             run=0;
3917             rle_index=0;
3918             for(i=start_i; i<=last_non_zero; i++){
3919                 int j= perm_scantable[i];
3920                 const int level= block[j];
3921
3922                  if(level){
3923                      run_tab[rle_index++]=run;
3924                      run=0;
3925                  }else{
3926                      run++;
3927                  }
3928             }
3929
3930             s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3931         }else{
3932             break;
3933         }
3934     }
3935 #ifdef REFINE_STATS
3936 if(last_non_zero>0){
3937 STOP_TIMER("iterative search")
3938 }
3939 }
3940 #endif
3941
3942     return last_non_zero;
3943 }
3944
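     /* Plain (non-trellis) quantizer: forward DCT, then threshold and scale each
      * coefficient with the precomputed qmat entry and the codec's rounding bias.
      * Returns the scan index of the last nonzero coefficient. */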
3945 int ff_dct_quantize_c(MpegEncContext *s,
3946                         int16_t *block, int n,
3947                         int qscale, int *overflow)
3948 {
3949     int i, j, level, last_non_zero, q, start_i;
3950     const int *qmat;
3951     const uint8_t *scantable= s->intra_scantable.scantable;
3952     int bias;
3953     int max=0;
3954     unsigned int threshold1, threshold2;
3955
3956     s->dsp.fdct (block);
3957
3958     if(s->dct_error_sum)
3959         s->denoise_dct(s, block);
3960
3961     if (s->mb_intra) {
3962         if (!s->h263_aic) {
3963             if (n < 4)
3964                 q = s->y_dc_scale;
3965             else
3966                 q = s->c_dc_scale;
3967             q = q << 3;
3968         } else
3969             /* For AIC we skip quant/dequant of INTRADC */
3970             q = 1 << 3;
3971
3972         /* note: block[0] is assumed to be positive */
3973         block[0] = (block[0] + (q >> 1)) / q;
3974         start_i = 1;
3975         last_non_zero = 0;
3976         qmat = s->q_intra_matrix[qscale];
3977         bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3978     } else {
3979         start_i = 0;
3980         last_non_zero = -1;
3981         qmat = s->q_inter_matrix[qscale];
3982         bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3983     }
3984     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3985     threshold2= (threshold1<<1);
3986     for(i=63;i>=start_i;i--) {
3987         j = scantable[i];
3988         level = block[j] * qmat[j];
3989
3990         if(((unsigned)(level+threshold1))>threshold2){
3991             last_non_zero = i;
3992             break;
3993         }else{
3994             block[j]=0;
3995         }
3996     }
3997     for(i=start_i; i<=last_non_zero; i++) {
3998         j = scantable[i];
3999         level = block[j] * qmat[j];
4000
4001 //        if(   bias+level >= (1<<QMAT_SHIFT)
4002 //           || bias-level >= (1<<QMAT_SHIFT)){
4003         if(((unsigned)(level+threshold1))>threshold2){
4004             if(level>0){
4005                 level= (bias + level)>>QMAT_SHIFT;
4006                 block[j]= level;
4007             }else{
4008                 level= (bias - level)>>QMAT_SHIFT;
4009                 block[j]= -level;
4010             }
4011             max |=level;
4012         }else{
4013             block[j]=0;
4014         }
4015     }
4016     *overflow= s->max_qcoeff < max; //overflow might have happened
4017
4018     /* We need this permutation so that the coefficients end up in the order the IDCT expects; only the nonzero elements are permuted. */
4019     if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4020         ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4021
4022     return last_non_zero;
4023 }
4024
4025 #define OFFSET(x) offsetof(MpegEncContext, x)
4026 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4027 static const AVOption h263_options[] = {
4028     { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4029     { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4030     { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4031     FF_MPV_COMMON_OPTS
4032     { NULL },
4033 };
4034
4035 static const AVClass h263_class = {
4036     .class_name = "H.263 encoder",
4037     .item_name  = av_default_item_name,
4038     .option     = h263_options,
4039     .version    = LIBAVUTIL_VERSION_INT,
4040 };
4041
4042 AVCodec ff_h263_encoder = {
4043     .name           = "h263",
4044     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4045     .type           = AVMEDIA_TYPE_VIDEO,
4046     .id             = AV_CODEC_ID_H263,
4047     .priv_data_size = sizeof(MpegEncContext),
4048     .init           = ff_MPV_encode_init,
4049     .encode2        = ff_MPV_encode_picture,
4050     .close          = ff_MPV_encode_end,
4051     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4052     .priv_class     = &h263_class,
4053 };
4054
4055 static const AVOption h263p_options[] = {
4056     { "umv",        "Use unlimited motion vectors.",    OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4057     { "aiv",        "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4058     { "obmc",       "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4059     { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4060     FF_MPV_COMMON_OPTS
4061     { NULL },
4062 };
4063 static const AVClass h263p_class = {
4064     .class_name = "H.263p encoder",
4065     .item_name  = av_default_item_name,
4066     .option     = h263p_options,
4067     .version    = LIBAVUTIL_VERSION_INT,
4068 };
4069
4070 AVCodec ff_h263p_encoder = {
4071     .name           = "h263p",
4072     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4073     .type           = AVMEDIA_TYPE_VIDEO,
4074     .id             = AV_CODEC_ID_H263P,
4075     .priv_data_size = sizeof(MpegEncContext),
4076     .init           = ff_MPV_encode_init,
4077     .encode2        = ff_MPV_encode_picture,
4078     .close          = ff_MPV_encode_end,
4079     .capabilities   = CODEC_CAP_SLICE_THREADS,
4080     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4081     .priv_class     = &h263p_class,
4082 };
4083
4084 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4085
4086 AVCodec ff_msmpeg4v2_encoder = {
4087     .name           = "msmpeg4v2",
4088     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4089     .type           = AVMEDIA_TYPE_VIDEO,
4090     .id             = AV_CODEC_ID_MSMPEG4V2,
4091     .priv_data_size = sizeof(MpegEncContext),
4092     .init           = ff_MPV_encode_init,
4093     .encode2        = ff_MPV_encode_picture,
4094     .close          = ff_MPV_encode_end,
4095     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4096     .priv_class     = &msmpeg4v2_class,
4097 };
4098
4099 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4100
4101 AVCodec ff_msmpeg4v3_encoder = {
4102     .name           = "msmpeg4",
4103     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4104     .type           = AVMEDIA_TYPE_VIDEO,
4105     .id             = AV_CODEC_ID_MSMPEG4V3,
4106     .priv_data_size = sizeof(MpegEncContext),
4107     .init           = ff_MPV_encode_init,
4108     .encode2        = ff_MPV_encode_picture,
4109     .close          = ff_MPV_encode_end,
4110     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4111     .priv_class     = &msmpeg4v3_class,
4112 };
4113
4114 FF_MPV_GENERIC_CLASS(wmv1)
4115
4116 AVCodec ff_wmv1_encoder = {
4117     .name           = "wmv1",
4118     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4119     .type           = AVMEDIA_TYPE_VIDEO,
4120     .id             = AV_CODEC_ID_WMV1,
4121     .priv_data_size = sizeof(MpegEncContext),
4122     .init           = ff_MPV_encode_init,
4123     .encode2        = ff_MPV_encode_picture,
4124     .close          = ff_MPV_encode_end,
4125     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4126     .priv_class     = &wmv1_class,
4127 };