1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/opt.h"
34 #include "avcodec.h"
35 #include "dsputil.h"
36 #include "mpegvideo.h"
37 #include "h263.h"
38 #include "mathops.h"
39 #include "mjpegenc.h"
40 #include "msmpeg4.h"
41 #include "faandct.h"
42 #include "thread.h"
43 #include "aandcttab.h"
44 #include "flv.h"
45 #include "mpeg4video.h"
46 #include "internal.h"
47 #include "bytestream.h"
48 #include <limits.h>
49 #include "sp5x.h"
50
51 //#undef NDEBUG
52 //#include <assert.h>
53
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
59
60 /* enable all paranoid tests for rounding, overflows, etc... */
61 //#define PARANOID
62
63 //#define DEBUG
64
65 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
66 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
67
68 const AVOption ff_mpv_generic_options[] = {
69     FF_MPV_COMMON_OPTS
70     { NULL },
71 };
72
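/* Precompute reciprocal quantization tables for every qscale in [qmin, qmax]
 * so that the quantizers can multiply instead of divide: qmat[] holds
 * (1 << QMAT_SHIFT) / (qscale * quant_matrix[]) style factors for the
 * generic and trellis quantizers, and the fallback branch additionally
 * fills qmat16[][0]/[1] with a 16-bit factor and rounding bias intended
 * for the SIMD quantizer.  For ff_fdct_ifast the AAN scale factors that
 * this FDCT leaves in its output are folded into the divisor. */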
73 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
74                        uint16_t (*qmat16)[2][64],
75                        const uint16_t *quant_matrix,
76                        int bias, int qmin, int qmax, int intra)
77 {
78     int qscale;
79     int shift = 0;
80
81     for (qscale = qmin; qscale <= qmax; qscale++) {
82         int i;
83         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
84             dsp->fdct == ff_jpeg_fdct_islow_10 ||
85             dsp->fdct == ff_faandct) {
86             for (i = 0; i < 64; i++) {
87                 const int j = dsp->idct_permutation[i];
88                 /* 16 <= qscale * quant_matrix[i] <= 7905
89                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
90                  *             19952 <=              x  <= 249205026
91                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
92                  *           3444240 >= (1 << 36) / (x) >= 275 */
93
94                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
95                                         (qscale * quant_matrix[j]));
96             }
97         } else if (dsp->fdct == ff_fdct_ifast) {
98             for (i = 0; i < 64; i++) {
99                 const int j = dsp->idct_permutation[i];
100                 /* 16 <= qscale * quant_matrix[i] <= 7905
101                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
102                  *             19952 <=              x  <= 249205026
103                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
104                  *           3444240 >= (1 << 36) / (x) >= 275 */
105
106                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
107                                         (ff_aanscales[i] * (int64_t)qscale * quant_matrix[j]));
108             }
109         } else {
110             for (i = 0; i < 64; i++) {
111                 const int j = dsp->idct_permutation[i];
112                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
113                  * Assume x = qscale * quant_matrix[i]
114                  * So             16 <=              x  <= 7905
115                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
116                  * so          32768 >= (1 << 19) / (x) >= 67 */
117                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
118                                         (qscale * quant_matrix[j]));
119                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
120                 //                    (qscale * quant_matrix[i]);
121                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
122                                        (qscale * quant_matrix[j]);
123
124                 if (qmat16[qscale][0][i] == 0 ||
125                     qmat16[qscale][0][i] == 128 * 256)
126                     qmat16[qscale][0][i] = 128 * 256 - 1;
127                 qmat16[qscale][1][i] =
128                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
129                                 qmat16[qscale][0][i]);
130             }
131         }
132
133         for (i = intra; i < 64; i++) {
134             int64_t max = 8191;
135             if (dsp->fdct == ff_fdct_ifast) {
136                 max = (8191LL * ff_aanscales[i]) >> 14;
137             }
138             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
139                 shift++;
140             }
141         }
142     }
143     if (shift) {
144         av_log(NULL, AV_LOG_INFO,
145                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
146                QMAT_SHIFT - shift);
147     }
148 }
149
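/* Derive the quantizer scale from the current rate-control lambda.  With
 * FF_LAMBDA_SHIFT == 7 and FF_LAMBDA_SCALE == 128 the expression below is
 * roughly qscale = lambda / FF_QP2LAMBDA (lambda / 118), rounded and then
 * clipped to the user's qmin..qmax range; e.g. lambda = 236 gives qscale 2.
 * lambda2 is the squared lambda kept for rate-distortion cost comparisons. */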
150 static inline void update_qscale(MpegEncContext *s)
151 {
152     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
153                 (FF_LAMBDA_SHIFT + 7);
154     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
155
156     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
157                  FF_LAMBDA_SHIFT;
158 }
159
160 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
161 {
162     int i;
163
164     if (matrix) {
165         put_bits(pb, 1, 1);
166         for (i = 0; i < 64; i++) {
167             put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
168         }
169     } else
170         put_bits(pb, 1, 0);
171 }
172
173 /**
174  * init s->current_picture.qscale_table from s->lambda_table
175  */
176 void ff_init_qscale_tab(MpegEncContext *s)
177 {
178     int8_t * const qscale_table = s->current_picture.f.qscale_table;
179     int i;
180
181     for (i = 0; i < s->mb_num; i++) {
182         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
183         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
184         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
185                                                   s->avctx->qmax);
186     }
187 }
188
189 static void copy_picture_attributes(MpegEncContext *s,
190                                     AVFrame *dst,
191                                     AVFrame *src)
192 {
193     int i;
194
195     dst->pict_type              = src->pict_type;
196     dst->quality                = src->quality;
197     dst->coded_picture_number   = src->coded_picture_number;
198     dst->display_picture_number = src->display_picture_number;
199     //dst->reference              = src->reference;
200     dst->pts                    = src->pts;
201     dst->interlaced_frame       = src->interlaced_frame;
202     dst->top_field_first        = src->top_field_first;
203
204     if (s->avctx->me_threshold) {
205         if (!src->motion_val[0])
206             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
207         if (!src->mb_type)
208             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
209         if (!src->ref_index[0])
210             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
211         if (src->motion_subsample_log2 != dst->motion_subsample_log2)
212             av_log(s->avctx, AV_LOG_ERROR,
213                    "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
214                    src->motion_subsample_log2, dst->motion_subsample_log2);
215
216         memcpy(dst->mb_type, src->mb_type,
217                s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
218
219         for (i = 0; i < 2; i++) {
220             int stride = ((16 * s->mb_width ) >>
221                           src->motion_subsample_log2) + 1;
222             int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
223
224             if (src->motion_val[i] &&
225                 src->motion_val[i] != dst->motion_val[i]) {
226                 memcpy(dst->motion_val[i], src->motion_val[i],
227                        2 * stride * height * sizeof(int16_t));
228             }
229             if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
230                 memcpy(dst->ref_index[i], src->ref_index[i],
231                        s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
232             }
233         }
234     }
235 }
236
237 static void update_duplicate_context_after_me(MpegEncContext *dst,
238                                               MpegEncContext *src)
239 {
240 #define COPY(a) dst->a= src->a
241     COPY(pict_type);
242     COPY(current_picture);
243     COPY(f_code);
244     COPY(b_code);
245     COPY(qscale);
246     COPY(lambda);
247     COPY(lambda2);
248     COPY(picture_in_gop_number);
249     COPY(gop_picture_number);
250     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
251     COPY(progressive_frame);    // FIXME don't set in encode_header
252     COPY(partitioned_frame);    // FIXME don't set in encode_header
253 #undef COPY
254 }
255
256 /**
257  * Set the given MpegEncContext to defaults for encoding.
258  * The changed fields will not depend upon the prior state of the MpegEncContext.
259  */
260 static void MPV_encode_defaults(MpegEncContext *s)
261 {
262     int i;
263     ff_MPV_common_defaults(s);
264
265     for (i = -16; i < 16; i++) {
266         default_fcode_tab[i + MAX_MV] = 1;
267     }
268     s->me.mv_penalty = default_mv_penalty;
269     s->fcode_tab     = default_fcode_tab;
270 }
271
272 av_cold int ff_dct_encode_init(MpegEncContext *s) {
273     if (ARCH_X86)
274         ff_dct_encode_init_x86(s);
275
276     if (!s->dct_quantize)
277         s->dct_quantize = ff_dct_quantize_c;
278     if (!s->denoise_dct)
279         s->denoise_dct  = denoise_dct_c;
280     s->fast_dct_quantize = s->dct_quantize;
281     if (s->avctx->trellis)
282         s->dct_quantize  = dct_quantize_trellis_c;
283
284     return 0;
285 }
286
287 /* init video encoder */
288 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
289 {
290     MpegEncContext *s = avctx->priv_data;
291     int i;
292     int chroma_h_shift, chroma_v_shift;
293
294     MPV_encode_defaults(s);
295
296     switch (avctx->codec_id) {
297     case AV_CODEC_ID_MPEG2VIDEO:
298         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300             av_log(avctx, AV_LOG_ERROR,
301                    "only YUV420 and YUV422 are supported\n");
302             return -1;
303         }
304         break;
305     case AV_CODEC_ID_LJPEG:
306         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
307             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
308             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
309             avctx->pix_fmt != AV_PIX_FMT_BGR0     &&
310             avctx->pix_fmt != AV_PIX_FMT_BGRA     &&
311             avctx->pix_fmt != AV_PIX_FMT_BGR24    &&
312             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
313               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
314               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
315              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
316             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
317             return -1;
318         }
319         break;
320     case AV_CODEC_ID_MJPEG:
321     case AV_CODEC_ID_AMV:
322         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
323             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
324             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
325             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
326               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
327               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
328              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
329             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
330             return -1;
331         }
332         break;
333     default:
334         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
335             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
336             return -1;
337         }
338     }
339
340     switch (avctx->pix_fmt) {
341     case AV_PIX_FMT_YUVJ444P:
342     case AV_PIX_FMT_YUV444P:
343         s->chroma_format = CHROMA_444;
344         break;
345     case AV_PIX_FMT_YUVJ422P:
346     case AV_PIX_FMT_YUV422P:
347         s->chroma_format = CHROMA_422;
348         break;
349     case AV_PIX_FMT_YUVJ420P:
350     case AV_PIX_FMT_YUV420P:
351     default:
352         s->chroma_format = CHROMA_420;
353         break;
354     }
355
356     s->bit_rate = avctx->bit_rate;
357     s->width    = avctx->width;
358     s->height   = avctx->height;
359     if (avctx->gop_size > 600 &&
360         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
361         av_log(avctx, AV_LOG_WARNING,
362                "keyframe interval too large, reducing it from %d to %d\n",
363                avctx->gop_size, 600);
364         avctx->gop_size = 600;
365     }
366     s->gop_size     = avctx->gop_size;
367     s->avctx        = avctx;
368     s->flags        = avctx->flags;
369     s->flags2       = avctx->flags2;
370     s->max_b_frames = avctx->max_b_frames;
371     s->codec_id     = avctx->codec->id;
372 #if FF_API_MPV_GLOBAL_OPTS
373     if (avctx->luma_elim_threshold)
374         s->luma_elim_threshold   = avctx->luma_elim_threshold;
375     if (avctx->chroma_elim_threshold)
376         s->chroma_elim_threshold = avctx->chroma_elim_threshold;
377 #endif
378     s->strict_std_compliance = avctx->strict_std_compliance;
379     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
380     s->mpeg_quant         = avctx->mpeg_quant;
381     s->rtp_mode           = !!avctx->rtp_payload_size;
382     s->intra_dc_precision = avctx->intra_dc_precision;
383     s->user_specified_pts = AV_NOPTS_VALUE;
384
385     if (s->gop_size <= 1) {
386         s->intra_only = 1;
387         s->gop_size   = 12;
388     } else {
389         s->intra_only = 0;
390     }
391
392     s->me_method = avctx->me_method;
393
394     /* Fixed QSCALE */
395     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
396
397 #if FF_API_MPV_GLOBAL_OPTS
398     if (s->flags & CODEC_FLAG_QP_RD)
399         s->mpv_flags |= FF_MPV_FLAG_QP_RD;
400 #endif
401
402     s->adaptive_quant = (s->avctx->lumi_masking ||
403                          s->avctx->dark_masking ||
404                          s->avctx->temporal_cplx_masking ||
405                          s->avctx->spatial_cplx_masking  ||
406                          s->avctx->p_masking      ||
407                          s->avctx->border_masking ||
408                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
409                         !s->fixed_qscale;
410
411     s->loop_filter      = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
412
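    /* If only a maximum rate is given, pick a matching VBV buffer size.
     * The constants are in units of 16384 bits (the MPEG vbv_buffer_size
     * granularity) and are converted to bits at the end: a 15 Mb/s MPEG-2
     * stream gets 112 * 16384 = 1835008 bits (the MP@ML limit) and higher
     * rates scale that value proportionally, while the MPEG-4/MSMPEG4
     * branch interpolates between fixed break points that appear to follow
     * the usual profile/level buffer limits. */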
413     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
414         switch(avctx->codec_id) {
415         case AV_CODEC_ID_MPEG1VIDEO:
416         case AV_CODEC_ID_MPEG2VIDEO:
417             avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
418             break;
419         case AV_CODEC_ID_MPEG4:
420         case AV_CODEC_ID_MSMPEG4V1:
421         case AV_CODEC_ID_MSMPEG4V2:
422         case AV_CODEC_ID_MSMPEG4V3:
423             if       (avctx->rc_max_rate >= 15000000) {
424                 avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
425             } else if(avctx->rc_max_rate >=  2000000) {
426                 avctx->rc_buffer_size =  80 + (avctx->rc_max_rate -  2000000L) * (320- 80) / (15000000 -  2000000);
427             } else if(avctx->rc_max_rate >=   384000) {
428                 avctx->rc_buffer_size =  40 + (avctx->rc_max_rate -   384000L) * ( 80- 40) / ( 2000000 -   384000);
429             } else
430                 avctx->rc_buffer_size = 40;
431             avctx->rc_buffer_size *= 16384;
432             break;
433         }
434         if (avctx->rc_buffer_size) {
435             av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
436         }
437     }
438
439     if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
440         av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
441         if (avctx->rc_max_rate && !avctx->rc_buffer_size)
442             return -1;
443     }
444
445     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
446         av_log(avctx, AV_LOG_INFO,
447                "Warning: min_rate > 0 but min_rate != max_rate is not recommended!\n");
448     }
449
450     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
451         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
452         return -1;
453     }
454
455     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
456         av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
457         return -1;
458     }
459
460     if (avctx->rc_max_rate &&
461         avctx->rc_max_rate == avctx->bit_rate &&
462         avctx->rc_max_rate != avctx->rc_min_rate) {
463         av_log(avctx, AV_LOG_INFO,
464                "impossible bitrate constraints, this will fail\n");
465     }
466
467     if (avctx->rc_buffer_size &&
468         avctx->bit_rate * (int64_t)avctx->time_base.num >
469             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
470         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
471         return -1;
472     }
473
474     if (!s->fixed_qscale &&
475         avctx->bit_rate * av_q2d(avctx->time_base) >
476             avctx->bit_rate_tolerance) {
477         av_log(avctx, AV_LOG_ERROR,
478                "bitrate tolerance too small for bitrate\n");
479         return -1;
480     }
481
482     if (s->avctx->rc_max_rate &&
483         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
484         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
485          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
486         90000LL * (avctx->rc_buffer_size - 1) >
487             s->avctx->rc_max_rate * 0xFFFFLL) {
488         av_log(avctx, AV_LOG_INFO,
489                "Warning: vbv_delay will be set to 0xFFFF (=VBR) as the "
490                "specified vbv buffer is too large for the given bitrate!\n");
491     }
492
493     if ((s->flags & CODEC_FLAG_4MV)  && s->codec_id != AV_CODEC_ID_MPEG4 &&
494         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
495         s->codec_id != AV_CODEC_ID_FLV1) {
496         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
497         return -1;
498     }
499
500     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
501         av_log(avctx, AV_LOG_ERROR,
502                "OBMC is only supported with simple mb decision\n");
503         return -1;
504     }
505
506     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
507         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
508         return -1;
509     }
510
511     if (s->max_b_frames                    &&
512         s->codec_id != AV_CODEC_ID_MPEG4      &&
513         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
514         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
515         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
516         return -1;
517     }
518
519     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
520          s->codec_id == AV_CODEC_ID_H263  ||
521          s->codec_id == AV_CODEC_ID_H263P) &&
522         (avctx->sample_aspect_ratio.num > 255 ||
523          avctx->sample_aspect_ratio.den > 255)) {
524         av_log(avctx, AV_LOG_WARNING,
525                "Invalid pixel aspect ratio %i/%i, limit is 255/255, reducing\n",
526                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
527         av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
528                    avctx->sample_aspect_ratio.num,  avctx->sample_aspect_ratio.den, 255);
529     }
530
531     if ((s->codec_id == AV_CODEC_ID_H263  ||
532          s->codec_id == AV_CODEC_ID_H263P) &&
533         (avctx->width  > 2048 ||
534          avctx->height > 1152 )) {
535         av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
536         return -1;
537     }
538     if ((s->codec_id == AV_CODEC_ID_H263  ||
539          s->codec_id == AV_CODEC_ID_H263P) &&
540         ((avctx->width &3) ||
541          (avctx->height&3) )) {
542         av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
543         return -1;
544     }
545
546     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
547         (avctx->width  > 4095 ||
548          avctx->height > 4095 )) {
549         av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
550         return -1;
551     }
552
553     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
554         (avctx->width  > 16383 ||
555          avctx->height > 16383 )) {
556         av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
557         return -1;
558     }
559
560     if ((s->codec_id == AV_CODEC_ID_WMV1 ||
561          s->codec_id == AV_CODEC_ID_WMV2) &&
562          avctx->width & 1) {
563          av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
564          return -1;
565     }
566
567     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
568         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
569         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
570         return -1;
571     }
572
573     // FIXME mpeg2 uses that too
574     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
575         av_log(avctx, AV_LOG_ERROR,
576                "mpeg2 style quantization not supported by codec\n");
577         return -1;
578     }
579
580 #if FF_API_MPV_GLOBAL_OPTS
581     if (s->flags & CODEC_FLAG_CBP_RD)
582         s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
583 #endif
584
585     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
586         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
587         return -1;
588     }
589
590     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
591         s->avctx->mb_decision != FF_MB_DECISION_RD) {
592         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
593         return -1;
594     }
595
596     if (s->avctx->scenechange_threshold < 1000000000 &&
597         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
598         av_log(avctx, AV_LOG_ERROR,
599                "closed gop with scene change detection is not supported yet, "
600                "set threshold to 1000000000\n");
601         return -1;
602     }
603
604     if (s->flags & CODEC_FLAG_LOW_DELAY) {
605         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
606             av_log(avctx, AV_LOG_ERROR,
607                   "low delay forcing is only available for mpeg2\n");
608             return -1;
609         }
610         if (s->max_b_frames != 0) {
611             av_log(avctx, AV_LOG_ERROR,
612                    "b frames cannot be used with low delay\n");
613             return -1;
614         }
615     }
616
617     if (s->q_scale_type == 1) {
618         if (avctx->qmax > 12) {
619             av_log(avctx, AV_LOG_ERROR,
620                    "non-linear quant only supports qmax <= 12 currently\n");
621             return -1;
622         }
623     }
624
625     if (s->avctx->thread_count > 1         &&
626         s->codec_id != AV_CODEC_ID_MPEG4      &&
627         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
628         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
629         s->codec_id != AV_CODEC_ID_MJPEG      &&
630         (s->codec_id != AV_CODEC_ID_H263P)) {
631         av_log(avctx, AV_LOG_ERROR,
632                "multi threaded encoding not supported by codec\n");
633         return -1;
634     }
635
636     if (s->avctx->thread_count < 1) {
637         av_log(avctx, AV_LOG_ERROR,
638                "automatic thread number detection not supported by codec, "
639                "patch welcome\n");
640         return -1;
641     }
642
643     if (s->avctx->thread_count > 1)
644         s->rtp_mode = 1;
645
646     if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
647         s->h263_slice_structured = 1;
648
649     if (!avctx->time_base.den || !avctx->time_base.num) {
650         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
651         return -1;
652     }
653
654     i = (INT_MAX / 2 + 128) >> 8;
655     if (avctx->me_threshold >= i) {
656         av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
657                i - 1);
658         return -1;
659     }
660     if (avctx->mb_threshold >= i) {
661         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
662                i - 1);
663         return -1;
664     }
665
666     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
667         av_log(avctx, AV_LOG_INFO,
668                "notice: b_frame_strategy only affects the first pass\n");
669         avctx->b_frame_strategy = 0;
670     }
671
672     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
673     if (i > 1) {
674         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
675         avctx->time_base.den /= i;
676         avctx->time_base.num /= i;
677         //return -1;
678     }
679
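    /* Default quantizer rounding offsets in QUANT_BIAS_SHIFT fixed point:
     * the MPEG-1/2, MJPEG/AMV and mpeg_quant MPEG-4 path rounds intra
     * coefficients up by 3/8 of a step, while the remaining H.263-style
     * codecs use no intra bias and a -1/4 inter bias, i.e. a wider dead
     * zone around zero. */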
680     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
681         // (a + x * 3 / 8) / x
682         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
683         s->inter_quant_bias = 0;
684     } else {
685         s->intra_quant_bias = 0;
686         // (a - x / 4) / x
687         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
688     }
689
690     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
691         s->intra_quant_bias = avctx->intra_quant_bias;
692     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
693         s->inter_quant_bias = avctx->inter_quant_bias;
694
695     av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
696
697     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
698
699     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
700         s->avctx->time_base.den > (1 << 16) - 1) {
701         av_log(avctx, AV_LOG_ERROR,
702                "timebase %d/%d not supported by MPEG 4 standard, "
703                "the maximum admitted value for the timebase denominator "
704                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
705                (1 << 16) - 1);
706         return -1;
707     }
708     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
709
710 #if FF_API_MPV_GLOBAL_OPTS
711     if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
712         s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
713     if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
714         s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
715     if (avctx->quantizer_noise_shaping)
716         s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
717 #endif
718
719     switch (avctx->codec->id) {
720     case AV_CODEC_ID_MPEG1VIDEO:
721         s->out_format = FMT_MPEG1;
722         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
723         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
724         break;
725     case AV_CODEC_ID_MPEG2VIDEO:
726         s->out_format = FMT_MPEG1;
727         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
728         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
729         s->rtp_mode   = 1;
730         break;
731     case AV_CODEC_ID_LJPEG:
732     case AV_CODEC_ID_MJPEG:
733     case AV_CODEC_ID_AMV:
734         s->out_format = FMT_MJPEG;
735         s->intra_only = 1; /* force intra only for jpeg */
736         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
737             (avctx->pix_fmt == AV_PIX_FMT_BGR0
738              || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
739              || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
740             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
741             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
742             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
743         } else {
744             s->mjpeg_vsample[0] = 2;
745             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
746             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
747             s->mjpeg_hsample[0] = 2;
748             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
749             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
750         }
751         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
752             ff_mjpeg_encode_init(s) < 0)
753             return -1;
754         avctx->delay = 0;
755         s->low_delay = 1;
756         break;
757     case AV_CODEC_ID_H261:
758         if (!CONFIG_H261_ENCODER)
759             return -1;
760         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
761             av_log(avctx, AV_LOG_ERROR,
762                    "The specified picture size of %dx%d is not valid for the "
763                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
764                     s->width, s->height);
765             return -1;
766         }
767         s->out_format = FMT_H261;
768         avctx->delay  = 0;
769         s->low_delay  = 1;
770         break;
771     case AV_CODEC_ID_H263:
772         if (!CONFIG_H263_ENCODER)
773             return -1;
774         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
775                              s->width, s->height) == 8) {
776             av_log(avctx, AV_LOG_ERROR,
777                    "The specified picture size of %dx%d is not valid for "
778                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
779                    "352x288, 704x576, and 1408x1152. "
780                    "Try H.263+.\n", s->width, s->height);
781             return -1;
782         }
783         s->out_format = FMT_H263;
784         avctx->delay  = 0;
785         s->low_delay  = 1;
786         break;
787     case AV_CODEC_ID_H263P:
788         s->out_format = FMT_H263;
789         s->h263_plus  = 1;
790         /* Fx */
791         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
792         s->modified_quant  = s->h263_aic;
793         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
794         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
795
796         /* /Fx */
797         /* These are just to be sure */
798         avctx->delay = 0;
799         s->low_delay = 1;
800         break;
801     case AV_CODEC_ID_FLV1:
802         s->out_format      = FMT_H263;
803         s->h263_flv        = 2; /* format = 1; 11-bit codes */
804         s->unrestricted_mv = 1;
805         s->rtp_mode  = 0; /* don't allow GOB */
806         avctx->delay = 0;
807         s->low_delay = 1;
808         break;
809     case AV_CODEC_ID_RV10:
810         s->out_format = FMT_H263;
811         avctx->delay  = 0;
812         s->low_delay  = 1;
813         break;
814     case AV_CODEC_ID_RV20:
815         s->out_format      = FMT_H263;
816         avctx->delay       = 0;
817         s->low_delay       = 1;
818         s->modified_quant  = 1;
819         s->h263_aic        = 1;
820         s->h263_plus       = 1;
821         s->loop_filter     = 1;
822         s->unrestricted_mv = 0;
823         break;
824     case AV_CODEC_ID_MPEG4:
825         s->out_format      = FMT_H263;
826         s->h263_pred       = 1;
827         s->unrestricted_mv = 1;
828         s->low_delay       = s->max_b_frames ? 0 : 1;
829         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
830         break;
831     case AV_CODEC_ID_MSMPEG4V2:
832         s->out_format      = FMT_H263;
833         s->h263_pred       = 1;
834         s->unrestricted_mv = 1;
835         s->msmpeg4_version = 2;
836         avctx->delay       = 0;
837         s->low_delay       = 1;
838         break;
839     case AV_CODEC_ID_MSMPEG4V3:
840         s->out_format        = FMT_H263;
841         s->h263_pred         = 1;
842         s->unrestricted_mv   = 1;
843         s->msmpeg4_version   = 3;
844         s->flipflop_rounding = 1;
845         avctx->delay         = 0;
846         s->low_delay         = 1;
847         break;
848     case AV_CODEC_ID_WMV1:
849         s->out_format        = FMT_H263;
850         s->h263_pred         = 1;
851         s->unrestricted_mv   = 1;
852         s->msmpeg4_version   = 4;
853         s->flipflop_rounding = 1;
854         avctx->delay         = 0;
855         s->low_delay         = 1;
856         break;
857     case AV_CODEC_ID_WMV2:
858         s->out_format        = FMT_H263;
859         s->h263_pred         = 1;
860         s->unrestricted_mv   = 1;
861         s->msmpeg4_version   = 5;
862         s->flipflop_rounding = 1;
863         avctx->delay         = 0;
864         s->low_delay         = 1;
865         break;
866     default:
867         return -1;
868     }
869
870     avctx->has_b_frames = !s->low_delay;
871
872     s->encoding = 1;
873
874     s->progressive_frame    =
875     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
876                                                 CODEC_FLAG_INTERLACED_ME) ||
877                                 s->alternate_scan);
878
879     /* init */
880     if (ff_MPV_common_init(s) < 0)
881         return -1;
882
883     ff_dct_encode_init(s);
884
885     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
886         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
887
888     s->quant_precision = 5;
889
890     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
891     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
892
893     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
894         ff_h261_encode_init(s);
895     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
896         ff_h263_encode_init(s);
897     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
898         ff_msmpeg4_encode_init(s);
899     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
900         && s->out_format == FMT_MPEG1)
901         ff_mpeg1_encode_init(s);
902
903     /* init q matrix */
904     for (i = 0; i < 64; i++) {
905         int j = s->dsp.idct_permutation[i];
906         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
907             s->mpeg_quant) {
908             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
909             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
910         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
911             s->intra_matrix[j] =
912             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
913         } else {
914             /* mpeg1/2 */
915             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
916             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
917         }
918         if (s->avctx->intra_matrix)
919             s->intra_matrix[j] = s->avctx->intra_matrix[i];
920         if (s->avctx->inter_matrix)
921             s->inter_matrix[j] = s->avctx->inter_matrix[i];
922     }
923
924     /* precompute matrix */
925     /* for mjpeg, we do include qscale in the matrix */
926     if (s->out_format != FMT_MJPEG) {
927         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
928                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
929                           31, 1);
930         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
931                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
932                           31, 0);
933     }
934
935     if (ff_rate_control_init(s) < 0)
936         return -1;
937
938     return 0;
939 }
940
941 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
942 {
943     MpegEncContext *s = avctx->priv_data;
944
945     ff_rate_control_uninit(s);
946
947     ff_MPV_common_end(s);
948     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
949         s->out_format == FMT_MJPEG)
950         ff_mjpeg_encode_close(s);
951
952     av_freep(&avctx->extradata);
953
954     return 0;
955 }
956
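/* Sum of absolute differences of a 16x16 block against a single reference
 * value (the callers pass the block mean), i.e. a measure of how far the
 * block is from being flat. */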
957 static int get_sae(uint8_t *src, int ref, int stride)
958 {
959     int x,y;
960     int acc = 0;
961
962     for (y = 0; y < 16; y++) {
963         for (x = 0; x < 16; x++) {
964             acc += FFABS(src[x + y * stride] - ref);
965         }
966     }
967
968     return acc;
969 }
970
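/* Count the 16x16 blocks that look cheaper to code as intra: a block is
 * counted when its SAE against the block mean, plus a margin of 500, is
 * still below its SAD against the co-located block in 'ref'.  Used by
 * b_frame_strategy 1 to score how much each buffered frame differs from
 * its predecessor. */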
971 static int get_intra_count(MpegEncContext *s, uint8_t *src,
972                            uint8_t *ref, int stride)
973 {
974     int x, y, w, h;
975     int acc = 0;
976
977     w = s->width  & ~15;
978     h = s->height & ~15;
979
980     for (y = 0; y < h; y += 16) {
981         for (x = 0; x < w; x += 16) {
982             int offset = x + y * stride;
983             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
984                                      16);
985             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
986             int sae  = get_sae(src + offset, mean, stride);
987
988             acc += sae + 500 < sad;
989         }
990     }
991     return acc;
992 }
993
994
995 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
996 {
997     AVFrame *pic = NULL;
998     int64_t pts;
999     int i;
1000     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
1001                                                  (s->low_delay ? 0 : 1);
1002     int direct = 1;
1003
1004     if (pic_arg) {
1005         pts = pic_arg->pts;
1006         pic_arg->display_picture_number = s->input_picture_number++;
1007
1008         if (pts != AV_NOPTS_VALUE) {
1009             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1010                 int64_t time = pts;
1011                 int64_t last = s->user_specified_pts;
1012
1013                 if (time <= last) {
1014                     av_log(s->avctx, AV_LOG_ERROR,
1015                            "Error, Invalid timestamp=%"PRId64", "
1016                            "last=%"PRId64"\n", pts, s->user_specified_pts);
1017                     return -1;
1018                 }
1019
1020                 if (!s->low_delay && pic_arg->display_picture_number == 1)
1021                     s->dts_delta = time - last;
1022             }
1023             s->user_specified_pts = pts;
1024         } else {
1025             if (s->user_specified_pts != AV_NOPTS_VALUE) {
1026                 s->user_specified_pts =
1027                 pts = s->user_specified_pts + 1;
1028                 av_log(s->avctx, AV_LOG_INFO,
1029                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1030                        pts);
1031             } else {
1032                 pts = pic_arg->display_picture_number;
1033             }
1034         }
1035     }
1036
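    /* Decide whether the user-supplied frame can be referenced in place
     * ("direct") or has to be copied into an internal picture: direct use
     * requires CODEC_FLAG_INPUT_PRESERVED whenever frames are buffered for
     * B-frames, and the caller's linesizes must match the encoder's
     * internal strides. */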
1037   if (pic_arg) {
1038     if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
1039         direct = 0;
1040     if (pic_arg->linesize[0] != s->linesize)
1041         direct = 0;
1042     if (pic_arg->linesize[1] != s->uvlinesize)
1043         direct = 0;
1044     if (pic_arg->linesize[2] != s->uvlinesize)
1045         direct = 0;
1046
1047     av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
1048             pic_arg->linesize[1], s->linesize, s->uvlinesize);
1049
1050     if (direct) {
1051         i = ff_find_unused_picture(s, 1);
1052         if (i < 0)
1053             return i;
1054
1055         pic = &s->picture[i].f;
1056         pic->reference = 3;
1057
1058         for (i = 0; i < 4; i++) {
1059             pic->data[i]     = pic_arg->data[i];
1060             pic->linesize[i] = pic_arg->linesize[i];
1061         }
1062         if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
1063             return -1;
1064         }
1065     } else {
1066         i = ff_find_unused_picture(s, 0);
1067         if (i < 0)
1068             return i;
1069
1070         pic = &s->picture[i].f;
1071         pic->reference = 3;
1072
1073         if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
1074             return -1;
1075         }
1076
1077         if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1078             pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1079             pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1080             // empty
1081         } else {
1082             int h_chroma_shift, v_chroma_shift;
1083             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1084
1085             for (i = 0; i < 3; i++) {
1086                 int src_stride = pic_arg->linesize[i];
1087                 int dst_stride = i ? s->uvlinesize : s->linesize;
1088                 int h_shift = i ? h_chroma_shift : 0;
1089                 int v_shift = i ? v_chroma_shift : 0;
1090                 int w = s->width  >> h_shift;
1091                 int h = s->height >> v_shift;
1092                 uint8_t *src = pic_arg->data[i];
1093                 uint8_t *dst = pic->data[i];
1094
1095                 if(s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)){
1096                     h= ((s->height+15)/16*16)>>v_shift;
1097                 }
1098
1099                 if (!s->avctx->rc_buffer_size)
1100                     dst += INPLACE_OFFSET;
1101
1102                 if (src_stride == dst_stride)
1103                     memcpy(dst, src, src_stride * h);
1104                 else {
1105                     while (h--) {
1106                         memcpy(dst, src, w);
1107                         dst += dst_stride;
1108                         src += src_stride;
1109                     }
1110                 }
1111             }
1112         }
1113     }
1114     copy_picture_attributes(s, pic, pic_arg);
1115         pic->pts = pts; // we set this here to avoid modifying pic_arg
1116   }
1117
1118     /* shift buffer entries */
1119     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1120         s->input_picture[i - 1] = s->input_picture[i];
1121
1122     s->input_picture[encoding_delay] = (Picture*) pic;
1123
1124     return 0;
1125 }
1126
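/* Decide whether the candidate frame 'p' may be skipped entirely: the
 * per-8x8-block differences against 'ref' are accumulated with the metric
 * selected by frame_skip_exp (maximum, sum of absolute values, sum of
 * squares or higher powers) and the total is tested against
 * frame_skip_threshold and a lambda-scaled frame_skip_factor. */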
1127 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1128 {
1129     int x, y, plane;
1130     int score = 0;
1131     int64_t score64 = 0;
1132
1133     for (plane = 0; plane < 3; plane++) {
1134         const int stride = p->f.linesize[plane];
1135         const int bw = plane ? 1 : 2;
1136         for (y = 0; y < s->mb_height * bw; y++) {
1137             for (x = 0; x < s->mb_width * bw; x++) {
1138                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1139                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1140                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1141                 int v   = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1142
1143                 switch (s->avctx->frame_skip_exp) {
1144                 case 0: score    =  FFMAX(score, v);          break;
1145                 case 1: score   += FFABS(v);                  break;
1146                 case 2: score   += v * v;                     break;
1147                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1148                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1149                 }
1150             }
1151         }
1152     }
1153
1154     if (score)
1155         score64 = score;
1156
1157     if (score64 < s->avctx->frame_skip_threshold)
1158         return 1;
1159     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1160         return 1;
1161     return 0;
1162 }
1163
1164 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1165 {
1166     AVPacket pkt = { 0 };
1167     int ret, got_output;
1168
1169     av_init_packet(&pkt);
1170     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1171     if (ret < 0)
1172         return ret;
1173
1174     ret = pkt.size;
1175     av_free_packet(&pkt);
1176     return ret;
1177 }
1178
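/* b_frame_strategy 2: try each possible number of consecutive B-frames on
 * copies of the buffered input pictures downscaled by 2^brd_scale, encode
 * them with a temporary encoder instance, and return the B-frame count
 * with the lowest combined bit cost and reconstruction error. */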
1179 static int estimate_best_b_count(MpegEncContext *s)
1180 {
1181     AVCodec *codec    = avcodec_find_encoder(s->avctx->codec_id);
1182     AVCodecContext *c = avcodec_alloc_context3(NULL);
1183     AVFrame input[FF_MAX_B_FRAMES + 2];
1184     const int scale = s->avctx->brd_scale;
1185     int i, j, out_size, p_lambda, b_lambda, lambda2;
1186     int64_t best_rd  = INT64_MAX;
1187     int best_b_count = -1;
1188
1189     av_assert0(scale >= 0 && scale <= 3);
1190
1191     //emms_c();
1192     //s->next_picture_ptr->quality;
1193     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1194     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1195     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1196     if (!b_lambda) // FIXME we should do this somewhere else
1197         b_lambda = p_lambda;
1198     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1199                FF_LAMBDA_SHIFT;
1200
1201     c->width        = s->width  >> scale;
1202     c->height       = s->height >> scale;
1203     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1204                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1205     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1206     c->mb_decision  = s->avctx->mb_decision;
1207     c->me_cmp       = s->avctx->me_cmp;
1208     c->mb_cmp       = s->avctx->mb_cmp;
1209     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1210     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1211     c->time_base    = s->avctx->time_base;
1212     c->max_b_frames = s->max_b_frames;
1213
1214     if (avcodec_open2(c, codec, NULL) < 0)
1215         return -1;
1216
1217     for (i = 0; i < s->max_b_frames + 2; i++) {
1218         int ysize = c->width * c->height;
1219         int csize = (c->width / 2) * (c->height / 2);
1220         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1221                                                 s->next_picture_ptr;
1222
1223         avcodec_get_frame_defaults(&input[i]);
1224         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1225         input[i].data[1]     = input[i].data[0] + ysize;
1226         input[i].data[2]     = input[i].data[1] + csize;
1227         input[i].linesize[0] = c->width;
1228         input[i].linesize[1] =
1229         input[i].linesize[2] = c->width / 2;
1230
1231         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1232             pre_input = *pre_input_ptr;
1233
1234             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1235                 pre_input.f.data[0] += INPLACE_OFFSET;
1236                 pre_input.f.data[1] += INPLACE_OFFSET;
1237                 pre_input.f.data[2] += INPLACE_OFFSET;
1238             }
1239
1240             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1241                                  pre_input.f.data[0], pre_input.f.linesize[0],
1242                                  c->width,      c->height);
1243             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1244                                  pre_input.f.data[1], pre_input.f.linesize[1],
1245                                  c->width >> 1, c->height >> 1);
1246             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1247                                  pre_input.f.data[2], pre_input.f.linesize[2],
1248                                  c->width >> 1, c->height >> 1);
1249         }
1250     }
1251
1252     for (j = 0; j < s->max_b_frames + 1; j++) {
1253         int64_t rd = 0;
1254
1255         if (!s->input_picture[j])
1256             break;
1257
1258         c->error[0] = c->error[1] = c->error[2] = 0;
1259
1260         input[0].pict_type = AV_PICTURE_TYPE_I;
1261         input[0].quality   = 1 * FF_QP2LAMBDA;
1262
1263         out_size = encode_frame(c, &input[0]);
1264
1265         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1266
1267         for (i = 0; i < s->max_b_frames + 1; i++) {
1268             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1269
1270             input[i + 1].pict_type = is_p ?
1271                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1272             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1273
1274             out_size = encode_frame(c, &input[i + 1]);
1275
1276             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1277         }
1278
1279         /* get the delayed frames */
1280         while (out_size) {
1281             out_size = encode_frame(c, NULL);
1282             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1283         }
1284
1285         rd += c->error[0] + c->error[1] + c->error[2];
1286
1287         if (rd < best_rd) {
1288             best_rd = rd;
1289             best_b_count = j;
1290         }
1291     }
1292
1293     avcodec_close(c);
1294     av_freep(&c);
1295
1296     for (i = 0; i < s->max_b_frames + 2; i++) {
1297         av_freep(&input[i].data[0]);
1298     }
1299
1300     return best_b_count;
1301 }
1302
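/* Pick the next picture to encode and assign coded picture types: one
 * buffered input is promoted to an I- or P-frame and the inputs before it
 * in display order become B-frames, with the length of the B-frame run
 * chosen by b_frame_strategy, frame skipping and the GOP constraints; the
 * result is stored in reordered_input_picture[] in coding order. */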
1303 static int select_input_picture(MpegEncContext *s)
1304 {
1305     int i;
1306
1307     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1308         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1309     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1310
1311     /* set next picture type & ordering */
1312     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1313         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1314             s->next_picture_ptr == NULL || s->intra_only) {
1315             s->reordered_input_picture[0] = s->input_picture[0];
1316             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1317             s->reordered_input_picture[0]->f.coded_picture_number =
1318                 s->coded_picture_number++;
1319         } else {
1320             int b_frames;
1321
1322             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1323                 if (s->picture_in_gop_number < s->gop_size &&
1324                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1325                     // FIXME check that the gop check above is +-1 correct
1326                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1327                         for (i = 0; i < 4; i++)
1328                             s->input_picture[0]->f.data[i] = NULL;
1329                         s->input_picture[0]->f.type = 0;
1330                     } else {
1331                         assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1332                                s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1333
1334                         s->avctx->release_buffer(s->avctx,
1335                                                  &s->input_picture[0]->f);
1336                     }
1337
1338                     emms_c();
1339                     ff_vbv_update(s, 0);
1340
1341                     goto no_output_pic;
1342                 }
1343             }
1344
1345             if (s->flags & CODEC_FLAG_PASS2) {
1346                 for (i = 0; i < s->max_b_frames + 1; i++) {
1347                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1348
1349                     if (pict_num >= s->rc_context.num_entries)
1350                         break;
1351                     if (!s->input_picture[i]) {
1352                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1353                         break;
1354                     }
1355
1356                     s->input_picture[i]->f.pict_type =
1357                         s->rc_context.entry[pict_num].new_pict_type;
1358                 }
1359             }
1360
1361             if (s->avctx->b_frame_strategy == 0) {
1362                 b_frames = s->max_b_frames;
1363                 while (b_frames && !s->input_picture[b_frames])
1364                     b_frames--;
1365             } else if (s->avctx->b_frame_strategy == 1) {
1366                 for (i = 1; i < s->max_b_frames + 1; i++) {
1367                     if (s->input_picture[i] &&
1368                         s->input_picture[i]->b_frame_score == 0) {
1369                         s->input_picture[i]->b_frame_score =
1370                             get_intra_count(s,
1371                                             s->input_picture[i    ]->f.data[0],
1372                                             s->input_picture[i - 1]->f.data[0],
1373                                             s->linesize) + 1;
1374                     }
1375                 }
1376                 for (i = 0; i < s->max_b_frames + 1; i++) {
1377                     if (s->input_picture[i] == NULL ||
1378                         s->input_picture[i]->b_frame_score - 1 >
1379                             s->mb_num / s->avctx->b_sensitivity)
1380                         break;
1381                 }
1382
1383                 b_frames = FFMAX(0, i - 1);
1384
1385                 /* reset scores */
1386                 for (i = 0; i < b_frames + 1; i++) {
1387                     s->input_picture[i]->b_frame_score = 0;
1388                 }
1389             } else if (s->avctx->b_frame_strategy == 2) {
1390                 b_frames = estimate_best_b_count(s);
1391             } else {
1392                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1393                 b_frames = 0;
1394             }
1395
1396             emms_c();
1397
1398             for (i = b_frames - 1; i >= 0; i--) {
1399                 int type = s->input_picture[i]->f.pict_type;
1400                 if (type && type != AV_PICTURE_TYPE_B)
1401                     b_frames = i;
1402             }
1403             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1404                 b_frames == s->max_b_frames) {
1405                 av_log(s->avctx, AV_LOG_ERROR,
1406                        "warning, too many b frames in a row\n");
1407             }
1408
1409             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1410                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1411                     s->gop_size > s->picture_in_gop_number) {
1412                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1413                 } else {
1414                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1415                         b_frames = 0;
1416                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1417                 }
1418             }
1419
1420             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1421                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1422                 b_frames--;
1423
1424             s->reordered_input_picture[0] = s->input_picture[b_frames];
1425             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1426                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1427             s->reordered_input_picture[0]->f.coded_picture_number =
1428                 s->coded_picture_number++;
1429             for (i = 0; i < b_frames; i++) {
1430                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1431                 s->reordered_input_picture[i + 1]->f.pict_type =
1432                     AV_PICTURE_TYPE_B;
1433                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1434                     s->coded_picture_number++;
1435             }
1436         }
1437     }
1438 no_output_pic:
1439     if (s->reordered_input_picture[0]) {
1440         s->reordered_input_picture[0]->f.reference =
1441            s->reordered_input_picture[0]->f.pict_type !=
1442                AV_PICTURE_TYPE_B ? 3 : 0;
1443
1444         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1445
1446         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1447             s->avctx->rc_buffer_size) {
1448                         // input is a shared pix, so we can't modify it -> alloc a new
1449                         // one & ensure that the shared one is reusable
1450
1451             Picture *pic;
1452             int i = ff_find_unused_picture(s, 0);
1453             if (i < 0)
1454                 return i;
1455             pic = &s->picture[i];
1456
1457             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1458             if (ff_alloc_picture(s, pic, 0) < 0) {
1459                 return -1;
1460             }
1461
1462             /* mark us unused / free shared pic */
1463             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1464                 s->avctx->release_buffer(s->avctx,
1465                                          &s->reordered_input_picture[0]->f);
1466             for (i = 0; i < 4; i++)
1467                 s->reordered_input_picture[0]->f.data[i] = NULL;
1468             s->reordered_input_picture[0]->f.type = 0;
1469
1470             copy_picture_attributes(s, &pic->f,
1471                                     &s->reordered_input_picture[0]->f);
1472
1473             s->current_picture_ptr = pic;
1474         } else {
1475             // input is not a shared pix -> reuse buffer for current_pix
1476
1477             assert(s->reordered_input_picture[0]->f.type ==
1478                        FF_BUFFER_TYPE_USER ||
1479                    s->reordered_input_picture[0]->f.type ==
1480                        FF_BUFFER_TYPE_INTERNAL);
1481
1482             s->current_picture_ptr = s->reordered_input_picture[0];
1483             for (i = 0; i < 4; i++) {
1484                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1485             }
1486         }
1487         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1488
1489         s->picture_number = s->new_picture.f.display_picture_number;
1490     } else {
1491         memset(&s->new_picture, 0, sizeof(Picture));
1492     }
1493     return 0;
1494 }
1495
1496 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1497                           AVFrame *pic_arg, int *got_packet)
1498 {
1499     MpegEncContext *s = avctx->priv_data;
1500     int i, stuffing_count, ret;
1501     int context_count = s->slice_context_count;
1502
1503     s->picture_in_gop_number++;
1504
1505     if (load_input_picture(s, pic_arg) < 0)
1506         return -1;
1507
1508     if (select_input_picture(s) < 0) {
1509         return -1;
1510     }
1511
1512     /* output? */
1513     if (s->new_picture.f.data[0]) {
1514         if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
1515             return ret;
1516         if (s->mb_info) {
1517             s->mb_info_ptr = av_packet_new_side_data(pkt,
1518                                  AV_PKT_DATA_H263_MB_INFO,
1519                                  s->mb_width*s->mb_height*12);
1520             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1521         }
1522
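             /* split the output buffer between the slice threads, proportionally
              * to the number of macroblock rows each thread encodes */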
1523         for (i = 0; i < context_count; i++) {
1524             int start_y = s->thread_context[i]->start_mb_y;
1525             int   end_y = s->thread_context[i]->  end_mb_y;
1526             int h       = s->mb_height;
1527             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1528             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1529
1530             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1531         }
1532
1533         s->pict_type = s->new_picture.f.pict_type;
1534         //emms_c();
1535         if (ff_MPV_frame_start(s, avctx) < 0)
1536             return -1;
1537 vbv_retry:
1538         if (encode_picture(s, s->picture_number) < 0)
1539             return -1;
1540
1541         avctx->header_bits = s->header_bits;
1542         avctx->mv_bits     = s->mv_bits;
1543         avctx->misc_bits   = s->misc_bits;
1544         avctx->i_tex_bits  = s->i_tex_bits;
1545         avctx->p_tex_bits  = s->p_tex_bits;
1546         avctx->i_count     = s->i_count;
1547         // FIXME f/b_count in avctx
1548         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1549         avctx->skip_count  = s->skip_count;
1550
1551         ff_MPV_frame_end(s);
1552
1553         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1554             ff_mjpeg_encode_picture_trailer(s);
1555
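             /* VBV check: if the coded frame exceeds the allowed share of the
              * rate control buffer, raise lambda (globally and per macroblock for
              * adaptive quantization), undo the per-frame state changes made by
              * encode_picture() and re-encode the frame */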
1556         if (avctx->rc_buffer_size) {
1557             RateControlContext *rcc = &s->rc_context;
1558             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1559
1560             if (put_bits_count(&s->pb) > max_size &&
1561                 s->lambda < s->avctx->lmax) {
1562                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1563                                        (s->qscale + 1) / s->qscale);
1564                 if (s->adaptive_quant) {
1565                     int i;
1566                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1567                         s->lambda_table[i] =
1568                             FFMAX(s->lambda_table[i] + 1,
1569                                   s->lambda_table[i] * (s->qscale + 1) /
1570                                   s->qscale);
1571                 }
1572                 s->mb_skipped = 0;        // done in MPV_frame_start()
1573                 // the changes below were done in encode_picture(), so undo them before retrying
1574                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1575                     if (s->flipflop_rounding          ||
1576                         s->codec_id == AV_CODEC_ID_H263P ||
1577                         s->codec_id == AV_CODEC_ID_MPEG4)
1578                         s->no_rounding ^= 1;
1579                 }
1580                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1581                     s->time_base       = s->last_time_base;
1582                     s->last_non_b_time = s->time - s->pp_time;
1583                 }
1584                 for (i = 0; i < context_count; i++) {
1585                     PutBitContext *pb = &s->thread_context[i]->pb;
1586                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1587                 }
1588                 goto vbv_retry;
1589             }
1590
1591             assert(s->avctx->rc_max_rate);
1592         }
1593
1594         if (s->flags & CODEC_FLAG_PASS1)
1595             ff_write_pass1_stats(s);
1596
1597         for (i = 0; i < 4; i++) {
1598             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1599             avctx->error[i] += s->current_picture_ptr->f.error[i];
1600         }
1601
1602         if (s->flags & CODEC_FLAG_PASS1)
1603             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1604                    avctx->i_tex_bits + avctx->p_tex_bits ==
1605                        put_bits_count(&s->pb));
1606         flush_put_bits(&s->pb);
1607         s->frame_bits  = put_bits_count(&s->pb);
1608
1609         stuffing_count = ff_vbv_update(s, s->frame_bits);
1610         s->stuffing_bits = 8*stuffing_count;
1611         if (stuffing_count) {
1612             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1613                     stuffing_count + 50) {
1614                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1615                 return -1;
1616             }
1617
1618             switch (s->codec_id) {
1619             case AV_CODEC_ID_MPEG1VIDEO:
1620             case AV_CODEC_ID_MPEG2VIDEO:
1621                 while (stuffing_count--) {
1622                     put_bits(&s->pb, 8, 0);
1623                 }
1624             break;
1625             case AV_CODEC_ID_MPEG4:
1626                 put_bits(&s->pb, 16, 0);
1627                 put_bits(&s->pb, 16, 0x1C3);
1628                 stuffing_count -= 4;
1629                 while (stuffing_count--) {
1630                     put_bits(&s->pb, 8, 0xFF);
1631                 }
1632             break;
1633             default:
1634                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1635             }
1636             flush_put_bits(&s->pb);
1637             s->frame_bits  = put_bits_count(&s->pb);
1638         }
1639
1640         /* update mpeg1/2 vbv_delay for CBR */
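             /* vbv_delay is stored in 90 kHz ticks and patched directly into the
              * already written picture header via vbv_delay_ptr; avctx->vbv_delay
              * uses a 27 MHz clock, hence the final *300 */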
1641         if (s->avctx->rc_max_rate                          &&
1642             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1643             s->out_format == FMT_MPEG1                     &&
1644             90000LL * (avctx->rc_buffer_size - 1) <=
1645                 s->avctx->rc_max_rate * 0xFFFFLL) {
1646             int vbv_delay, min_delay;
1647             double inbits  = s->avctx->rc_max_rate *
1648                              av_q2d(s->avctx->time_base);
1649             int    minbits = s->frame_bits - 8 *
1650                              (s->vbv_delay_ptr - s->pb.buf - 1);
1651             double bits    = s->rc_context.buffer_index + minbits - inbits;
1652
1653             if (bits < 0)
1654                 av_log(s->avctx, AV_LOG_ERROR,
1655                        "Internal error, negative bits\n");
1656
1657             assert(s->repeat_first_field == 0);
1658
1659             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1660             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1661                         s->avctx->rc_max_rate;
1662
1663             vbv_delay = FFMAX(vbv_delay, min_delay);
1664
1665             av_assert0(vbv_delay < 0xFFFF);
1666
1667             s->vbv_delay_ptr[0] &= 0xF8;
1668             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1669             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1670             s->vbv_delay_ptr[2] &= 0x07;
1671             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1672             avctx->vbv_delay     = vbv_delay * 300;
1673         }
1674         s->total_bits     += s->frame_bits;
1675         avctx->frame_bits  = s->frame_bits;
1676
1677         pkt->pts = s->current_picture.f.pts;
1678         if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1679             if (!s->current_picture.f.coded_picture_number)
1680                 pkt->dts = pkt->pts - s->dts_delta;
1681             else
1682                 pkt->dts = s->reordered_pts;
1683             s->reordered_pts = pkt->pts;
1684         } else
1685             pkt->dts = pkt->pts;
1686         if (s->current_picture.f.key_frame)
1687             pkt->flags |= AV_PKT_FLAG_KEY;
1688         if (s->mb_info)
1689             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1690     } else {
1691         s->frame_bits = 0;
1692     }
1693     assert((s->frame_bits & 7) == 0);
1694
1695     pkt->size = s->frame_bits / 8;
1696     *got_packet = !!pkt->size;
1697     return 0;
1698 }
1699
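     /* Zero out a block that contains only a few scattered +-1 coefficients:
      * tab[] weights each such coefficient by the length of the zero run before
      * it, any |level| > 1 aborts the elimination, and if the accumulated score
      * stays below the threshold the whole block is cleared (the DC coefficient
      * is preserved when skip_dc is set). */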
1700 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1701                                                 int n, int threshold)
1702 {
1703     static const char tab[64] = {
1704         3, 2, 2, 1, 1, 1, 1, 1,
1705         1, 1, 1, 1, 1, 1, 1, 1,
1706         1, 1, 1, 1, 1, 1, 1, 1,
1707         0, 0, 0, 0, 0, 0, 0, 0,
1708         0, 0, 0, 0, 0, 0, 0, 0,
1709         0, 0, 0, 0, 0, 0, 0, 0,
1710         0, 0, 0, 0, 0, 0, 0, 0,
1711         0, 0, 0, 0, 0, 0, 0, 0
1712     };
1713     int score = 0;
1714     int run = 0;
1715     int i;
1716     DCTELEM *block = s->block[n];
1717     const int last_index = s->block_last_index[n];
1718     int skip_dc;
1719
1720     if (threshold < 0) {
1721         skip_dc = 0;
1722         threshold = -threshold;
1723     } else
1724         skip_dc = 1;
1725
1726     /* Are all the coefficients we could set to zero already zero? */
1727     if (last_index <= skip_dc - 1)
1728         return;
1729
1730     for (i = 0; i <= last_index; i++) {
1731         const int j = s->intra_scantable.permutated[i];
1732         const int level = FFABS(block[j]);
1733         if (level == 1) {
1734             if (skip_dc && i == 0)
1735                 continue;
1736             score += tab[run];
1737             run = 0;
1738         } else if (level > 1) {
1739             return;
1740         } else {
1741             run++;
1742         }
1743     }
1744     if (score >= threshold)
1745         return;
1746     for (i = skip_dc; i <= last_index; i++) {
1747         const int j = s->intra_scantable.permutated[i];
1748         block[j] = 0;
1749     }
1750     if (block[0])
1751         s->block_last_index[n] = 0;
1752     else
1753         s->block_last_index[n] = -1;
1754 }
1755
1756 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
1757                                int last_index)
1758 {
1759     int i;
1760     const int maxlevel = s->max_qcoeff;
1761     const int minlevel = s->min_qcoeff;
1762     int overflow = 0;
1763
1764     if (s->mb_intra) {
1765         i = 1; // skip clipping of intra dc
1766     } else
1767         i = 0;
1768
1769     for (; i <= last_index; i++) {
1770         const int j = s->intra_scantable.permutated[i];
1771         int level = block[j];
1772
1773         if (level > maxlevel) {
1774             level = maxlevel;
1775             overflow++;
1776         } else if (level < minlevel) {
1777             level = minlevel;
1778             overflow++;
1779         }
1780
1781         block[j] = level;
1782     }
1783
1784     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1785         av_log(s->avctx, AV_LOG_INFO,
1786                "warning, clipping %d dct coefficients to %d..%d\n",
1787                overflow, minlevel, maxlevel);
1788 }
1789
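     /* per-pixel visual weight: roughly 36 times the standard deviation of the
      * pixel's 3x3 neighbourhood (clipped at the block borders), used by the
      * noise-shaping quantizer refinement */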
1790 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1791 {
1792     int x, y;
1793     // FIXME optimize
1794     for (y = 0; y < 8; y++) {
1795         for (x = 0; x < 8; x++) {
1796             int x2, y2;
1797             int sum = 0;
1798             int sqr = 0;
1799             int count = 0;
1800
1801             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1802                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1803                     int v = ptr[x2 + y2 * stride];
1804                     sum += v;
1805                     sqr += v * v;
1806                     count++;
1807                 }
1808             }
1809             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1810         }
1811     }
1812 }
1813
1814 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1815                                                 int motion_x, int motion_y,
1816                                                 int mb_block_height,
1817                                                 int mb_block_width,
1818                                                 int mb_block_count)
1819 {
1820     int16_t weight[12][64];
1821     DCTELEM orig[12][64];
1822     const int mb_x = s->mb_x;
1823     const int mb_y = s->mb_y;
1824     int i;
1825     int skip_dct[12];
1826     int dct_offset = s->linesize * 8; // default for progressive frames
1827     int uv_dct_offset = s->uvlinesize * 8;
1828     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1829     int wrap_y, wrap_c;
1830
1831     for (i = 0; i < mb_block_count; i++)
1832         skip_dct[i] = s->skipdct;
1833
1834     if (s->adaptive_quant) {
1835         const int last_qp = s->qscale;
1836         const int mb_xy = mb_x + mb_y * s->mb_stride;
1837
1838         s->lambda = s->lambda_table[mb_xy];
1839         update_qscale(s);
1840
1841         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1842             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1843             s->dquant = s->qscale - last_qp;
1844
1845             if (s->out_format == FMT_H263) {
1846                 s->dquant = av_clip(s->dquant, -2, 2);
1847
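                     /* MPEG-4 cannot signal a quantizer change on direct or
                      * 8x8-MV macroblocks, and odd dquant values cannot be coded
                      * in B-frames, so drop dquant in those cases */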
1848                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1849                     if (!s->mb_intra) {
1850                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1851                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1852                                 s->dquant = 0;
1853                         }
1854                         if (s->mv_type == MV_TYPE_8X8)
1855                             s->dquant = 0;
1856                     }
1857                 }
1858             }
1859         }
1860         ff_set_qscale(s, last_qp + s->dquant);
1861     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1862         ff_set_qscale(s, s->qscale + s->dquant);
1863
1864     wrap_y = s->linesize;
1865     wrap_c = s->uvlinesize;
1866     ptr_y  = s->new_picture.f.data[0] +
1867              (mb_y * 16 * wrap_y)              + mb_x * 16;
1868     ptr_cb = s->new_picture.f.data[1] +
1869              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1870     ptr_cr = s->new_picture.f.data[2] +
1871              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1872
1873     if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
1874         uint8_t *ebuf = s->edge_emu_buffer + 32;
1875         s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1876                                 mb_y * 16, s->width, s->height);
1877         ptr_y = ebuf;
1878         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, mb_block_width,
1879                                 mb_block_height, mb_x * 8, mb_y * 8,
1880                                 (s->width+1) >> 1, (s->height+1) >> 1);
1881         ptr_cb = ebuf + 18 * wrap_y;
1882         s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, mb_block_width,
1883                                 mb_block_height, mb_x * 8, mb_y * 8,
1884                                 (s->width+1) >> 1, (s->height+1) >> 1);
1885         ptr_cr = ebuf + 18 * wrap_y + 8;
1886     }
1887
1888     if (s->mb_intra) {
1889         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
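                 /* frame vs field DCT decision: compare the interlace metric of
                  * the two 8-line halves in frame order against the two fields
                  * (every second line); if frame order is costlier, beyond the
                  * -400 bias favouring progressive, switch to field DCT and
                  * adjust the strides and block offsets accordingly */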
1890             int progressive_score, interlaced_score;
1891
1892             s->interlaced_dct = 0;
1893             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1894                                                     NULL, wrap_y, 8) +
1895                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1896                                                     NULL, wrap_y, 8) - 400;
1897
1898             if (progressive_score > 0) {
1899                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1900                                                        NULL, wrap_y * 2, 8) +
1901                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1902                                                        NULL, wrap_y * 2, 8);
1903                 if (progressive_score > interlaced_score) {
1904                     s->interlaced_dct = 1;
1905
1906                     dct_offset = wrap_y;
1907                     uv_dct_offset = wrap_c;
1908                     wrap_y <<= 1;
1909                     if (s->chroma_format == CHROMA_422 ||
1910                         s->chroma_format == CHROMA_444)
1911                         wrap_c <<= 1;
1912                 }
1913             }
1914         }
1915
1916         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1917         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1918         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1919         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1920
1921         if (s->flags & CODEC_FLAG_GRAY) {
1922             skip_dct[4] = 1;
1923             skip_dct[5] = 1;
1924         } else {
1925             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1926             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1927             if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
1928                 s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
1929                 s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
1930             } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
1931                 s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c);
1932                 s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c);
1933                 s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c);
1934                 s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c);
1935                 s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
1936                 s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
1937             }
1938         }
1939     } else {
1940         op_pixels_func (*op_pix)[4];
1941         qpel_mc_func (*op_qpix)[16];
1942         uint8_t *dest_y, *dest_cb, *dest_cr;
1943
1944         dest_y  = s->dest[0];
1945         dest_cb = s->dest[1];
1946         dest_cr = s->dest[2];
1947
1948         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1949             op_pix  = s->dsp.put_pixels_tab;
1950             op_qpix = s->dsp.put_qpel_pixels_tab;
1951         } else {
1952             op_pix  = s->dsp.put_no_rnd_pixels_tab;
1953             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1954         }
1955
1956         if (s->mv_dir & MV_DIR_FORWARD) {
1957             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1958                           s->last_picture.f.data,
1959                           op_pix, op_qpix);
1960             op_pix  = s->dsp.avg_pixels_tab;
1961             op_qpix = s->dsp.avg_qpel_pixels_tab;
1962         }
1963         if (s->mv_dir & MV_DIR_BACKWARD) {
1964             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1965                           s->next_picture.f.data,
1966                           op_pix, op_qpix);
1967         }
1968
1969         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1970             int progressive_score, interlaced_score;
1971
1972             s->interlaced_dct = 0;
1973             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1974                                                     ptr_y,              wrap_y,
1975                                                     8) +
1976                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1977                                                     ptr_y + wrap_y * 8, wrap_y,
1978                                                     8) - 400;
1979
1980             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1981                 progressive_score -= 400;
1982
1983             if (progressive_score > 0) {
1984                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1985                                                        ptr_y,
1986                                                        wrap_y * 2, 8) +
1987                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1988                                                        ptr_y + wrap_y,
1989                                                        wrap_y * 2, 8);
1990
1991                 if (progressive_score > interlaced_score) {
1992                     s->interlaced_dct = 1;
1993
1994                     dct_offset = wrap_y;
1995                     uv_dct_offset = wrap_c;
1996                     wrap_y <<= 1;
1997                     if (s->chroma_format == CHROMA_422)
1998                         wrap_c <<= 1;
1999                 }
2000             }
2001         }
2002
2003         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2004         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2005         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2006                            dest_y + dct_offset, wrap_y);
2007         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2008                            dest_y + dct_offset + 8, wrap_y);
2009
2010         if (s->flags & CODEC_FLAG_GRAY) {
2011             skip_dct[4] = 1;
2012             skip_dct[5] = 1;
2013         } else {
2014             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2015             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2016             if (!s->chroma_y_shift) { /* 422 */
2017                 s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2018                                    dest_cb + uv_dct_offset, wrap_c);
2019                 s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2020                                    dest_cr + uv_dct_offset, wrap_c);
2021             }
2022         }
2023         /* pre quantization */
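             /* only attempted when the motion-compensated MB variance is already
              * small; any 8x8 block whose residual SAD falls below 20*qscale gets
              * its skip_dct flag set and is not transformed at all */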
2024         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2025                 2 * s->qscale * s->qscale) {
2026             // FIXME optimize
2027             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
2028                               wrap_y, 8) < 20 * s->qscale)
2029                 skip_dct[0] = 1;
2030             if (s->dsp.sad[1](NULL, ptr_y + 8,
2031                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2032                 skip_dct[1] = 1;
2033             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
2034                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
2035                 skip_dct[2] = 1;
2036             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
2037                               dest_y + dct_offset + 8,
2038                               wrap_y, 8) < 20 * s->qscale)
2039                 skip_dct[3] = 1;
2040             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
2041                               wrap_c, 8) < 20 * s->qscale)
2042                 skip_dct[4] = 1;
2043             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
2044                               wrap_c, 8) < 20 * s->qscale)
2045                 skip_dct[5] = 1;
2046             if (!s->chroma_y_shift) { /* 422 */
2047                 if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
2048                                   dest_cb + uv_dct_offset,
2049                                   wrap_c, 8) < 20 * s->qscale)
2050                     skip_dct[6] = 1;
2051                 if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
2052                                   dest_cr + uv_dct_offset,
2053                                   wrap_c, 8) < 20 * s->qscale)
2054                     skip_dct[7] = 1;
2055             }
2056         }
2057     }
2058
2059     if (s->quantizer_noise_shaping) {
2060         if (!skip_dct[0])
2061             get_visual_weight(weight[0], ptr_y                 , wrap_y);
2062         if (!skip_dct[1])
2063             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
2064         if (!skip_dct[2])
2065             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
2066         if (!skip_dct[3])
2067             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2068         if (!skip_dct[4])
2069             get_visual_weight(weight[4], ptr_cb                , wrap_c);
2070         if (!skip_dct[5])
2071             get_visual_weight(weight[5], ptr_cr                , wrap_c);
2072         if (!s->chroma_y_shift) { /* 422 */
2073             if (!skip_dct[6])
2074                 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2075                                   wrap_c);
2076             if (!skip_dct[7])
2077                 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2078                                   wrap_c);
2079         }
2080         memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
2081     }
2082
2083     /* DCT & quantize */
2084     av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2085     {
2086         for (i = 0; i < mb_block_count; i++) {
2087             if (!skip_dct[i]) {
2088                 int overflow;
2089                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2090                 // FIXME we could decide to change the quantizer instead of
2091                 // clipping
2092                 // JS: I don't think that would be a good idea; it could lower
2093                 //     quality instead of improving it. Only INTRADC clipping
2094                 //     deserves a change of the quantizer.
2095                 if (overflow)
2096                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
2097             } else
2098                 s->block_last_index[i] = -1;
2099         }
2100         if (s->quantizer_noise_shaping) {
2101             for (i = 0; i < mb_block_count; i++) {
2102                 if (!skip_dct[i]) {
2103                     s->block_last_index[i] =
2104                         dct_quantize_refine(s, s->block[i], weight[i],
2105                                             orig[i], i, s->qscale);
2106                 }
2107             }
2108         }
2109
2110         if (s->luma_elim_threshold && !s->mb_intra)
2111             for (i = 0; i < 4; i++)
2112                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2113         if (s->chroma_elim_threshold && !s->mb_intra)
2114             for (i = 4; i < mb_block_count; i++)
2115                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2116
2117         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2118             for (i = 0; i < mb_block_count; i++) {
2119                 if (s->block_last_index[i] == -1)
2120                     s->coded_score[i] = INT_MAX / 256;
2121             }
2122         }
2123     }
2124
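         /* in grayscale mode, intra chroma blocks are forced to flat mid-grey:
          * only the quantized DC (representing 1024 = 8*128) is kept */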
2125     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2126         s->block_last_index[4] =
2127         s->block_last_index[5] = 0;
2128         s->block[4][0] =
2129         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2130     }
2131
2132     // FIXME: the non-C quantize code returns an incorrect block_last_index
2133     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2134         for (i = 0; i < mb_block_count; i++) {
2135             int j;
2136             if (s->block_last_index[i] > 0) {
2137                 for (j = 63; j > 0; j--) {
2138                     if (s->block[i][s->intra_scantable.permutated[j]])
2139                         break;
2140                 }
2141                 s->block_last_index[i] = j;
2142             }
2143         }
2144     }
2145
2146     /* huffman encode */
2147     switch(s->codec_id){ //FIXME a function pointer could be slightly faster
2148     case AV_CODEC_ID_MPEG1VIDEO:
2149     case AV_CODEC_ID_MPEG2VIDEO:
2150         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2151             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2152         break;
2153     case AV_CODEC_ID_MPEG4:
2154         if (CONFIG_MPEG4_ENCODER)
2155             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2156         break;
2157     case AV_CODEC_ID_MSMPEG4V2:
2158     case AV_CODEC_ID_MSMPEG4V3:
2159     case AV_CODEC_ID_WMV1:
2160         if (CONFIG_MSMPEG4_ENCODER)
2161             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2162         break;
2163     case AV_CODEC_ID_WMV2:
2164         if (CONFIG_WMV2_ENCODER)
2165             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2166         break;
2167     case AV_CODEC_ID_H261:
2168         if (CONFIG_H261_ENCODER)
2169             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2170         break;
2171     case AV_CODEC_ID_H263:
2172     case AV_CODEC_ID_H263P:
2173     case AV_CODEC_ID_FLV1:
2174     case AV_CODEC_ID_RV10:
2175     case AV_CODEC_ID_RV20:
2176         if (CONFIG_H263_ENCODER)
2177             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2178         break;
2179     case AV_CODEC_ID_MJPEG:
2180     case AV_CODEC_ID_AMV:
2181         if (CONFIG_MJPEG_ENCODER)
2182             ff_mjpeg_encode_mb(s, s->block);
2183         break;
2184     default:
2185         av_assert1(0);
2186     }
2187 }
2188
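     /* dispatch on chroma format: 4:2:0 uses 6 blocks (8x8 chroma), 4:2:2 uses
      * 8 blocks (8x16 chroma), 4:4:4 uses 12 blocks (16x16 chroma) */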
2189 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2190 {
2191     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 8, 6);
2192     else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2193     else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2194 }
2195
2196 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2197     int i;
2198
2199     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2200
2201     /* mpeg1 */
2202     d->mb_skip_run= s->mb_skip_run;
2203     for(i=0; i<3; i++)
2204         d->last_dc[i] = s->last_dc[i];
2205
2206     /* statistics */
2207     d->mv_bits= s->mv_bits;
2208     d->i_tex_bits= s->i_tex_bits;
2209     d->p_tex_bits= s->p_tex_bits;
2210     d->i_count= s->i_count;
2211     d->f_count= s->f_count;
2212     d->b_count= s->b_count;
2213     d->skip_count= s->skip_count;
2214     d->misc_bits= s->misc_bits;
2215     d->last_bits= 0;
2216
2217     d->mb_skipped= 0;
2218     d->qscale= s->qscale;
2219     d->dquant= s->dquant;
2220
2221     d->esc3_level_length= s->esc3_level_length;
2222 }
2223
2224 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2225     int i;
2226
2227     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2228     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2229
2230     /* mpeg1 */
2231     d->mb_skip_run= s->mb_skip_run;
2232     for(i=0; i<3; i++)
2233         d->last_dc[i] = s->last_dc[i];
2234
2235     /* statistics */
2236     d->mv_bits= s->mv_bits;
2237     d->i_tex_bits= s->i_tex_bits;
2238     d->p_tex_bits= s->p_tex_bits;
2239     d->i_count= s->i_count;
2240     d->f_count= s->f_count;
2241     d->b_count= s->b_count;
2242     d->skip_count= s->skip_count;
2243     d->misc_bits= s->misc_bits;
2244
2245     d->mb_intra= s->mb_intra;
2246     d->mb_skipped= s->mb_skipped;
2247     d->mv_type= s->mv_type;
2248     d->mv_dir= s->mv_dir;
2249     d->pb= s->pb;
2250     if(s->data_partitioning){
2251         d->pb2= s->pb2;
2252         d->tex_pb= s->tex_pb;
2253     }
2254     d->block= s->block;
2255     for(i=0; i<8; i++)
2256         d->block_last_index[i]= s->block_last_index[i];
2257     d->interlaced_dct= s->interlaced_dct;
2258     d->qscale= s->qscale;
2259
2260     d->esc3_level_length= s->esc3_level_length;
2261 }
2262
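     /* Encode one macroblock candidate for the RD mode decision: the MB is
      * encoded into one of two alternating scratch bit buffers (and, for the
      * second one, into a scratch destination), scored by its bit count plus an
      * SSE distortion term in full RD mode, and the cheapest candidate so far is
      * kept in *best. */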
2263 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2264                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2265                            int *dmin, int *next_block, int motion_x, int motion_y)
2266 {
2267     int score;
2268     uint8_t *dest_backup[3];
2269
2270     copy_context_before_encode(s, backup, type);
2271
2272     s->block= s->blocks[*next_block];
2273     s->pb= pb[*next_block];
2274     if(s->data_partitioning){
2275         s->pb2   = pb2   [*next_block];
2276         s->tex_pb= tex_pb[*next_block];
2277     }
2278
2279     if(*next_block){
2280         memcpy(dest_backup, s->dest, sizeof(s->dest));
2281         s->dest[0] = s->rd_scratchpad;
2282         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2283         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2284         assert(s->linesize >= 32); //FIXME
2285     }
2286
2287     encode_mb(s, motion_x, motion_y);
2288
2289     score= put_bits_count(&s->pb);
2290     if(s->data_partitioning){
2291         score+= put_bits_count(&s->pb2);
2292         score+= put_bits_count(&s->tex_pb);
2293     }
2294
2295     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2296         ff_MPV_decode_mb(s, s->block);
2297
2298         score *= s->lambda2;
2299         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2300     }
2301
2302     if(*next_block){
2303         memcpy(s->dest, dest_backup, sizeof(s->dest));
2304     }
2305
2306     if(score<*dmin){
2307         *dmin= score;
2308         *next_block^=1;
2309
2310         copy_context_after_encode(best, s, type);
2311     }
2312 }
2313
2314 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2315     uint32_t *sq = ff_squareTbl + 256;
2316     int acc=0;
2317     int x,y;
2318
2319     if(w==16 && h==16)
2320         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2321     else if(w==8 && h==8)
2322         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2323
2324     for(y=0; y<h; y++){
2325         for(x=0; x<w; x++){
2326             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2327         }
2328     }
2329
2330     av_assert2(acc>=0);
2331
2332     return acc;
2333 }
2334
2335 static int sse_mb(MpegEncContext *s){
2336     int w= 16;
2337     int h= 16;
2338
2339     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2340     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2341
2342     if(w==16 && h==16)
2343       if(s->avctx->mb_cmp == FF_CMP_NSSE){
2344         return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2345                +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2346                +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2347       }else{
2348         return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2349                +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2350                +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2351       }
2352     else
2353         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2354                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2355                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2356 }
2357
2358 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2359     MpegEncContext *s= *(void**)arg;
2360
2361
2362     s->me.pre_pass=1;
2363     s->me.dia_size= s->avctx->pre_dia_size;
2364     s->first_slice_line=1;
2365     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2366         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2367             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2368         }
2369         s->first_slice_line=0;
2370     }
2371
2372     s->me.pre_pass=0;
2373
2374     return 0;
2375 }
2376
2377 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2378     MpegEncContext *s= *(void**)arg;
2379
2380     ff_check_alignment();
2381
2382     s->me.dia_size= s->avctx->dia_size;
2383     s->first_slice_line=1;
2384     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2385         s->mb_x=0; //for block init below
2386         ff_init_block_index(s);
2387         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2388             s->block_index[0]+=2;
2389             s->block_index[1]+=2;
2390             s->block_index[2]+=2;
2391             s->block_index[3]+=2;
2392
2393             /* compute motion vector & mb_type and store in context */
2394             if(s->pict_type==AV_PICTURE_TYPE_B)
2395                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2396             else
2397                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2398         }
2399         s->first_slice_line=0;
2400     }
2401     return 0;
2402 }
2403
2404 static int mb_var_thread(AVCodecContext *c, void *arg){
2405     MpegEncContext *s= *(void**)arg;
2406     int mb_x, mb_y;
2407
2408     ff_check_alignment();
2409
2410     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2411         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2412             int xx = mb_x * 16;
2413             int yy = mb_y * 16;
2414             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2415             int varc;
2416             int sum = s->dsp.pix_sum(pix, s->linesize);
2417
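                 /* 16x16 block variance: sum of squares minus sum^2/256, scaled
                  * by 1/256, with a small bias (+500) and rounding (+128) */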
2418             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2419
2420             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2421             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2422             s->me.mb_var_sum_temp    += varc;
2423         }
2424     }
2425     return 0;
2426 }
2427
2428 static void write_slice_end(MpegEncContext *s){
2429     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2430         if(s->partitioned_frame){
2431             ff_mpeg4_merge_partitions(s);
2432         }
2433
2434         ff_mpeg4_stuffing(&s->pb);
2435     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2436         ff_mjpeg_encode_stuffing(s);
2437     }
2438
2439     avpriv_align_put_bits(&s->pb);
2440     flush_put_bits(&s->pb);
2441
2442     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2443         s->misc_bits+= get_bits_diff(s);
2444 }
2445
2446 static void write_mb_info(MpegEncContext *s)
2447 {
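         /* each AV_PKT_DATA_H263_MB_INFO record is 12 bytes: bit offset of the
          * MB (le32), qscale, GOB number, macroblock address (le16) and the two
          * motion vector predictors; hmv2/vmv2 stay 0 as 4MV is not implemented */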
2448     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2449     int offset = put_bits_count(&s->pb);
2450     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2451     int gobn = s->mb_y / s->gob_index;
2452     int pred_x, pred_y;
2453     if (CONFIG_H263_ENCODER)
2454         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2455     bytestream_put_le32(&ptr, offset);
2456     bytestream_put_byte(&ptr, s->qscale);
2457     bytestream_put_byte(&ptr, gobn);
2458     bytestream_put_le16(&ptr, mba);
2459     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2460     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2461     /* 4MV not implemented */
2462     bytestream_put_byte(&ptr, 0); /* hmv2 */
2463     bytestream_put_byte(&ptr, 0); /* vmv2 */
2464 }
2465
2466 static void update_mb_info(MpegEncContext *s, int startcode)
2467 {
2468     if (!s->mb_info)
2469         return;
2470     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2471         s->mb_info_size += 12;
2472         s->prev_mb_info = s->last_mb_info;
2473     }
2474     if (startcode) {
2475         s->prev_mb_info = put_bits_count(&s->pb)/8;
2476         /* This might have incremented mb_info_size above, and we return without
2477          * actually writing any info into that slot yet. But in that case,
2478          * this will be called again after the start code has been written,
2479          * and the mb info will actually be written then. */
2480         return;
2481     }
2482
2483     s->last_mb_info = put_bits_count(&s->pb)/8;
2484     if (!s->mb_info_size)
2485         s->mb_info_size += 12;
2486     write_mb_info(s);
2487 }
2488
2489 static int encode_thread(AVCodecContext *c, void *arg){
2490     MpegEncContext *s= *(void**)arg;
2491     int mb_x, mb_y, pdif = 0;
2492     int chr_h= 16>>s->chroma_y_shift;
2493     int i, j;
2494     MpegEncContext best_s, backup_s;
2495     uint8_t bit_buf[2][MAX_MB_BYTES];
2496     uint8_t bit_buf2[2][MAX_MB_BYTES];
2497     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2498     PutBitContext pb[2], pb2[2], tex_pb[2];
2499
2500     ff_check_alignment();
2501
2502     for(i=0; i<2; i++){
2503         init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
2504         init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
2505         init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2506     }
2507
2508     s->last_bits= put_bits_count(&s->pb);
2509     s->mv_bits=0;
2510     s->misc_bits=0;
2511     s->i_tex_bits=0;
2512     s->p_tex_bits=0;
2513     s->i_count=0;
2514     s->f_count=0;
2515     s->b_count=0;
2516     s->skip_count=0;
2517
2518     for(i=0; i<3; i++){
2519         /* init last dc values */
2520         /* note: quant matrix value (8) is implied here */
2521         s->last_dc[i] = 128 << s->intra_dc_precision;
2522
2523         s->current_picture.f.error[i] = 0;
2524     }
2525     if(s->codec_id==AV_CODEC_ID_AMV){
2526         s->last_dc[0] = 128*8/13;
2527         s->last_dc[1] = 128*8/14;
2528         s->last_dc[2] = 128*8/14;
2529     }
2530     s->mb_skip_run = 0;
2531     memset(s->last_mv, 0, sizeof(s->last_mv));
2532
2533     s->last_mv_dir = 0;
2534
2535     switch(s->codec_id){
2536     case AV_CODEC_ID_H263:
2537     case AV_CODEC_ID_H263P:
2538     case AV_CODEC_ID_FLV1:
2539         if (CONFIG_H263_ENCODER)
2540             s->gob_index = ff_h263_get_gob_height(s);
2541         break;
2542     case AV_CODEC_ID_MPEG4:
2543         if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2544             ff_mpeg4_init_partitions(s);
2545         break;
2546     }
2547
2548     s->resync_mb_x=0;
2549     s->resync_mb_y=0;
2550     s->first_slice_line = 1;
2551     s->ptr_lastgob = s->pb.buf;
2552     for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2553         s->mb_x=0;
2554         s->mb_y= mb_y;
2555
2556         ff_set_qscale(s, s->qscale);
2557         ff_init_block_index(s);
2558
2559         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2560             int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2561             int mb_type= s->mb_type[xy];
2562 //            int d;
2563             int dmin= INT_MAX;
2564             int dir;
2565
2566             if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2567                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2568                 return -1;
2569             }
2570             if(s->data_partitioning){
2571                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
2572                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2573                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2574                     return -1;
2575                 }
2576             }
2577
2578             s->mb_x = mb_x;
2579             s->mb_y = mb_y;  // moved into loop, can get changed by H.261
2580             ff_update_block_index(s);
2581
2582             if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2583                 ff_h261_reorder_mb_index(s);
2584                 xy= s->mb_y*s->mb_stride + s->mb_x;
2585                 mb_type= s->mb_type[xy];
2586             }
2587
2588             /* write gob / video packet header  */
2589             if(s->rtp_mode){
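                     /* a new GOB / slice / video packet is started once the
                      * current one reaches rtp_payload_size, subject to the
                      * codec-specific adjustments below */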
2590                 int current_packet_size, is_gob_start;
2591
2592                 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2593
2594                 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2595
2596                 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2597
2598                 switch(s->codec_id){
2599                 case AV_CODEC_ID_H263:
2600                 case AV_CODEC_ID_H263P:
2601                     if(!s->h263_slice_structured)
2602                         if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2603                     break;
2604                 case AV_CODEC_ID_MPEG2VIDEO:
2605                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2606                 case AV_CODEC_ID_MPEG1VIDEO:
2607                     if(s->mb_skip_run) is_gob_start=0;
2608                     break;
2609                 case AV_CODEC_ID_MJPEG:
2610                     if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2611                     break;
2612                 }
2613
2614                 if(is_gob_start){
2615                     if(s->start_mb_y != mb_y || mb_x!=0){
2616                         write_slice_end(s);
2617                         if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2618                             ff_mpeg4_init_partitions(s);
2619                         }
2620                     }
2621
2622                     av_assert2((put_bits_count(&s->pb)&7) == 0);
2623                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2624
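                         /* bitstream error simulation: with avctx->error_rate
                          * set, roughly every (100 / error_rate)-th GOB is
                          * dropped by rewinding the bitstream pointer to the
                          * last GOB start */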
2625                     if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2626                         int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2627                         int d= 100 / s->avctx->error_rate;
2628                         if(r % d == 0){
2629                             current_packet_size=0;
2630                             s->pb.buf_ptr= s->ptr_lastgob;
2631                             assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2632                         }
2633                     }
2634
2635                     if (s->avctx->rtp_callback){
2636                         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2637                         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2638                     }
2639                     update_mb_info(s, 1);
2640
2641                     switch(s->codec_id){
2642                     case AV_CODEC_ID_MPEG4:
2643                         if (CONFIG_MPEG4_ENCODER) {
2644                             ff_mpeg4_encode_video_packet_header(s);
2645                             ff_mpeg4_clean_buffers(s);
2646                         }
2647                     break;
2648                     case AV_CODEC_ID_MPEG1VIDEO:
2649                     case AV_CODEC_ID_MPEG2VIDEO:
2650                         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2651                             ff_mpeg1_encode_slice_header(s);
2652                             ff_mpeg1_clean_buffers(s);
2653                         }
2654                     break;
2655                     case AV_CODEC_ID_H263:
2656                     case AV_CODEC_ID_H263P:
2657                         if (CONFIG_H263_ENCODER)
2658                             ff_h263_encode_gob_header(s, mb_y);
2659                     break;
2660                     }
2661
2662                     if(s->flags&CODEC_FLAG_PASS1){
2663                         int bits= put_bits_count(&s->pb);
2664                         s->misc_bits+= bits - s->last_bits;
2665                         s->last_bits= bits;
2666                     }
2667
2668                     s->ptr_lastgob += current_packet_size;
2669                     s->first_slice_line=1;
2670                     s->resync_mb_x=mb_x;
2671                     s->resync_mb_y=mb_y;
2672                 }
2673             }
2674
2675             if(  (s->resync_mb_x   == s->mb_x)
2676                && s->resync_mb_y+1 == s->mb_y){
2677                 s->first_slice_line=0;
2678             }
2679
2680             s->mb_skipped=0;
2681             s->dquant=0; //only for QP_RD
2682
2683             update_mb_info(s, 0);
2684
2685             if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2686                 int next_block=0;
2687                 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2688
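                     /* rate-distortion macroblock mode decision: each candidate
                      * MB type is encoded via encode_mb_hq() into alternating
                      * scratch bit buffers, and the cheapest result is kept in
                      * best_s */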
2689                 copy_context_before_encode(&backup_s, s, -1);
2690                 backup_s.pb= s->pb;
2691                 best_s.data_partitioning= s->data_partitioning;
2692                 best_s.partitioned_frame= s->partitioned_frame;
2693                 if(s->data_partitioning){
2694                     backup_s.pb2= s->pb2;
2695                     backup_s.tex_pb= s->tex_pb;
2696                 }
2697
2698                 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2699                     s->mv_dir = MV_DIR_FORWARD;
2700                     s->mv_type = MV_TYPE_16X16;
2701                     s->mb_intra= 0;
2702                     s->mv[0][0][0] = s->p_mv_table[xy][0];
2703                     s->mv[0][0][1] = s->p_mv_table[xy][1];
2704                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2705                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2706                 }
2707                 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2708                     s->mv_dir = MV_DIR_FORWARD;
2709                     s->mv_type = MV_TYPE_FIELD;
2710                     s->mb_intra= 0;
2711                     for(i=0; i<2; i++){
2712                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2713                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2714                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2715                     }
2716                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2717                                  &dmin, &next_block, 0, 0);
2718                 }
2719                 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2720                     s->mv_dir = MV_DIR_FORWARD;
2721                     s->mv_type = MV_TYPE_16X16;
2722                     s->mb_intra= 0;
2723                     s->mv[0][0][0] = 0;
2724                     s->mv[0][0][1] = 0;
2725                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2726                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2727                 }
2728                 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2729                     s->mv_dir = MV_DIR_FORWARD;
2730                     s->mv_type = MV_TYPE_8X8;
2731                     s->mb_intra= 0;
2732                     for(i=0; i<4; i++){
2733                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2734                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2735                     }
2736                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2737                                  &dmin, &next_block, 0, 0);
2738                 }
2739                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2740                     s->mv_dir = MV_DIR_FORWARD;
2741                     s->mv_type = MV_TYPE_16X16;
2742                     s->mb_intra= 0;
2743                     s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2744                     s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2745                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2746                                  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2747                 }
2748                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2749                     s->mv_dir = MV_DIR_BACKWARD;
2750                     s->mv_type = MV_TYPE_16X16;
2751                     s->mb_intra= 0;
2752                     s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2753                     s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2754                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2755                                  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2756                 }
2757                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2758                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2759                     s->mv_type = MV_TYPE_16X16;
2760                     s->mb_intra= 0;
2761                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2762                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2763                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2764                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2765                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2766                                  &dmin, &next_block, 0, 0);
2767                 }
2768                 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2769                     s->mv_dir = MV_DIR_FORWARD;
2770                     s->mv_type = MV_TYPE_FIELD;
2771                     s->mb_intra= 0;
2772                     for(i=0; i<2; i++){
2773                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2774                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2775                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2776                     }
2777                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2778                                  &dmin, &next_block, 0, 0);
2779                 }
2780                 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2781                     s->mv_dir = MV_DIR_BACKWARD;
2782                     s->mv_type = MV_TYPE_FIELD;
2783                     s->mb_intra= 0;
2784                     for(i=0; i<2; i++){
2785                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2786                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2787                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2788                     }
2789                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2790                                  &dmin, &next_block, 0, 0);
2791                 }
2792                 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2793                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2794                     s->mv_type = MV_TYPE_FIELD;
2795                     s->mb_intra= 0;
2796                     for(dir=0; dir<2; dir++){
2797                         for(i=0; i<2; i++){
2798                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2799                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2800                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2801                         }
2802                     }
2803                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2804                                  &dmin, &next_block, 0, 0);
2805                 }
2806                 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2807                     s->mv_dir = 0;
2808                     s->mv_type = MV_TYPE_16X16;
2809                     s->mb_intra= 1;
2810                     s->mv[0][0][0] = 0;
2811                     s->mv[0][0][1] = 0;
2812                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2813                                  &dmin, &next_block, 0, 0);
2814                     if(s->h263_pred || s->h263_aic){
2815                         if(best_s.mb_intra)
2816                             s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2817                         else
2818                             ff_clean_intra_table_entries(s); //old mode?
2819                     }
2820                 }
2821
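                /* QP_RD: re-encode the best 16x16 mode with qscale deltas of
                 * -1/+1/-2/+2 (B-frames only try +/-2) and keep the quantizer
                 * with the lowest rate-distortion score; intra DC/AC prediction
                 * state is saved and restored whenever a candidate does not win. */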
2822                 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2823                     if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2824                         const int last_qp= backup_s.qscale;
2825                         int qpi, qp, dc[6];
2826                         DCTELEM ac[6][16];
2827                         const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2828                         static const int dquant_tab[4]={-1,1,-2,2};
2829
2830                         av_assert2(backup_s.dquant == 0);
2831
2832                         //FIXME intra
2833                         s->mv_dir= best_s.mv_dir;
2834                         s->mv_type = MV_TYPE_16X16;
2835                         s->mb_intra= best_s.mb_intra;
2836                         s->mv[0][0][0] = best_s.mv[0][0][0];
2837                         s->mv[0][0][1] = best_s.mv[0][0][1];
2838                         s->mv[1][0][0] = best_s.mv[1][0][0];
2839                         s->mv[1][0][1] = best_s.mv[1][0][1];
2840
2841                         qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2842                         for(; qpi<4; qpi++){
2843                             int dquant= dquant_tab[qpi];
2844                             qp= last_qp + dquant;
2845                             if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2846                                 continue;
2847                             backup_s.dquant= dquant;
2848                             if(s->mb_intra && s->dc_val[0]){
2849                                 for(i=0; i<6; i++){
2850                                     dc[i]= s->dc_val[0][ s->block_index[i] ];
2851                                     memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2852                                 }
2853                             }
2854
2855                             encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2856                                          &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2857                             if(best_s.qscale != qp){
2858                                 if(s->mb_intra && s->dc_val[0]){
2859                                     for(i=0; i<6; i++){
2860                                         s->dc_val[0][ s->block_index[i] ]= dc[i];
2861                                         memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2862                                     }
2863                                 }
2864                             }
2865                         }
2866                     }
2867                 }
2868                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2869                     int mx= s->b_direct_mv_table[xy][0];
2870                     int my= s->b_direct_mv_table[xy][1];
2871
2872                     backup_s.dquant = 0;
2873                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2874                     s->mb_intra= 0;
2875                     ff_mpeg4_set_direct_mv(s, mx, my);
2876                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2877                                  &dmin, &next_block, mx, my);
2878                 }
2879                 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2880                     backup_s.dquant = 0;
2881                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2882                     s->mb_intra= 0;
2883                     ff_mpeg4_set_direct_mv(s, 0, 0);
2884                     encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2885                                  &dmin, &next_block, 0, 0);
2886                 }
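                /* SKIP_RD: if the best mode is inter and codes some coefficients,
                 * try the same motion once more with the residual dropped
                 * (skipdct=1) and keep that version if it scores better. */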
2887                 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2888                     int coded=0;
2889                     for(i=0; i<6; i++)
2890                         coded |= s->block_last_index[i];
2891                     if(coded){
2892                         int mx,my;
2893                         memcpy(s->mv, best_s.mv, sizeof(s->mv));
2894                         if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2895                             mx=my=0; //FIXME find the one we actually used
2896                             ff_mpeg4_set_direct_mv(s, mx, my);
2897                         }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2898                             mx= s->mv[1][0][0];
2899                             my= s->mv[1][0][1];
2900                         }else{
2901                             mx= s->mv[0][0][0];
2902                             my= s->mv[0][0][1];
2903                         }
2904
2905                         s->mv_dir= best_s.mv_dir;
2906                         s->mv_type = best_s.mv_type;
2907                         s->mb_intra= 0;
2908 /*                        s->mv[0][0][0] = best_s.mv[0][0][0];
2909                         s->mv[0][0][1] = best_s.mv[0][0][1];
2910                         s->mv[1][0][0] = best_s.mv[1][0][0];
2911                         s->mv[1][0][1] = best_s.mv[1][0][1];*/
2912                         backup_s.dquant= 0;
2913                         s->skipdct=1;
2914                         encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2915                                         &dmin, &next_block, mx, my);
2916                         s->skipdct=0;
2917                     }
2918                 }
2919
2920                 s->current_picture.f.qscale_table[xy] = best_s.qscale;
2921
2922                 copy_context_after_encode(s, &best_s, -1);
2923
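                /* The winning candidate's bits are in the scratch buffer
                 * bit_buf[next_block^1]; append them to the backed-up
                 * PutBitContext and switch back to it (likewise for pb2 and
                 * tex_pb when data partitioning is used). */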
2924                 pb_bits_count= put_bits_count(&s->pb);
2925                 flush_put_bits(&s->pb);
2926                 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2927                 s->pb= backup_s.pb;
2928
2929                 if(s->data_partitioning){
2930                     pb2_bits_count= put_bits_count(&s->pb2);
2931                     flush_put_bits(&s->pb2);
2932                     avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2933                     s->pb2= backup_s.pb2;
2934
2935                     tex_pb_bits_count= put_bits_count(&s->tex_pb);
2936                     flush_put_bits(&s->tex_pb);
2937                     avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2938                     s->tex_pb= backup_s.tex_pb;
2939                 }
2940                 s->last_bits= put_bits_count(&s->pb);
2941
2942                 if (CONFIG_H263_ENCODER &&
2943                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2944                     ff_h263_update_motion_val(s);
2945
2946                 if(next_block==0){ //FIXME 16 vs linesize16
2947                     s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
2948                     s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
2949                     s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2950                 }
2951
2952                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2953                     ff_MPV_decode_mb(s, s->block);
2954             } else {
2955                 int motion_x = 0, motion_y = 0;
2956                 s->mv_type=MV_TYPE_16X16;
2957                 // only one MB-Type possible
2958
2959                 switch(mb_type){
2960                 case CANDIDATE_MB_TYPE_INTRA:
2961                     s->mv_dir = 0;
2962                     s->mb_intra= 1;
2963                     motion_x= s->mv[0][0][0] = 0;
2964                     motion_y= s->mv[0][0][1] = 0;
2965                     break;
2966                 case CANDIDATE_MB_TYPE_INTER:
2967                     s->mv_dir = MV_DIR_FORWARD;
2968                     s->mb_intra= 0;
2969                     motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2970                     motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2971                     break;
2972                 case CANDIDATE_MB_TYPE_INTER_I:
2973                     s->mv_dir = MV_DIR_FORWARD;
2974                     s->mv_type = MV_TYPE_FIELD;
2975                     s->mb_intra= 0;
2976                     for(i=0; i<2; i++){
2977                         j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2978                         s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2979                         s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2980                     }
2981                     break;
2982                 case CANDIDATE_MB_TYPE_INTER4V:
2983                     s->mv_dir = MV_DIR_FORWARD;
2984                     s->mv_type = MV_TYPE_8X8;
2985                     s->mb_intra= 0;
2986                     for(i=0; i<4; i++){
2987                         s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2988                         s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2989                     }
2990                     break;
2991                 case CANDIDATE_MB_TYPE_DIRECT:
2992                     if (CONFIG_MPEG4_ENCODER) {
2993                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2994                         s->mb_intra= 0;
2995                         motion_x=s->b_direct_mv_table[xy][0];
2996                         motion_y=s->b_direct_mv_table[xy][1];
2997                         ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2998                     }
2999                     break;
3000                 case CANDIDATE_MB_TYPE_DIRECT0:
3001                     if (CONFIG_MPEG4_ENCODER) {
3002                         s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
3003                         s->mb_intra= 0;
3004                         ff_mpeg4_set_direct_mv(s, 0, 0);
3005                     }
3006                     break;
3007                 case CANDIDATE_MB_TYPE_BIDIR:
3008                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3009                     s->mb_intra= 0;
3010                     s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3011                     s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3012                     s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3013                     s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3014                     break;
3015                 case CANDIDATE_MB_TYPE_BACKWARD:
3016                     s->mv_dir = MV_DIR_BACKWARD;
3017                     s->mb_intra= 0;
3018                     motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3019                     motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3020                     break;
3021                 case CANDIDATE_MB_TYPE_FORWARD:
3022                     s->mv_dir = MV_DIR_FORWARD;
3023                     s->mb_intra= 0;
3024                     motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3025                     motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3026                     break;
3027                 case CANDIDATE_MB_TYPE_FORWARD_I:
3028                     s->mv_dir = MV_DIR_FORWARD;
3029                     s->mv_type = MV_TYPE_FIELD;
3030                     s->mb_intra= 0;
3031                     for(i=0; i<2; i++){
3032                         j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3033                         s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3034                         s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3035                     }
3036                     break;
3037                 case CANDIDATE_MB_TYPE_BACKWARD_I:
3038                     s->mv_dir = MV_DIR_BACKWARD;
3039                     s->mv_type = MV_TYPE_FIELD;
3040                     s->mb_intra= 0;
3041                     for(i=0; i<2; i++){
3042                         j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3043                         s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3044                         s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3045                     }
3046                     break;
3047                 case CANDIDATE_MB_TYPE_BIDIR_I:
3048                     s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3049                     s->mv_type = MV_TYPE_FIELD;
3050                     s->mb_intra= 0;
3051                     for(dir=0; dir<2; dir++){
3052                         for(i=0; i<2; i++){
3053                             j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3054                             s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3055                             s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3056                         }
3057                     }
3058                     break;
3059                 default:
3060                     av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3061                 }
3062
3063                 encode_mb(s, motion_x, motion_y);
3064
3065                 // RAL: Update last macroblock type
3066                 s->last_mv_dir = s->mv_dir;
3067
3068                 if (CONFIG_H263_ENCODER &&
3069                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3070                     ff_h263_update_motion_val(s);
3071
3072                 ff_MPV_decode_mb(s, s->block);
3073             }
3074
3075             /* clean the MV table in I-, P- and S-frames for direct mode in B-frames */
3076             if(s->mb_intra /* && I,P,S_TYPE */){
3077                 s->p_mv_table[xy][0]=0;
3078                 s->p_mv_table[xy][1]=0;
3079             }
3080
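            /* with CODEC_FLAG_PSNR, accumulate the per-plane SSE between the
             * source and the reconstructed MB (w/h clipped at the picture
             * border) so the PSNR can be reported after encoding */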
3081             if(s->flags&CODEC_FLAG_PSNR){
3082                 int w= 16;
3083                 int h= 16;
3084
3085                 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3086                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3087
3088                 s->current_picture.f.error[0] += sse(
3089                     s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3090                     s->dest[0], w, h, s->linesize);
3091                 s->current_picture.f.error[1] += sse(
3092                     s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3093                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3094                 s->current_picture.f.error[2] += sse(
3095                     s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
3096                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3097             }
3098             if(s->loop_filter){
3099                 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3100                     ff_h263_loop_filter(s);
3101             }
3102             av_dlog(s->avctx, "MB %d %d bits\n",
3103                     s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3104         }
3105     }
3106
3107     // not beautiful here, but it must be written before flushing, so it has to be here
3108     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3109         ff_msmpeg4_encode_ext_header(s);
3110
3111     write_slice_end(s);
3112
3113     /* Send the last GOB if RTP */
3114     if (s->avctx->rtp_callback) {
3115         int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3116         pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3117         /* Call the RTP callback to send the last GOB */
3118         emms_c();
3119         s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3120     }
3121
3122     return 0;
3123 }
3124
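/* Merge the statistics gathered by a slice thread back into the main context:
 * MERGE() adds src's field to dst and clears it in src. */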
3125 #define MERGE(field) dst->field += src->field; src->field=0
3126 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3127     MERGE(me.scene_change_score);
3128     MERGE(me.mc_mb_var_sum_temp);
3129     MERGE(me.mb_var_sum_temp);
3130 }
3131
3132 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3133     int i;
3134
3135     MERGE(dct_count[0]); // note: the other DCT vars are not part of the context
3136     MERGE(dct_count[1]);
3137     MERGE(mv_bits);
3138     MERGE(i_tex_bits);
3139     MERGE(p_tex_bits);
3140     MERGE(i_count);
3141     MERGE(f_count);
3142     MERGE(b_count);
3143     MERGE(skip_count);
3144     MERGE(misc_bits);
3145     MERGE(error_count);
3146     MERGE(padding_bug_score);
3147     MERGE(current_picture.f.error[0]);
3148     MERGE(current_picture.f.error[1]);
3149     MERGE(current_picture.f.error[2]);
3150
3151     if(dst->avctx->noise_reduction){
3152         for(i=0; i<64; i++){
3153             MERGE(dct_error_sum[0][i]);
3154             MERGE(dct_error_sum[1][i]);
3155         }
3156     }
3157
3158     assert(put_bits_count(&src->pb) % 8 ==0);
3159     assert(put_bits_count(&dst->pb) % 8 ==0);
3160     avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3161     flush_put_bits(&dst->pb);
3162 }
3163
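/* Choose the picture-level quantizer: use a pending next_lambda if one is set,
 * otherwise ask the rate controller; with adaptive quantization, sanitize the
 * per-MB qscale table for codecs that restrict dquant before deriving lambda. */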
3164 static int estimate_qp(MpegEncContext *s, int dry_run){
3165     if (s->next_lambda){
3166         s->current_picture_ptr->f.quality =
3167         s->current_picture.f.quality = s->next_lambda;
3168         if(!dry_run) s->next_lambda= 0;
3169     } else if (!s->fixed_qscale) {
3170         s->current_picture_ptr->f.quality =
3171         s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3172         if (s->current_picture.f.quality < 0)
3173             return -1;
3174     }
3175
3176     if(s->adaptive_quant){
3177         switch(s->codec_id){
3178         case AV_CODEC_ID_MPEG4:
3179             if (CONFIG_MPEG4_ENCODER)
3180                 ff_clean_mpeg4_qscales(s);
3181             break;
3182         case AV_CODEC_ID_H263:
3183         case AV_CODEC_ID_H263P:
3184         case AV_CODEC_ID_FLV1:
3185             if (CONFIG_H263_ENCODER)
3186                 ff_clean_h263_qscales(s);
3187             break;
3188         default:
3189             ff_init_qscale_tab(s);
3190         }
3191
3192         s->lambda= s->lambda_table[0];
3193         //FIXME broken
3194     }else
3195         s->lambda = s->current_picture.f.quality;
3196     update_qscale(s);
3197     return 0;
3198 }
3199
3200 /* must be called before writing the header */
3201 static void set_frame_distances(MpegEncContext * s){
3202     assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3203     s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3204
3205     if(s->pict_type==AV_PICTURE_TYPE_B){
3206         s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3207         assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3208     }else{
3209         s->pp_time= s->time - s->last_non_b_time;
3210         s->last_non_b_time= s->time;
3211         assert(s->picture_number==0 || s->pp_time > 0);
3212     }
3213 }
3214
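/* Encode one picture: estimate motion (or mark all MBs intra for I-frames),
 * merge the per-thread ME statistics, optionally promote the frame to an
 * I-frame on a scene change, pick f_code/b_code and fix out-of-range MVs,
 * set up the quantization matrices, write the picture header and run
 * encode_thread() on every slice context. */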
3215 static int encode_picture(MpegEncContext *s, int picture_number)
3216 {
3217     int i;
3218     int bits;
3219     int context_count = s->slice_context_count;
3220
3221     s->picture_number = picture_number;
3222
3223     /* Reset the average MB variance */
3224     s->me.mb_var_sum_temp    =
3225     s->me.mc_mb_var_sum_temp = 0;
3226
3227     /* we need to initialize some time vars before we can encode b-frames */
3228     // RAL: Condition added for MPEG1VIDEO
3229     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3230         set_frame_distances(s);
3231     if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3232         ff_set_mpeg4_time(s);
3233
3234     s->me.scene_change_score=0;
3235
3236 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3237
3238     if(s->pict_type==AV_PICTURE_TYPE_I){
3239         if(s->msmpeg4_version >= 3) s->no_rounding=1;
3240         else                        s->no_rounding=0;
3241     }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3242         if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3243             s->no_rounding ^= 1;
3244     }
3245
3246     if(s->flags & CODEC_FLAG_PASS2){
3247         if (estimate_qp(s,1) < 0)
3248             return -1;
3249         ff_get_2pass_fcode(s);
3250     }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3251         if(s->pict_type==AV_PICTURE_TYPE_B)
3252             s->lambda= s->last_lambda_for[s->pict_type];
3253         else
3254             s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3255         update_qscale(s);
3256     }
3257
3258     if(s->codec_id != AV_CODEC_ID_AMV){
3259         if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
3260         if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3261         s->q_chroma_intra_matrix   = s->q_intra_matrix;
3262         s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3263     }
3264
3265     s->mb_intra=0; //for the rate distortion & bit compare functions
3266     for(i=1; i<context_count; i++){
3267         ff_update_duplicate_context(s->thread_context[i], s);
3268     }
3269
3270     if(ff_init_me(s)<0)
3271         return -1;
3272
3273     /* Estimate motion for every MB */
3274     if(s->pict_type != AV_PICTURE_TYPE_I){
3275         s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3276         s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3277         if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
3278             if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3279                 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3280             }
3281         }
3282
3283         s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3284     }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3285         /* I-Frame */
3286         for(i=0; i<s->mb_stride*s->mb_height; i++)
3287             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3288
3289         if(!s->fixed_qscale){
3290             /* finding spatial complexity for I-frame rate control */
3291             s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3292         }
3293     }
3294     for(i=1; i<context_count; i++){
3295         merge_context_after_me(s, s->thread_context[i]);
3296     }
3297     s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3298     s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
3299     emms_c();
3300
3301     if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3302         s->pict_type= AV_PICTURE_TYPE_I;
3303         for(i=0; i<s->mb_stride*s->mb_height; i++)
3304             s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3305         if(s->msmpeg4_version >= 3)
3306             s->no_rounding=1;
3307         av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3308                 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3309     }
3310
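    /* pick f_code/b_code from the estimated motion vectors and clamp or
     * convert MVs that do not fit the chosen range (ff_fix_long_*_mvs) */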
3311     if(!s->umvplus){
3312         if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3313             s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3314
3315             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3316                 int a,b;
3317                 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3318                 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3319                 s->f_code= FFMAX3(s->f_code, a, b);
3320             }
3321
3322             ff_fix_long_p_mvs(s);
3323             ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3324             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3325                 int j;
3326                 for(i=0; i<2; i++){
3327                     for(j=0; j<2; j++)
3328                         ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3329                                         s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3330                 }
3331             }
3332         }
3333
3334         if(s->pict_type==AV_PICTURE_TYPE_B){
3335             int a, b;
3336
3337             a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3338             b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3339             s->f_code = FFMAX(a, b);
3340
3341             a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3342             b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3343             s->b_code = FFMAX(a, b);
3344
3345             ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3346             ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3347             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3348             ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3349             if(s->flags & CODEC_FLAG_INTERLACED_ME){
3350                 int dir, j;
3351                 for(dir=0; dir<2; dir++){
3352                     for(i=0; i<2; i++){
3353                         for(j=0; j<2; j++){
3354                             int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3355                                           : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3356                             ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3357                                             s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3358                         }
3359                     }
3360                 }
3361             }
3362         }
3363     }
3364
3365     if (estimate_qp(s, 0) < 0)
3366         return -1;
3367
3368     if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3369         s->qscale= 3; //reduce clipping problems
3370
3371     if (s->out_format == FMT_MJPEG) {
3372         /* for mjpeg, we do include qscale in the matrix */
3373         for(i=1;i<64;i++){
3374             int j= s->dsp.idct_permutation[i];
3375
3376             s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3377         }
3378         s->y_dc_scale_table=
3379         s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3380         s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3381         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3382                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3383         s->qscale= 8;
3384     }
3385     if(s->codec_id == AV_CODEC_ID_AMV){
3386         static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3387         static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3388         for(i=1;i<64;i++){
3389             int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];
3390
3391             s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3392             s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3393         }
3394         s->y_dc_scale_table= y;
3395         s->c_dc_scale_table= c;
3396         s->intra_matrix[0] = 13;
3397         s->chroma_intra_matrix[0] = 14;
3398         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3399                        s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3400         ff_convert_matrix(&s->dsp, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3401                        s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3402         s->qscale= 8;
3403     }
3404
3405     //FIXME var duplication
3406     s->current_picture_ptr->f.key_frame =
3407     s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3408     s->current_picture_ptr->f.pict_type =
3409     s->current_picture.f.pict_type = s->pict_type;
3410
3411     if (s->current_picture.f.key_frame)
3412         s->picture_in_gop_number=0;
3413
3414     s->mb_x = s->mb_y = 0;
3415     s->last_bits= put_bits_count(&s->pb);
3416     switch(s->out_format) {
3417     case FMT_MJPEG:
3418         if (CONFIG_MJPEG_ENCODER)
3419             ff_mjpeg_encode_picture_header(s);
3420         break;
3421     case FMT_H261:
3422         if (CONFIG_H261_ENCODER)
3423             ff_h261_encode_picture_header(s, picture_number);
3424         break;
3425     case FMT_H263:
3426         if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3427             ff_wmv2_encode_picture_header(s, picture_number);
3428         else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3429             ff_msmpeg4_encode_picture_header(s, picture_number);
3430         else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3431             ff_mpeg4_encode_picture_header(s, picture_number);
3432         else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3433             ff_rv10_encode_picture_header(s, picture_number);
3434         else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3435             ff_rv20_encode_picture_header(s, picture_number);
3436         else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3437             ff_flv_encode_picture_header(s, picture_number);
3438         else if (CONFIG_H263_ENCODER)
3439             ff_h263_encode_picture_header(s, picture_number);
3440         break;
3441     case FMT_MPEG1:
3442         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3443             ff_mpeg1_encode_picture_header(s, picture_number);
3444         break;
3445     case FMT_H264:
3446         break;
3447     default:
3448         av_assert0(0);
3449     }
3450     bits= put_bits_count(&s->pb);
3451     s->header_bits= bits - s->last_bits;
3452
3453     for(i=1; i<context_count; i++){
3454         update_duplicate_context_after_me(s->thread_context[i], s);
3455     }
3456     s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3457     for(i=1; i<context_count; i++){
3458         merge_context_after_encode(s, s->thread_context[i]);
3459     }
3460     emms_c();
3461     return 0;
3462 }
3463
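/* DCT-domain noise reduction: add each nonzero coefficient's magnitude to
 * dct_error_sum and shrink the coefficient towards zero by dct_offset,
 * never letting it change sign. */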
3464 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
3465     const int intra= s->mb_intra;
3466     int i;
3467
3468     s->dct_count[intra]++;
3469
3470     for(i=0; i<64; i++){
3471         int level= block[i];
3472
3473         if(level){
3474             if(level>0){
3475                 s->dct_error_sum[intra][i] += level;
3476                 level -= s->dct_offset[intra][i];
3477                 if(level<0) level=0;
3478             }else{
3479                 s->dct_error_sum[intra][i] -= level;
3480                 level += s->dct_offset[intra][i];
3481                 if(level>0) level=0;
3482             }
3483             block[i]= level;
3484         }
3485     }
3486 }
3487
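/* Rate-distortion optimal ("trellis") quantization of one block: each
 * coefficient gets one or two candidate levels, and a Viterbi-style search
 * over run/level pairs (with a pruned list of survivor positions) minimizes
 * distortion + lambda * bits. Returns the index of the last nonzero
 * coefficient. */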
3488 static int dct_quantize_trellis_c(MpegEncContext *s,
3489                                   DCTELEM *block, int n,
3490                                   int qscale, int *overflow){
3491     const int *qmat;
3492     const uint8_t *scantable= s->intra_scantable.scantable;
3493     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3494     int max=0;
3495     unsigned int threshold1, threshold2;
3496     int bias=0;
3497     int run_tab[65];
3498     int level_tab[65];
3499     int score_tab[65];
3500     int survivor[65];
3501     int survivor_count;
3502     int last_run=0;
3503     int last_level=0;
3504     int last_score= 0;
3505     int last_i;
3506     int coeff[2][64];
3507     int coeff_count[64];
3508     int qmul, qadd, start_i, last_non_zero, i, dc;
3509     const int esc_length= s->ac_esc_length;
3510     uint8_t * length;
3511     uint8_t * last_length;
3512     const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3513
3514     s->dsp.fdct (block);
3515
3516     if(s->dct_error_sum)
3517         s->denoise_dct(s, block);
3518     qmul= qscale*16;
3519     qadd= ((qscale-1)|1)*8;
3520
3521     if (s->mb_intra) {
3522         int q;
3523         if (!s->h263_aic) {
3524             if (n < 4)
3525                 q = s->y_dc_scale;
3526             else
3527                 q = s->c_dc_scale;
3528             q = q << 3;
3529         } else{
3530             /* For AIC we skip quant/dequant of INTRADC */
3531             q = 1 << 3;
3532             qadd=0;
3533         }
3534
3535         /* note: block[0] is assumed to be positive */
3536         block[0] = (block[0] + (q >> 1)) / q;
3537         start_i = 1;
3538         last_non_zero = 0;
3539         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3540         if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3541             bias= 1<<(QMAT_SHIFT-1);
3542         length     = s->intra_ac_vlc_length;
3543         last_length= s->intra_ac_vlc_last_length;
3544     } else {
3545         start_i = 0;
3546         last_non_zero = -1;
3547         qmat = s->q_inter_matrix[qscale];
3548         length     = s->inter_ac_vlc_length;
3549         last_length= s->inter_ac_vlc_last_length;
3550     }
3551     last_i= start_i;
3552
3553     threshold1= (1<<QMAT_SHIFT) - bias - 1;
3554     threshold2= (threshold1<<1);
3555
3556     for(i=63; i>=start_i; i--) {
3557         const int j = scantable[i];
3558         int level = block[j] * qmat[j];
3559
3560         if(((unsigned)(level+threshold1))>threshold2){
3561             last_non_zero = i;
3562             break;
3563         }
3564     }
3565
3566     for(i=start_i; i<=last_non_zero; i++) {
3567         const int j = scantable[i];
3568         int level = block[j] * qmat[j];
3569
3570 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3571 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3572         if(((unsigned)(level+threshold1))>threshold2){
3573             if(level>0){
3574                 level= (bias + level)>>QMAT_SHIFT;
3575                 coeff[0][i]= level;
3576                 coeff[1][i]= level-1;
3577 //                coeff[2][k]= level-2;
3578             }else{
3579                 level= (bias - level)>>QMAT_SHIFT;
3580                 coeff[0][i]= -level;
3581                 coeff[1][i]= -level+1;
3582 //                coeff[2][k]= -level+2;
3583             }
3584             coeff_count[i]= FFMIN(level, 2);
3585             av_assert2(coeff_count[i]);
3586             max |=level;
3587         }else{
3588             coeff[0][i]= (level>>31)|1;
3589             coeff_count[i]= 1;
3590         }
3591     }
3592
3593     *overflow= s->max_qcoeff < max; //overflow might have happened
3594
3595     if(last_non_zero < start_i){
3596         memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3597         return last_non_zero;
3598     }
3599
3600     score_tab[start_i]= 0;
3601     survivor[0]= start_i;
3602     survivor_count= 1;
3603
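    /* dynamic-programming pass: for each scan position, try every candidate
     * level (and the escape path) against all surviving start positions,
     * record the best predecessor in run_tab/level_tab, then prune survivors
     * that can no longer lead to a better solution */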
3604     for(i=start_i; i<=last_non_zero; i++){
3605         int level_index, j, zero_distortion;
3606         int dct_coeff= FFABS(block[ scantable[i] ]);
3607         int best_score=256*256*256*120;
3608
3609         if (s->dsp.fdct == ff_fdct_ifast)
3610             dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3611         zero_distortion= dct_coeff*dct_coeff;
3612
3613         for(level_index=0; level_index < coeff_count[i]; level_index++){
3614             int distortion;
3615             int level= coeff[level_index][i];
3616             const int alevel= FFABS(level);
3617             int unquant_coeff;
3618
3619             av_assert2(level);
3620
3621             if(s->out_format == FMT_H263){
3622                 unquant_coeff= alevel*qmul + qadd;
3623             }else{ //MPEG1
3624                 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3625                 if(s->mb_intra){
3626                         unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
3627                         unquant_coeff =   (unquant_coeff - 1) | 1;
3628                 }else{
3629                         unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3630                         unquant_coeff =   (unquant_coeff - 1) | 1;
3631                 }
3632                 unquant_coeff<<= 3;
3633             }
3634
3635             distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3636             level+=64;
3637             if((level&(~127)) == 0){
3638                 for(j=survivor_count-1; j>=0; j--){
3639                     int run= i - survivor[j];
3640                     int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3641                     score += score_tab[i-run];
3642
3643                     if(score < best_score){
3644                         best_score= score;
3645                         run_tab[i+1]= run;
3646                         level_tab[i+1]= level-64;
3647                     }
3648                 }
3649
3650                 if(s->out_format == FMT_H263){
3651                     for(j=survivor_count-1; j>=0; j--){
3652                         int run= i - survivor[j];
3653                         int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3654                         score += score_tab[i-run];
3655                         if(score < last_score){
3656                             last_score= score;
3657                             last_run= run;
3658                             last_level= level-64;
3659                             last_i= i+1;
3660                         }
3661                     }
3662                 }
3663             }else{
3664                 distortion += esc_length*lambda;
3665                 for(j=survivor_count-1; j>=0; j--){
3666                     int run= i - survivor[j];
3667                     int score= distortion + score_tab[i-run];
3668
3669                     if(score < best_score){
3670                         best_score= score;
3671                         run_tab[i+1]= run;
3672                         level_tab[i+1]= level-64;
3673                     }
3674                 }
3675
3676                 if(s->out_format == FMT_H263){
3677                   for(j=survivor_count-1; j>=0; j--){
3678                         int run= i - survivor[j];
3679                         int score= distortion + score_tab[i-run];
3680                         if(score < last_score){
3681                             last_score= score;
3682                             last_run= run;
3683                             last_level= level-64;
3684                             last_i= i+1;
3685                         }
3686                     }
3687                 }
3688             }
3689         }
3690
3691         score_tab[i+1]= best_score;
3692
3693                 // Note: there is a VLC code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
3694         if(last_non_zero <= 27){
3695             for(; survivor_count; survivor_count--){
3696                 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3697                     break;
3698             }
3699         }else{
3700             for(; survivor_count; survivor_count--){
3701                 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3702                     break;
3703             }
3704         }
3705
3706         survivor[ survivor_count++ ]= i+1;
3707     }
3708
3709     if(s->out_format != FMT_H263){
3710         last_score= 256*256*256*120;
3711         for(i= survivor[0]; i<=last_non_zero + 1; i++){
3712             int score= score_tab[i];
3713             if(i) score += lambda*2; //FIXME be more exact?
3714
3715             if(score < last_score){
3716                 last_score= score;
3717                 last_i= i;
3718                 last_level= level_tab[i];
3719                 last_run= run_tab[i];
3720             }
3721         }
3722     }
3723
3724     s->coded_score[n] = last_score;
3725
3726     dc= FFABS(block[0]);
3727     last_non_zero= last_i - 1;
3728     memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3729
3730     if(last_non_zero < start_i)
3731         return last_non_zero;
3732
3733     if(last_non_zero == 0 && start_i == 0){
3734         int best_level= 0;
3735         int best_score= dc * dc;
3736
3737         for(i=0; i<coeff_count[0]; i++){
3738             int level= coeff[i][0];
3739             int alevel= FFABS(level);
3740             int unquant_coeff, score, distortion;
3741
3742             if(s->out_format == FMT_H263){
3743                     unquant_coeff= (alevel*qmul + qadd)>>3;
3744             }else{ //MPEG1
3745                     unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3746                     unquant_coeff =   (unquant_coeff - 1) | 1;
3747             }
3748             unquant_coeff = (unquant_coeff + 4) >> 3;
3749             unquant_coeff<<= 3 + 3;
3750
3751             distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3752             level+=64;
3753             if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3754             else                    score= distortion + esc_length*lambda;
3755
3756             if(score < best_score){
3757                 best_score= score;
3758                 best_level= level - 64;
3759             }
3760         }
3761         block[0]= best_level;
3762         s->coded_score[n] = best_score - dc*dc;
3763         if(best_level == 0) return -1;
3764         else                return last_non_zero;
3765     }
3766
3767     i= last_i;
3768     av_assert2(last_level);
3769
3770     block[ perm_scantable[last_non_zero] ]= last_level;
3771     i -= last_run + 1;
3772
3773     for(; i>start_i; i -= run_tab[i] + 1){
3774         block[ perm_scantable[i-1] ]= level_tab[i];
3775     }
3776
3777     return last_non_zero;
3778 }
3779
3780 //#define REFINE_STATS 1
3781 static int16_t basis[64][64];
3782
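/* Precompute the 8x8 DCT basis functions (stored in IDCT permutation order);
 * dct_quantize_refine() uses them to evaluate spatial-domain error changes. */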
3783 static void build_basis(uint8_t *perm){
3784     int i, j, x, y;
3785     emms_c();
3786     for(i=0; i<8; i++){
3787         for(j=0; j<8; j++){
3788             for(y=0; y<8; y++){
3789                 for(x=0; x<8; x++){
3790                     double s= 0.25*(1<<BASIS_SHIFT);
3791                     int index= 8*i + j;
3792                     int perm_index= perm[index];
3793                     if(i==0) s*= sqrt(0.5);
3794                     if(j==0) s*= sqrt(0.5);
3795                     basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3796                 }
3797             }
3798         }
3799     }
3800 }
3801
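/* Refine an already quantized block: keep the scaled spatial-domain
 * reconstruction error in rem[], then repeatedly try +/-1 changes on single
 * coefficients and apply the change that best reduces
 * lambda * (bit cost delta) + distortion, as measured via the precomputed
 * basis functions. */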
3802 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3803                         DCTELEM *block, int16_t *weight, DCTELEM *orig,
3804                         int n, int qscale){
3805     int16_t rem[64];
3806     LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
3807     const uint8_t *scantable= s->intra_scantable.scantable;
3808     const uint8_t *perm_scantable= s->intra_scantable.permutated;
3809 //    unsigned int threshold1, threshold2;
3810 //    int bias=0;
3811     int run_tab[65];
3812     int prev_run=0;
3813     int prev_level=0;
3814     int qmul, qadd, start_i, last_non_zero, i, dc;
3815     uint8_t * length;
3816     uint8_t * last_length;
3817     int lambda;
3818     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3819 #ifdef REFINE_STATS
3820 static int count=0;
3821 static int after_last=0;
3822 static int to_zero=0;
3823 static int from_zero=0;
3824 static int raise=0;
3825 static int lower=0;
3826 static int messed_sign=0;
3827 #endif
3828
3829     if(basis[0][0] == 0)
3830         build_basis(s->dsp.idct_permutation);
3831
3832     qmul= qscale*2;
3833     qadd= (qscale-1)|1;
3834     if (s->mb_intra) {
3835         if (!s->h263_aic) {
3836             if (n < 4)
3837                 q = s->y_dc_scale;
3838             else
3839                 q = s->c_dc_scale;
3840         } else{
3841             /* For AIC we skip quant/dequant of INTRADC */
3842             q = 1;
3843             qadd=0;
3844         }
3845         q <<= RECON_SHIFT-3;
3846         /* note: block[0] is assumed to be positive */
3847         dc= block[0]*q;
3848 //        block[0] = (block[0] + (q >> 1)) / q;
3849         start_i = 1;
3850 //        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3851 //            bias= 1<<(QMAT_SHIFT-1);
3852         length     = s->intra_ac_vlc_length;
3853         last_length= s->intra_ac_vlc_last_length;
3854     } else {
3855         dc= 0;
3856         start_i = 0;
3857         length     = s->inter_ac_vlc_length;
3858         last_length= s->inter_ac_vlc_last_length;
3859     }
3860     last_non_zero = s->block_last_index[n];
3861
3862 #ifdef REFINE_STATS
3863 {START_TIMER
3864 #endif
3865     dc += (1<<(RECON_SHIFT-1));
3866     for(i=0; i<64; i++){
3867         rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
3868     }
3869 #ifdef REFINE_STATS
3870 STOP_TIMER("memset rem[]")}
3871 #endif
3872     sum=0;
3873     for(i=0; i<64; i++){
3874         int one= 36;
3875         int qns=4;
3876         int w;
3877
3878         w= FFABS(weight[i]) + qns*one;
3879         w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3880
3881         weight[i] = w;
3882 //        w=weight[i] = (63*qns + (w/2)) / w;
3883
3884         av_assert2(w>0);
3885         av_assert2(w<(1<<6));
3886         sum += w*w;
3887     }
3888     lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
3889 #ifdef REFINE_STATS
3890 {START_TIMER
3891 #endif
3892     run=0;
3893     rle_index=0;
3894     for(i=start_i; i<=last_non_zero; i++){
3895         int j= perm_scantable[i];
3896         const int level= block[j];
3897         int coeff;
3898
3899         if(level){
3900             if(level<0) coeff= qmul*level - qadd;
3901             else        coeff= qmul*level + qadd;
3902             run_tab[rle_index++]=run;
3903             run=0;
3904
3905             s->dsp.add_8x8basis(rem, basis[j], coeff);
3906         }else{
3907             run++;
3908         }
3909     }
3910 #ifdef REFINE_STATS
3911 if(last_non_zero>0){
3912 STOP_TIMER("init rem[]")
3913 }
3914 }
3915
3916 {START_TIMER
3917 #endif
3918     for(;;){
3919         int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3920         int best_coeff=0;
3921         int best_change=0;
3922         int run2, best_unquant_change=0, analyze_gradient;
3923 #ifdef REFINE_STATS
3924 {START_TIMER
3925 #endif
3926         analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3927
3928         if(analyze_gradient){
3929 #ifdef REFINE_STATS
3930 {START_TIMER
3931 #endif
3932             for(i=0; i<64; i++){
3933                 int w= weight[i];
3934
3935                 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3936             }
3937 #ifdef REFINE_STATS
3938 STOP_TIMER("rem*w*w")}
3939 {START_TIMER
3940 #endif
3941             s->dsp.fdct(d1);
3942 #ifdef REFINE_STATS
3943 STOP_TIMER("dct")}
3944 #endif
3945         }
3946
3947         if(start_i){
3948             const int level= block[0];
3949             int change, old_coeff;
3950
3951             av_assert2(s->mb_intra);
3952
3953             old_coeff= q*level;
3954
3955             for(change=-1; change<=1; change+=2){
3956                 int new_level= level + change;
3957                 int score, new_coeff;
3958
3959                 new_coeff= q*new_level;
3960                 if(new_coeff >= 2048 || new_coeff < 0)
3961                     continue;
3962
3963                 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3964                 if(score<best_score){
3965                     best_score= score;
3966                     best_coeff= 0;
3967                     best_change= change;
3968                     best_unquant_change= new_coeff - old_coeff;
3969                 }
3970             }
3971         }
3972
3973         run=0;
3974         rle_index=0;
3975         run2= run_tab[rle_index++];
3976         prev_level=0;
3977         prev_run=0;
3978
3979         for(i=start_i; i<64; i++){
3980             int j= perm_scantable[i];
3981             const int level= block[j];
3982             int change, old_coeff;
3983
3984             if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3985                 break;
3986
3987             if(level){
3988                 if(level<0) old_coeff= qmul*level - qadd;
3989                 else        old_coeff= qmul*level + qadd;
3990                 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3991             }else{
3992                 old_coeff=0;
3993                 run2--;
3994                 av_assert2(run2>=0 || i >= last_non_zero );
3995             }
3996
3997             for(change=-1; change<=1; change+=2){
3998                 int new_level= level + change;
3999                 int score, new_coeff, unquant_change;
4000
4001                 score=0;
4002                 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4003                    continue;
4004
4005                 if(new_level){
4006                     if(new_level<0) new_coeff= qmul*new_level - qadd;
4007                     else            new_coeff= qmul*new_level + qadd;
4008                     if(new_coeff >= 2048 || new_coeff <= -2048)
4009                         continue;
4010                     //FIXME check for overflow
4011
4012                     if(level){
4013                         if(level < 63 && level > -63){
4014                             if(i < last_non_zero)
4015                                 score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
4016                                          - length[UNI_AC_ENC_INDEX(run, level+64)];
4017                             else
4018                                 score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4019                                          - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4020                         }
4021                     }else{
4022                         av_assert2(FFABS(new_level)==1);
4023
4024                         if(analyze_gradient){
4025                             int g= d1[ scantable[i] ];
4026                             if(g && (g^new_level) >= 0)
4027                                 continue;
4028                         }
4029
4030                         if(i < last_non_zero){
4031                             int next_i= i + run2 + 1;
4032                             int next_level= block[ perm_scantable[next_i] ] + 64;
4033
4034                             if(next_level&(~127))
4035                                 next_level= 0;
4036
4037                             if(next_i < last_non_zero)
4038                                 score +=   length[UNI_AC_ENC_INDEX(run, 65)]
4039                                          + length[UNI_AC_ENC_INDEX(run2, next_level)]
4040                                          - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4041                             else
4042                                 score +=  length[UNI_AC_ENC_INDEX(run, 65)]
4043                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4044                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4045                         }else{
4046                             score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4047                             if(prev_level){
4048                                 score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4049                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4050                             }
4051                         }
4052                     }
4053                 }else{
4054                     new_coeff=0;
4055                     av_assert2(FFABS(level)==1);
4056
4057                     if(i < last_non_zero){
4058                         int next_i= i + run2 + 1;
4059                         int next_level= block[ perm_scantable[next_i] ] + 64;
4060
4061                         if(next_level&(~127))
4062                             next_level= 0;
4063
4064                         if(next_i < last_non_zero)
4065                             score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4066                                      - length[UNI_AC_ENC_INDEX(run2, next_level)]
4067                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4068                         else
4069                             score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4070                                      - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4071                                      - length[UNI_AC_ENC_INDEX(run, 65)];
4072                     }else{
4073                         score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4074                         if(prev_level){
4075                             score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4076                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4077                         }
4078                     }
4079                 }
4080
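                /* 'score' so far is the change in coded bits (from the VLC length
                 * tables); scale it by lambda so it is comparable to the distortion
                 * term added below. */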
4081                 score *= lambda;
4082
4083                 unquant_change= new_coeff - old_coeff;
4084                 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4085
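                /* Add the estimated change in weighted reconstruction error that
                 * this coefficient change would cause. */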
4086                 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
4087                 if(score<best_score){
4088                     best_score= score;
4089                     best_coeff= i;
4090                     best_change= change;
4091                     best_unquant_change= unquant_change;
4092                 }
4093             }
4094             if(level){
4095                 prev_level= level + 64;
4096                 if(prev_level&(~127))
4097                     prev_level= 0;
4098                 prev_run= run;
4099                 run=0;
4100             }else{
4101                 run++;
4102             }
4103         }
4104 #ifdef REFINE_STATS
4105 STOP_TIMER("iterative step")}
4106 #endif
4107
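        /* Apply the single best change found in this pass; if nothing improved
         * the score, the refinement has converged. */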
4108         if(best_change){
4109             int j= perm_scantable[ best_coeff ];
4110
4111             block[j] += best_change;
4112
4113             if(best_coeff > last_non_zero){
4114                 last_non_zero= best_coeff;
4115                 av_assert2(block[j]);
4116 #ifdef REFINE_STATS
4117 after_last++;
4118 #endif
4119             }else{
4120 #ifdef REFINE_STATS
4121 if(block[j]){
4122     if(block[j] - best_change){
4123         if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4124             raise++;
4125         }else{
4126             lower++;
4127         }
4128     }else{
4129         from_zero++;
4130     }
4131 }else{
4132     to_zero++;
4133 }
4134 #endif
4135                 for(; last_non_zero>=start_i; last_non_zero--){
4136                     if(block[perm_scantable[last_non_zero]])
4137                         break;
4138                 }
4139             }
4140 #ifdef REFINE_STATS
4141 count++;
4142 if(256*256*256*64 % count == 0){
4143     printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4144 }
4145 #endif
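            /* Rebuild the run-length table so it matches the modified block. */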
4146             run=0;
4147             rle_index=0;
4148             for(i=start_i; i<=last_non_zero; i++){
4149                 int j= perm_scantable[i];
4150                 const int level= block[j];
4151
4152                 if(level){
4153                     run_tab[rle_index++]=run;
4154                     run=0;
4155                 }else{
4156                     run++;
4157                 }
4158             }
4159
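            /* Update the remaining error to account for the coefficient change
             * that was just applied. */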
4160             s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
4161         }else{
4162             break;
4163         }
4164     }
4165 #ifdef REFINE_STATS
4166 if(last_non_zero>0){
4167 STOP_TIMER("iterative search")
4168 }
4169 }
4170 #endif
4171
4172     return last_non_zero;
4173 }
4174
4175 int ff_dct_quantize_c(MpegEncContext *s,
4176                         DCTELEM *block, int n,
4177                         int qscale, int *overflow)
4178 {
4179     int i, j, level, last_non_zero, q, start_i;
4180     const int *qmat;
4181     const uint8_t *scantable= s->intra_scantable.scantable;
4182     int bias;
4183     int max=0;
4184     unsigned int threshold1, threshold2;
4185
4186     s->dsp.fdct (block);
4187
4188     if(s->dct_error_sum)
4189         s->denoise_dct(s, block);
4190
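    /* For intra blocks the DC coefficient is quantized separately, so the AC scan starts at index 1. */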
4191     if (s->mb_intra) {
4192         if (!s->h263_aic) {
4193             if (n < 4)
4194                 q = s->y_dc_scale;
4195             else
4196                 q = s->c_dc_scale;
4197             q = q << 3;
4198         } else
4199             /* For AIC we skip quant/dequant of INTRADC */
4200             q = 1 << 3;
4201
4202         /* note: block[0] is assumed to be positive */
4203         block[0] = (block[0] + (q >> 1)) / q;
4204         start_i = 1;
4205         last_non_zero = 0;
4206         qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4207         bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4208     } else {
4209         start_i = 0;
4210         last_non_zero = -1;
4211         qmat = s->q_inter_matrix[qscale];
4212         bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4213     }
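    /* threshold1 is the dead-zone limit: a scaled coefficient quantizes to zero unless its
     * magnitude exceeds it. The unsigned compare below tests |level| > threshold1 with a
     * single branch. */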
4214     threshold1= (1<<QMAT_SHIFT) - bias - 1;
4215     threshold2= (threshold1<<1);
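    /* Scan backwards in coding order: zero the trailing coefficients that fall inside the
     * dead zone and remember the last one that survives. */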
4216     for(i=63;i>=start_i;i--) {
4217         j = scantable[i];
4218         level = block[j] * qmat[j];
4219
4220         if(((unsigned)(level+threshold1))>threshold2){
4221             last_non_zero = i;
4222             break;
4223         }else{
4224             block[j]=0;
4225         }
4226     }
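    /* Quantize the surviving coefficients; 'max' accumulates a bitwise OR of the quantized
     * levels for the overflow check below. */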
4227     for(i=start_i; i<=last_non_zero; i++) {
4228         j = scantable[i];
4229         level = block[j] * qmat[j];
4230
4231 //        if(   bias+level >= (1<<QMAT_SHIFT)
4232 //           || bias-level >= (1<<QMAT_SHIFT)){
4233         if(((unsigned)(level+threshold1))>threshold2){
4234             if(level>0){
4235                 level= (bias + level)>>QMAT_SHIFT;
4236                 block[j]= level;
4237             }else{
4238                 level= (bias - level)>>QMAT_SHIFT;
4239                 block[j]= -level;
4240             }
4241             max |=level;
4242         }else{
4243             block[j]=0;
4244         }
4245     }
4246     *overflow= s->max_qcoeff < max; // overflow may have happened ('max' is a bitwise OR of the levels, so this check is conservative)
4247
4248     /* We need this permutation so that the coefficients are in the order the IDCT expects; only the nonzero elements are permuted. */
4249     if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4250         ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4251
4252     return last_non_zero;
4253 }
4254
4255 #define OFFSET(x) offsetof(MpegEncContext, x)
4256 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4257 static const AVOption h263_options[] = {
4258     { "obmc",         "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4259     { "structured_slices", "Write slice start position at every GOB header instead of just the GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4260     { "mb_info",      "Emit macroblock info for RFC 2190 packetization; the parameter value is the maximum payload size.", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4261     FF_MPV_COMMON_OPTS
4262     { NULL },
4263 };
4264
4265 static const AVClass h263_class = {
4266     .class_name = "H.263 encoder",
4267     .item_name  = av_default_item_name,
4268     .option     = h263_options,
4269     .version    = LIBAVUTIL_VERSION_INT,
4270 };
4271
4272 AVCodec ff_h263_encoder = {
4273     .name           = "h263",
4274     .type           = AVMEDIA_TYPE_VIDEO,
4275     .id             = AV_CODEC_ID_H263,
4276     .priv_data_size = sizeof(MpegEncContext),
4277     .init           = ff_MPV_encode_init,
4278     .encode2        = ff_MPV_encode_picture,
4279     .close          = ff_MPV_encode_end,
4280     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4281     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4282     .priv_class     = &h263_class,
4283 };
4284
4285 static const AVOption h263p_options[] = {
4286     { "umv",        "Use unlimited motion vectors.",    OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4287     { "aiv",        "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4288     { "obmc",       "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4289     { "structured_slices", "Write slice start position at every GOB header instead of just the GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4290     FF_MPV_COMMON_OPTS
4291     { NULL },
4292 };
4293 static const AVClass h263p_class = {
4294     .class_name = "H.263p encoder",
4295     .item_name  = av_default_item_name,
4296     .option     = h263p_options,
4297     .version    = LIBAVUTIL_VERSION_INT,
4298 };
4299
4300 AVCodec ff_h263p_encoder = {
4301     .name           = "h263p",
4302     .type           = AVMEDIA_TYPE_VIDEO,
4303     .id             = AV_CODEC_ID_H263P,
4304     .priv_data_size = sizeof(MpegEncContext),
4305     .init           = ff_MPV_encode_init,
4306     .encode2        = ff_MPV_encode_picture,
4307     .close          = ff_MPV_encode_end,
4308     .capabilities   = CODEC_CAP_SLICE_THREADS,
4309     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4310     .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4311     .priv_class     = &h263p_class,
4312 };
4313
4314 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4315
4316 AVCodec ff_msmpeg4v2_encoder = {
4317     .name           = "msmpeg4v2",
4318     .type           = AVMEDIA_TYPE_VIDEO,
4319     .id             = AV_CODEC_ID_MSMPEG4V2,
4320     .priv_data_size = sizeof(MpegEncContext),
4321     .init           = ff_MPV_encode_init,
4322     .encode2        = ff_MPV_encode_picture,
4323     .close          = ff_MPV_encode_end,
4324     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4325     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4326     .priv_class     = &msmpeg4v2_class,
4327 };
4328
4329 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4330
4331 AVCodec ff_msmpeg4v3_encoder = {
4332     .name           = "msmpeg4",
4333     .type           = AVMEDIA_TYPE_VIDEO,
4334     .id             = AV_CODEC_ID_MSMPEG4V3,
4335     .priv_data_size = sizeof(MpegEncContext),
4336     .init           = ff_MPV_encode_init,
4337     .encode2        = ff_MPV_encode_picture,
4338     .close          = ff_MPV_encode_end,
4339     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4340     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4341     .priv_class     = &msmpeg4v3_class,
4342 };
4343
4344 FF_MPV_GENERIC_CLASS(wmv1)
4345
4346 AVCodec ff_wmv1_encoder = {
4347     .name           = "wmv1",
4348     .type           = AVMEDIA_TYPE_VIDEO,
4349     .id             = AV_CODEC_ID_WMV1,
4350     .priv_data_size = sizeof(MpegEncContext),
4351     .init           = ff_MPV_encode_init,
4352     .encode2        = ff_MPV_encode_picture,
4353     .close          = ff_MPV_encode_end,
4354     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4355     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4356     .priv_class     = &wmv1_class,
4357 };