/*
 * (gitweb scrape header, kept for provenance:)
 * git.sesse.net Git - ffmpeg/blob - libavcodec/mpegvideo.c
 * http: Add encoding/decoding flags to the AVOptions
 * [ffmpeg] / libavcodec / mpegvideo.c
 */
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
32 #include "avcodec.h"
33 #include "dsputil.h"
34 #include "internal.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "faandct.h"
40 #include "xvmc_internal.h"
41 #include "thread.h"
42 #include <limits.h>
43
44 //#undef NDEBUG
45 //#include <assert.h>
46
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48                                    DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50                                    DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52                                    DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54                                    DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56                                    DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58                                   DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60                                   DCTELEM *block, int n, int qscale);
61
62
63 /* enable all paranoid tests for rounding, overflows, etc... */
64 //#define PARANOID
65
66 //#define DEBUG
67
68
/* identity chroma qscale mapping, used when no codec installs its own table */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};

/* MPEG-1 intra DC scale: a constant 8 for every qscale value */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

/* constant DC scale 4 (see ff_mpeg2_dc_scale_table below) */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

/* constant DC scale 2 (see ff_mpeg2_dc_scale_table below) */
static const uint8_t mpeg2_dc_scale_table2[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

/* constant DC scale 1 (see ff_mpeg2_dc_scale_table below) */
static const uint8_t mpeg2_dc_scale_table3[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

/* the four entries select DC scales 8, 4, 2 and 1; presumably indexed by the
   MPEG-2 intra_dc_precision field — confirm against the mpeg12 decoder */
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};

/* pixel formats for plain software 4:2:0 decoding */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};

/* as above, but with the hardware-accelerated formats listed first */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
124
/**
 * Scan [p, end) for an MPEG start code (0x000001xx).
 * *state is a rolling 32-bit big-endian window over the last four bytes
 * consumed, so a start code that straddles the previous call's buffer is
 * still found. Returns a pointer just past the start code (i.e. past the
 * xx byte), or end if none was found; *state is updated either way.
 */
const uint8_t *avpriv_mpv_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Feed up to three bytes through the rolling state so a code begun in
     * a previous buffer is completed here. */
    for (i = 0; i < 3; i++) {
        uint32_t rolled = *state << 8;
        *state = rolled + *p++;
        if (rolled == 0x100 || p == end)
            return p;
    }

    /* Boyer-Moore-ish skip loop: inspect p[-1] first so most positions
     * advance three bytes at a time. */
    while (p < end) {
        if (p[-1] > 1) {
            p += 3;
        } else if (p[-2]) {
            p += 2;
        } else if (p[-3] | (p[-1] - 1)) {
            p++;
        } else {
            /* found 00 00 01: step past the xx byte and stop */
            p++;
            break;
        }
    }

    /* p may have overshot end by up to two bytes; clamp before reading. */
    if (p > end)
        p = end;
    p -= 4;
    /* big-endian load of the last four bytes scanned (AV_RB32 equivalent) */
    *state = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];

    return p + 4;
}
154
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    dsputil_init(&s->dsp, s->avctx);

    /* install the generic C inverse quantizers; the arch-specific init
       below may replace any of them with optimized versions */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* exactly one architecture-specific init is compiled in */
#if   HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    /* these two are set up unconditionally, independent of alternate_scan */
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
200
201 void ff_copy_picture(Picture *dst, Picture *src){
202     *dst = *src;
203     dst->f.type= FF_BUFFER_TYPE_COPY;
204 }
205
206 /**
207  * Release a frame buffer
208  */
209 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
210 {
211     /* Windows Media Image codecs allocate internal buffers with different
212        dimensions; ignore user defined callbacks for these */
213     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
214         ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
215     else
216         avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
217     av_freep(&pic->f.hwaccel_picture_private);
218 }
219
220 /**
221  * Allocate a frame buffer
222  */
223 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
224 {
225     int r;
226
227     if (s->avctx->hwaccel) {
228         assert(!pic->f.hwaccel_picture_private);
229         if (s->avctx->hwaccel->priv_data_size) {
230             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
231             if (!pic->f.hwaccel_picture_private) {
232                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
233                 return -1;
234             }
235         }
236     }
237
238     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
239         r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
240     else
241         r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
242
243     if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
244         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
245                r, pic->f.age, pic->f.type, pic->f.data[0]);
246         av_freep(&pic->f.hwaccel_picture_private);
247         return -1;
248     }
249
250     if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
251         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
252         free_frame_buffer(s, pic);
253         return -1;
254     }
255
256     if (pic->f.linesize[1] != pic->f.linesize[2]) {
257         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
258         free_frame_buffer(s, pic);
259         return -1;
260     }
261
262     return 0;
263 }
264
265 /**
266  * allocates a Picture
267  * The pixels are allocated/set by calling get_buffer() if shared=0
268  */
269 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
270     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
271     const int mb_array_size= s->mb_stride*s->mb_height;
272     const int b8_array_size= s->b8_stride*s->mb_height*2;
273     const int b4_array_size= s->b4_stride*s->mb_height*4;
274     int i;
275     int r= -1;
276
277     if(shared){
278         assert(pic->f.data[0]);
279         assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
280         pic->f.type = FF_BUFFER_TYPE_SHARED;
281     }else{
282         assert(!pic->f.data[0]);
283
284         if (alloc_frame_buffer(s, pic) < 0)
285             return -1;
286
287         s->linesize   = pic->f.linesize[0];
288         s->uvlinesize = pic->f.linesize[1];
289     }
290
291     if (pic->f.qscale_table == NULL) {
292         if (s->encoding) {
293             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
294             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
295             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
296         }
297
298         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
299         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t)  , fail)
300         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
301         pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
302         pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
303         if(s->out_format == FMT_H264){
304             for(i=0; i<2; i++){
305                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
306                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
307                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
308             }
309             pic->f.motion_subsample_log2 = 2;
310         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
311             for(i=0; i<2; i++){
312                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
313                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
314                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
315             }
316             pic->f.motion_subsample_log2 = 3;
317         }
318         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
319             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
320         }
321         pic->f.qstride = s->mb_stride;
322         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
323     }
324
325     /* It might be nicer if the application would keep track of these
326      * but it would require an API change. */
327     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
328     s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
329     if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
330         pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
331     pic->owner2 = s;
332
333     return 0;
334 fail: //for the FF_ALLOCZ_OR_GOTO macro
335     if(r>=0)
336         free_frame_buffer(s, pic);
337     return -1;
338 }
339
340 /**
341  * deallocates a picture
342  */
343 static void free_picture(MpegEncContext *s, Picture *pic){
344     int i;
345
346     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
347         free_frame_buffer(s, pic);
348     }
349
350     av_freep(&pic->mb_var);
351     av_freep(&pic->mc_mb_var);
352     av_freep(&pic->mb_mean);
353     av_freep(&pic->f.mbskip_table);
354     av_freep(&pic->qscale_table_base);
355     av_freep(&pic->mb_type_base);
356     av_freep(&pic->f.dct_coeff);
357     av_freep(&pic->f.pan_scan);
358     pic->f.mb_type = NULL;
359     for(i=0; i<2; i++){
360         av_freep(&pic->motion_val_base[i]);
361         av_freep(&pic->f.ref_index[i]);
362     }
363
364     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
365         for(i=0; i<4; i++){
366             pic->f.base[i] =
367             pic->f.data[i] = NULL;
368         }
369         pic->f.type = 0;
370     }
371 }
372
/* Allocate the per-thread scratch buffers and DCT block storage for a
 * (possibly duplicated) context. "base" is not referenced here. On failure
 * partially allocated buffers are left for MPV_common_end() to free. */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance

     //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    // all scratchpads alias the same allocation; obmc is offset by 16 bytes
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        // motion-estimation hash maps, plus noise-reduction stats if enabled
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
        }
    }
    // 12 blocks of 64 coefficients, doubled (interlaced / second set)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];

    // pblocks indirection lets codecs permute block order without copying
    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        // +stride+1 offsets leave a guard border for prediction from above/left
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
414
415 static void free_duplicate_context(MpegEncContext *s){
416     if(s==NULL) return;
417
418     av_freep(&s->edge_emu_buffer);
419     av_freep(&s->me.scratchpad);
420     s->me.temp=
421     s->rd_scratchpad=
422     s->b_scratchpad=
423     s->obmc_scratchpad= NULL;
424
425     av_freep(&s->dct_error_sum);
426     av_freep(&s->me.map);
427     av_freep(&s->me.score_map);
428     av_freep(&s->blocks);
429     av_freep(&s->ac_val_base);
430     s->block= NULL;
431 }
432
/* Save the per-thread scratch/state pointers of src into bak so they can be
 * restored after a wholesale memcpy of the context — see
 * ff_update_duplicate_context(). Only the fields listed here survive. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
458
459 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
460     MpegEncContext bak;
461     int i;
462     //FIXME copy only needed parts
463 //START_TIMER
464     backup_duplicate_context(&bak, dst);
465     memcpy(dst, src, sizeof(MpegEncContext));
466     backup_duplicate_context(dst, &bak);
467     for(i=0;i<12;i++){
468         dst->pblocks[i] = &dst->block[i];
469     }
470 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
471 }
472
473 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
474 {
475     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
476
477     if(dst == src || !s1->context_initialized) return 0;
478
479     //FIXME can parameters change on I-frames? in that case dst may need a reinit
480     if(!s->context_initialized){
481         memcpy(s, s1, sizeof(MpegEncContext));
482
483         s->avctx                 = dst;
484         s->picture_range_start  += MAX_PICTURE_COUNT;
485         s->picture_range_end    += MAX_PICTURE_COUNT;
486         s->bitstream_buffer      = NULL;
487         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
488
489         MPV_common_init(s);
490     }
491
492     s->avctx->coded_height  = s1->avctx->coded_height;
493     s->avctx->coded_width   = s1->avctx->coded_width;
494     s->avctx->width         = s1->avctx->width;
495     s->avctx->height        = s1->avctx->height;
496
497     s->coded_picture_number = s1->coded_picture_number;
498     s->picture_number       = s1->picture_number;
499     s->input_picture_number = s1->input_picture_number;
500
501     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
502     memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
503
504     s->last_picture_ptr     = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
505     s->current_picture_ptr  = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
506     s->next_picture_ptr     = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
507
508     memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
509
510     //Error/bug resilience
511     s->next_p_frame_damaged = s1->next_p_frame_damaged;
512     s->workaround_bugs      = s1->workaround_bugs;
513
514     //MPEG4 timing info
515     memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
516
517     //B-frame info
518     s->max_b_frames         = s1->max_b_frames;
519     s->low_delay            = s1->low_delay;
520     s->dropable             = s1->dropable;
521
522     //DivX handling (doesn't work)
523     s->divx_packed          = s1->divx_packed;
524
525     if(s1->bitstream_buffer){
526         if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
527             av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
528         s->bitstream_buffer_size  = s1->bitstream_buffer_size;
529         memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
530         memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
531     }
532
533     //MPEG2/interlacing info
534     memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
535
536     if(!s1->first_field){
537         s->last_pict_type= s1->pict_type;
538         if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
539
540         if (s1->pict_type != AV_PICTURE_TYPE_B) {
541             s->last_non_b_pict_type= s1->pict_type;
542         }
543     }
544
545     return 0;
546 }
547
548 /**
549  * sets the given MpegEncContext to common defaults (same for encoding and decoding).
550  * the changed fields will not depend upon the prior state of the MpegEncContext.
551  */
552 void MPV_common_defaults(MpegEncContext *s){
553     s->y_dc_scale_table=
554     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
555     s->chroma_qscale_table= ff_default_chroma_qscale_table;
556     s->progressive_frame= 1;
557     s->progressive_sequence= 1;
558     s->picture_structure= PICT_FRAME;
559
560     s->coded_picture_number = 0;
561     s->picture_number = 0;
562     s->input_picture_number = 0;
563
564     s->picture_in_gop_number = 0;
565
566     s->f_code = 1;
567     s->b_code = 1;
568
569     s->picture_range_start = 0;
570     s->picture_range_end = MAX_PICTURE_COUNT;
571 }
572
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_decode_defaults(MpegEncContext *s){
    /* decoding currently needs no defaults beyond the common ones */
    MPV_common_defaults(s);
}
580
581 /**
582  * init common structure for both encoder and decoder.
583  * this assumes that some variables like width/height are already set
584  */
585 av_cold int MPV_common_init(MpegEncContext *s)
586 {
587     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
588         threads = (s->encoding ||
589                    (HAVE_THREADS &&
590                     s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
591                   s->avctx->thread_count : 1;
592
593     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
594         s->mb_height = (s->height + 31) / 32 * 2;
595     else if (s->codec_id != CODEC_ID_H264)
596         s->mb_height = (s->height + 15) / 16;
597
598     if(s->avctx->pix_fmt == PIX_FMT_NONE){
599         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
600         return -1;
601     }
602
603     if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
604        (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
605         int max_threads = FFMIN(MAX_THREADS, s->mb_height);
606         av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
607                s->avctx->thread_count, max_threads);
608         threads = max_threads;
609     }
610
611     if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
612         return -1;
613
614     ff_dct_common_init(s);
615
616     s->flags= s->avctx->flags;
617     s->flags2= s->avctx->flags2;
618
619     if (s->width && s->height) {
620         s->mb_width  = (s->width  + 15) / 16;
621         s->mb_stride = s->mb_width + 1;
622         s->b8_stride = s->mb_width*2 + 1;
623         s->b4_stride = s->mb_width*4 + 1;
624         mb_array_size= s->mb_height * s->mb_stride;
625         mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
626
627         /* set chroma shifts */
628         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
629                                       &(s->chroma_y_shift) );
630
631         /* set default edge pos, will be overriden in decode_header if needed */
632         s->h_edge_pos= s->mb_width*16;
633         s->v_edge_pos= s->mb_height*16;
634
635         s->mb_num = s->mb_width * s->mb_height;
636
637         s->block_wrap[0]=
638         s->block_wrap[1]=
639         s->block_wrap[2]=
640         s->block_wrap[3]= s->b8_stride;
641         s->block_wrap[4]=
642         s->block_wrap[5]= s->mb_stride;
643
644         y_size = s->b8_stride * (2 * s->mb_height + 1);
645         c_size = s->mb_stride * (s->mb_height + 1);
646         yc_size = y_size + 2 * c_size;
647
648         /* convert fourcc to upper case */
649         s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
650
651         s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
652
653         s->avctx->coded_frame= (AVFrame*)&s->current_picture;
654
655         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
656         for(y=0; y<s->mb_height; y++){
657             for(x=0; x<s->mb_width; x++){
658                 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
659             }
660         }
661         s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
662
663         if (s->encoding) {
664             /* Allocate MV tables */
665             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
666             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
667             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
668             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
669             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
670             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
671             s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
672             s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
673             s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
674             s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
675             s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
676             s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
677
678             if(s->msmpeg4_version){
679                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
680             }
681             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
682
683             /* Allocate MB type table */
684             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
685
686             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
687
688             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
689             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
690             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
691             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
692             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
693             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
694
695             if(s->avctx->noise_reduction){
696                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
697             }
698         }
699     }
700
701     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
702     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
703     for(i = 0; i < s->picture_count; i++) {
704         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
705     }
706
707     if (s->width && s->height) {
708         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
709
710         if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
711             /* interlaced direct mode decoding tables */
712             for(i=0; i<2; i++){
713                 int j, k;
714                 for(j=0; j<2; j++){
715                     for(k=0; k<2; k++){
716                         FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
717                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
718                     }
719                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
720                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
721                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
722                 }
723                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
724             }
725         }
726         if (s->out_format == FMT_H263) {
727             /* cbp values */
728             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
729             s->coded_block= s->coded_block_base + s->b8_stride + 1;
730
731             /* cbp, ac_pred, pred_dir */
732             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
733             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
734         }
735
736         if (s->h263_pred || s->h263_plus || !s->encoding) {
737             /* dc values */
738             //MN: we need these for error resilience of intra-frames
739             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
740             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
741             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
742             s->dc_val[2] = s->dc_val[1] + c_size;
743             for(i=0;i<yc_size;i++)
744                 s->dc_val_base[i] = 1024;
745         }
746
747         /* which mb is a intra block */
748         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
749         memset(s->mbintra_table, 1, mb_array_size);
750
751         /* init macroblock skip table */
752         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
753         //Note the +1 is for a quicker mpeg4 slice_end detection
754         FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
755
756         s->parse_context.state= -1;
757         if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
758             s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
759             s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
760             s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
761         }
762     }
763
764     s->context_initialized = 1;
765     s->thread_context[0]= s;
766
767     if (s->width && s->height) {
768     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
769         for(i=1; i<threads; i++){
770             s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
771             memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
772         }
773
774         for(i=0; i<threads; i++){
775             if(init_duplicate_context(s->thread_context[i], s) < 0)
776                 goto fail;
777             s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
778             s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
779         }
780     } else {
781         if(init_duplicate_context(s, s) < 0) goto fail;
782         s->start_mb_y = 0;
783         s->end_mb_y   = s->mb_height;
784     }
785     }
786
787     return 0;
788  fail:
789     MPV_common_end(s);
790     return -1;
791 }
792
/* free everything allocated by MPV_common_init(); shared teardown path for
 * both the encoder and the decoder */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* Free the per-thread duplicate contexts. Slot 0 is s itself (set in
     * MPV_common_init()), so its internals are released but the pointer is
     * not freed; only slots >= 1 were separately av_malloc()ed. */
    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        for(i=0; i<s->avctx->thread_count; i++){
            free_duplicate_context(s->thread_context[i]);
        }
        for(i=1; i<s->avctx->thread_count; i++){
            av_freep(&s->thread_context[i]);
        }
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* The *_mv_table pointers are offset views into the corresponding
     * *_table_base allocations (see MPV_common_init), so only the bases are
     * freed and the derived pointers are NULLed by hand. */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    /* field-based MV tables (interlaced ME / MPEG-4 interlaced direct mode) */
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* frame-threaded copies (avctx->is_copy) do not own the picture
     * buffers, so only the original context releases them */
    if(s->picture && !s->avctx->is_copy){
        for(i=0; i<s->picture_count; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    /* reset state so a subsequent MPV_common_init() starts clean */
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    /* with frame threading, buffer release is handled by the thread code */
    if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
878
879 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
880 {
881     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
882     uint8_t index_run[MAX_RUN+1];
883     int last, run, level, start, end, i;
884
885     /* If table is static, we can quit if rl->max_level[0] is not NULL */
886     if(static_store && rl->max_level[0])
887         return;
888
889     /* compute max_level[], max_run[] and index_run[] */
890     for(last=0;last<2;last++) {
891         if (last == 0) {
892             start = 0;
893             end = rl->last;
894         } else {
895             start = rl->last;
896             end = rl->n;
897         }
898
899         memset(max_level, 0, MAX_RUN + 1);
900         memset(max_run, 0, MAX_LEVEL + 1);
901         memset(index_run, rl->n, MAX_RUN + 1);
902         for(i=start;i<end;i++) {
903             run = rl->table_run[i];
904             level = rl->table_level[i];
905             if (index_run[run] == rl->n)
906                 index_run[run] = i;
907             if (level > max_level[run])
908                 max_level[run] = level;
909             if (run > max_run[level])
910                 max_run[level] = run;
911         }
912         if(static_store)
913             rl->max_level[last] = static_store[last];
914         else
915             rl->max_level[last] = av_malloc(MAX_RUN + 1);
916         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
917         if(static_store)
918             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
919         else
920             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
921         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
922         if(static_store)
923             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
924         else
925             rl->index_run[last] = av_malloc(MAX_RUN + 1);
926         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
927     }
928 }
929
930 void init_vlc_rl(RLTable *rl)
931 {
932     int i, q;
933
934     for(q=0; q<32; q++){
935         int qmul= q*2;
936         int qadd= (q-1)|1;
937
938         if(q==0){
939             qmul=1;
940             qadd=0;
941         }
942         for(i=0; i<rl->vlc.table_size; i++){
943             int code= rl->vlc.table[i][0];
944             int len = rl->vlc.table[i][1];
945             int level, run;
946
947             if(len==0){ // illegal code
948                 run= 66;
949                 level= MAX_LEVEL;
950             }else if(len<0){ //more bits needed
951                 run= 0;
952                 level= code;
953             }else{
954                 if(code==rl->n){ //esc
955                     run= 66;
956                     level= 0;
957                 }else{
958                     run=   rl->table_run  [code] + 1;
959                     level= rl->table_level[code] * qmul + qadd;
960                     if(code >= rl->last) run+=192;
961                 }
962             }
963             rl->rl_vlc[q][i].len= len;
964             rl->rl_vlc[q][i].level= level;
965             rl->rl_vlc[q][i].run= run;
966         }
967     }
968 }
969
970 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
971 {
972     int i;
973
974     /* release non reference frames */
975     for(i=0; i<s->picture_count; i++){
976         if (s->picture[i].f.data[0] && !s->picture[i].f.reference
977            && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
978            && (remove_current || &s->picture[i] != s->current_picture_ptr)
979            /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
980             free_frame_buffer(s, &s->picture[i]);
981         }
982     }
983 }
984
985 int ff_find_unused_picture(MpegEncContext *s, int shared){
986     int i;
987
988     if(shared){
989         for(i=s->picture_range_start; i<s->picture_range_end; i++){
990             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
991                 return i;
992         }
993     }else{
994         for(i=s->picture_range_start; i<s->picture_range_end; i++){
995             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
996                 return i; //FIXME
997         }
998         for(i=s->picture_range_start; i<s->picture_range_end; i++){
999             if (s->picture[i].f.data[0] == NULL)
1000                 return i;
1001         }
1002     }
1003
1004     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
1005     /* We could return -1, but the codec would crash trying to draw into a
1006      * non-existing frame anyway. This is safer than waiting for a random crash.
1007      * Also the return of this is never useful, an encoder must only allocate
1008      * as much as allowed in the specification. This has no relationship to how
1009      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1010      * enough for such valid streams).
1011      * Plus, a decoder has to check stream validity and remove frames if too
1012      * many reference frames are around. Waiting for "OOM" is not correct at
1013      * all. Similarly, missing reference frames have to be replaced by
1014      * interpolated/MC frames, anything else is a bug in the codec ...
1015      */
1016     abort();
1017     return -1;
1018 }
1019
1020 static void update_noise_reduction(MpegEncContext *s){
1021     int intra, i;
1022
1023     for(intra=0; intra<2; intra++){
1024         if(s->dct_count[intra] > (1<<16)){
1025             for(i=0; i<64; i++){
1026                 s->dct_error_sum[intra][i] >>=1;
1027             }
1028             s->dct_count[intra] >>= 1;
1029         }
1030
1031         for(i=0; i<64; i++){
1032             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1033         }
1034     }
1035 }
1036
1037 /**
1038  * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1039  */
1040 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1041 {
1042     int i;
1043     Picture *pic;
1044     s->mb_skipped = 0;
1045
1046     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1047
1048     /* mark&release old frames */
1049     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1050       if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1051           if (s->last_picture_ptr->owner2 == s)
1052               free_frame_buffer(s, s->last_picture_ptr);
1053
1054         /* release forgotten pictures */
1055         /* if(mpeg124/h263) */
1056         if(!s->encoding){
1057             for(i=0; i<s->picture_count; i++){
1058                 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1059                     if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1060                         av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1061                     free_frame_buffer(s, &s->picture[i]);
1062                 }
1063             }
1064         }
1065       }
1066     }
1067
1068     if(!s->encoding){
1069         ff_release_unused_pictures(s, 1);
1070
1071         if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1072             pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1073         else{
1074             i= ff_find_unused_picture(s, 0);
1075             pic= &s->picture[i];
1076         }
1077
1078         pic->f.reference = 0;
1079         if (!s->dropable){
1080             if (s->codec_id == CODEC_ID_H264)
1081                 pic->f.reference = s->picture_structure;
1082             else if (s->pict_type != AV_PICTURE_TYPE_B)
1083                 pic->f.reference = 3;
1084         }
1085
1086         pic->f.coded_picture_number = s->coded_picture_number++;
1087
1088         if(ff_alloc_picture(s, pic, 0) < 0)
1089             return -1;
1090
1091         s->current_picture_ptr= pic;
1092         //FIXME use only the vars from current_pic
1093         s->current_picture_ptr->f.top_field_first = s->top_field_first;
1094         if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1095             if(s->picture_structure != PICT_FRAME)
1096                 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1097         }
1098         s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1099         s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1100     }
1101
1102     s->current_picture_ptr->f.pict_type = s->pict_type;
1103 //    if(s->flags && CODEC_FLAG_QSCALE)
1104   //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1105     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1106
1107     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1108
1109     if (s->pict_type != AV_PICTURE_TYPE_B) {
1110         s->last_picture_ptr= s->next_picture_ptr;
1111         if(!s->dropable)
1112             s->next_picture_ptr= s->current_picture_ptr;
1113     }
1114 /*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1115         s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
1116         s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
1117         s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1118         s->pict_type, s->dropable);*/
1119
1120     if(s->codec_id != CODEC_ID_H264){
1121         if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1122            (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1123             if (s->pict_type != AV_PICTURE_TYPE_I)
1124                 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1125             else if (s->picture_structure != PICT_FRAME)
1126                 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1127
1128             /* Allocate a dummy frame */
1129             i= ff_find_unused_picture(s, 0);
1130             s->last_picture_ptr= &s->picture[i];
1131             if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1132                 return -1;
1133             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1134             ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1135         }
1136         if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1137             /* Allocate a dummy frame */
1138             i= ff_find_unused_picture(s, 0);
1139             s->next_picture_ptr= &s->picture[i];
1140             if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1141                 return -1;
1142             ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1143             ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1144         }
1145     }
1146
1147     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1148     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1149
1150     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
1151
1152     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1153         int i;
1154         for(i=0; i<4; i++){
1155             if(s->picture_structure == PICT_BOTTOM_FIELD){
1156                  s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1157             }
1158             s->current_picture.f.linesize[i] *= 2;
1159             s->last_picture.f.linesize[i]    *= 2;
1160             s->next_picture.f.linesize[i]    *= 2;
1161         }
1162     }
1163
1164     s->error_recognition= avctx->error_recognition;
1165
1166     /* set dequantizer, we can't do it during init as it might change for mpeg4
1167        and we can't do it in the header decode as init is not called for mpeg4 there yet */
1168     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1169         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1170         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1171     }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1172         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1173         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1174     }else{
1175         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1176         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1177     }
1178
1179     if(s->dct_error_sum){
1180         assert(s->avctx->noise_reduction && s->encoding);
1181
1182         update_noise_reduction(s);
1183     }
1184
1185     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1186         return ff_xvmc_field_start(s, avctx);
1187
1188     return 0;
1189 }
1190
/* generic function for encode/decode called after a frame has been coded/decoded:
 * pads reference frames out to their edges, updates last-frame bookkeeping,
 * releases the encoder's non-reference frames and reports decode progress */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* redraw edges for the frame if decoding didn't complete */
    //just to make sure that all data is rendered.
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
   }else if((s->error_count || s->encoding)
       && !s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            /* replicate the border pixels of each plane into the allocated
             * edge area so motion vectors pointing outside the picture read
             * valid data; chroma planes use shifted dimensions */
            int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
            int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
            s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                              s->h_edge_pos             , s->v_edge_pos,
                              EDGE_WIDTH        , EDGE_WIDTH        , EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                              s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                              EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                              s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
                              EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember per-type state for rate control / next frame decisions */
    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
    if(s->pict_type!=AV_PICTURE_TYPE_B){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<s->picture_count; i++){
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;

    /* unblock frame threads waiting on this reference frame
     * (H.264 reports progress itself at finer granularity) */
    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
    }
}
1257
1258 /**
1259  * draws an line from (ex, ey) -> (sx, sy).
1260  * @param w width of the image
1261  * @param h height of the image
1262  * @param stride stride/linesize of the image
1263  * @param color color of the arrow
1264  */
1265 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1266     int x, y, fr, f;
1267
1268     sx= av_clip(sx, 0, w-1);
1269     sy= av_clip(sy, 0, h-1);
1270     ex= av_clip(ex, 0, w-1);
1271     ey= av_clip(ey, 0, h-1);
1272
1273     buf[sy*stride + sx]+= color;
1274
1275     if(FFABS(ex - sx) > FFABS(ey - sy)){
1276         if(sx > ex){
1277             FFSWAP(int, sx, ex);
1278             FFSWAP(int, sy, ey);
1279         }
1280         buf+= sx + sy*stride;
1281         ex-= sx;
1282         f= ((ey-sy)<<16)/ex;
1283         for(x= 0; x <= ex; x++){
1284             y = (x*f)>>16;
1285             fr= (x*f)&0xFFFF;
1286             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
1287             buf[(y+1)*stride + x]+= (color*         fr )>>16;
1288         }
1289     }else{
1290         if(sy > ey){
1291             FFSWAP(int, sx, ex);
1292             FFSWAP(int, sy, ey);
1293         }
1294         buf+= sx + sy*stride;
1295         ey-= sy;
1296         if(ey) f= ((ex-sx)<<16)/ey;
1297         else   f= 0;
1298         for(y= 0; y <= ey; y++){
1299             x = (y*f)>>16;
1300             fr= (y*f)&0xFFFF;
1301             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
1302             buf[y*stride + x+1]+= (color*         fr )>>16;
1303         }
1304     }
1305 }
1306
1307 /**
1308  * draws an arrow from (ex, ey) -> (sx, sy).
1309  * @param w width of the image
1310  * @param h height of the image
1311  * @param stride stride/linesize of the image
1312  * @param color color of the arrow
1313  */
1314 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1315     int dx,dy;
1316
1317     sx= av_clip(sx, -100, w+100);
1318     sy= av_clip(sy, -100, h+100);
1319     ex= av_clip(ex, -100, w+100);
1320     ey= av_clip(ey, -100, h+100);
1321
1322     dx= ex - sx;
1323     dy= ey - sy;
1324
1325     if(dx*dx + dy*dy > 3*3){
1326         int rx=  dx + dy;
1327         int ry= -dx + dy;
1328         int length= ff_sqrt((rx*rx + ry*ry)<<8);
1329
1330         //FIXME subpixel accuracy
1331         rx= ROUNDED_DIV(rx*3<<4, length);
1332         ry= ROUNDED_DIV(ry*3<<4, length);
1333
1334         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1335         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1336     }
1337     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1338 }
1339
1340 /**
1341  * prints debuging info for the given picture.
1342  */
1343 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1344
1345     if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1346
1347     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1348         int x,y;
1349
1350         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1351         switch (pict->pict_type) {
1352             case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1353             case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1354             case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1355             case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1356             case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1357             case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1358         }
1359         for(y=0; y<s->mb_height; y++){
1360             for(x=0; x<s->mb_width; x++){
1361                 if(s->avctx->debug&FF_DEBUG_SKIP){
1362                     int count= s->mbskip_table[x + y*s->mb_stride];
1363                     if(count>9) count=9;
1364                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1365                 }
1366                 if(s->avctx->debug&FF_DEBUG_QP){
1367                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1368                 }
1369                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1370                     int mb_type= pict->mb_type[x + y*s->mb_stride];
1371                     //Type & MV direction
1372                     if(IS_PCM(mb_type))
1373                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1374                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1375                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1376                     else if(IS_INTRA4x4(mb_type))
1377                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1378                     else if(IS_INTRA16x16(mb_type))
1379                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1380                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1381                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1382                     else if(IS_DIRECT(mb_type))
1383                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1384                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1385                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1386                     else if(IS_GMC(mb_type))
1387                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1388                     else if(IS_SKIP(mb_type))
1389                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1390                     else if(!USES_LIST(mb_type, 1))
1391                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1392                     else if(!USES_LIST(mb_type, 0))
1393                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1394                     else{
1395                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1396                         av_log(s->avctx, AV_LOG_DEBUG, "X");
1397                     }
1398
1399                     //segmentation
1400                     if(IS_8X8(mb_type))
1401                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1402                     else if(IS_16X8(mb_type))
1403                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1404                     else if(IS_8X16(mb_type))
1405                         av_log(s->avctx, AV_LOG_DEBUG, "|");
1406                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1407                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1408                     else
1409                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1410
1411
1412                     if(IS_INTERLACED(mb_type))
1413                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1414                     else
1415                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1416                 }
1417 //                av_log(s->avctx, AV_LOG_DEBUG, " ");
1418             }
1419             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1420         }
1421     }
1422
1423     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1424         const int shift= 1 + s->quarter_sample;
1425         int mb_y;
1426         uint8_t *ptr;
1427         int i;
1428         int h_chroma_shift, v_chroma_shift, block_height;
1429         const int width = s->avctx->width;
1430         const int height= s->avctx->height;
1431         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1432         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1433         s->low_delay=0; //needed to see the vectors without trashing the buffers
1434
1435         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1436         for(i=0; i<3; i++){
1437             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1438             pict->data[i]= s->visualization_buffer[i];
1439         }
1440         pict->type= FF_BUFFER_TYPE_COPY;
1441         ptr= pict->data[0];
1442         block_height = 16>>v_chroma_shift;
1443
1444         for(mb_y=0; mb_y<s->mb_height; mb_y++){
1445             int mb_x;
1446             for(mb_x=0; mb_x<s->mb_width; mb_x++){
1447                 const int mb_index= mb_x + mb_y*s->mb_stride;
1448                 if((s->avctx->debug_mv) && pict->motion_val){
1449                   int type;
1450                   for(type=0; type<3; type++){
1451                     int direction = 0;
1452                     switch (type) {
1453                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1454                                 continue;
1455                               direction = 0;
1456                               break;
1457                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1458                                 continue;
1459                               direction = 0;
1460                               break;
1461                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1462                                 continue;
1463                               direction = 1;
1464                               break;
1465                     }
1466                     if(!USES_LIST(pict->mb_type[mb_index], direction))
1467                         continue;
1468
1469                     if(IS_8X8(pict->mb_type[mb_index])){
1470                       int i;
1471                       for(i=0; i<4; i++){
1472                         int sx= mb_x*16 + 4 + 8*(i&1);
1473                         int sy= mb_y*16 + 4 + 8*(i>>1);
1474                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1475                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1476                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1477                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1478                       }
1479                     }else if(IS_16X8(pict->mb_type[mb_index])){
1480                       int i;
1481                       for(i=0; i<2; i++){
1482                         int sx=mb_x*16 + 8;
1483                         int sy=mb_y*16 + 4 + 8*i;
1484                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1485                         int mx=(pict->motion_val[direction][xy][0]>>shift);
1486                         int my=(pict->motion_val[direction][xy][1]>>shift);
1487
1488                         if(IS_INTERLACED(pict->mb_type[mb_index]))
1489                             my*=2;
1490
1491                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1492                       }
1493                     }else if(IS_8X16(pict->mb_type[mb_index])){
1494                       int i;
1495                       for(i=0; i<2; i++){
1496                         int sx=mb_x*16 + 4 + 8*i;
1497                         int sy=mb_y*16 + 8;
1498                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1499                         int mx=(pict->motion_val[direction][xy][0]>>shift);
1500                         int my=(pict->motion_val[direction][xy][1]>>shift);
1501
1502                         if(IS_INTERLACED(pict->mb_type[mb_index]))
1503                             my*=2;
1504
1505                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1506                       }
1507                     }else{
1508                       int sx= mb_x*16 + 8;
1509                       int sy= mb_y*16 + 8;
1510                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1511                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1512                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1513                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1514                     }
1515                   }
1516                 }
1517                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1518                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1519                     int y;
1520                     for(y=0; y<block_height; y++){
1521                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1522                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1523                     }
1524                 }
1525                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1526                     int mb_type= pict->mb_type[mb_index];
1527                     uint64_t u,v;
1528                     int y;
1529 #define COLOR(theta, r)\
1530 u= (int)(128 + r*cos(theta*3.141592/180));\
1531 v= (int)(128 + r*sin(theta*3.141592/180));
1532
1533
1534                     u=v=128;
1535                     if(IS_PCM(mb_type)){
1536                         COLOR(120,48)
1537                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1538                         COLOR(30,48)
1539                     }else if(IS_INTRA4x4(mb_type)){
1540                         COLOR(90,48)
1541                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1542 //                        COLOR(120,48)
1543                     }else if(IS_DIRECT(mb_type)){
1544                         COLOR(150,48)
1545                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1546                         COLOR(170,48)
1547                     }else if(IS_GMC(mb_type)){
1548                         COLOR(190,48)
1549                     }else if(IS_SKIP(mb_type)){
1550 //                        COLOR(180,48)
1551                     }else if(!USES_LIST(mb_type, 1)){
1552                         COLOR(240,48)
1553                     }else if(!USES_LIST(mb_type, 0)){
1554                         COLOR(0,48)
1555                     }else{
1556                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1557                         COLOR(300,48)
1558                     }
1559
1560                     u*= 0x0101010101010101ULL;
1561                     v*= 0x0101010101010101ULL;
1562                     for(y=0; y<block_height; y++){
1563                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1564                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
1565                     }
1566
1567                     //segmentation
1568                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1569                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1570                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1571                     }
1572                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1573                         for(y=0; y<16; y++)
1574                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1575                     }
1576                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1577                         int dm= 1 << (mv_sample_log2-2);
1578                         for(i=0; i<4; i++){
1579                             int sx= mb_x*16 + 8*(i&1);
1580                             int sy= mb_y*16 + 8*(i>>1);
1581                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1582                             //FIXME bidir
1583                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1584                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1585                                 for(y=0; y<8; y++)
1586                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1587                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1588                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1589                         }
1590                     }
1591
1592                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1593                         // hmm
1594                     }
1595                 }
1596                 s->mbskip_table[mb_index]=0;
1597             }
1598         }
1599     }
1600 }
1601
/**
 * Halfpel motion compensation of a single block in lowres mode.
 *
 * The motion vector is given in the full-resolution halfpel (or quarterpel,
 * see s->quarter_sample) grid; the fractional part left over after lowres
 * downscaling is handed to the chroma MC function as an x/y phase.
 *
 * @param field_based  1 if referencing a single field (doubles the effective
 *                     line stride and halves the vertical edge position)
 * @param field_select 1 to read from the bottom field (advances src by one line)
 * @param w, h         block width/height in lowres pixels
 * @param pix_op       array of h264-style chroma MC functions indexed by lowres level
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, h264_chroma_mc_func *pix_op,
                                  int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    const int s_mask= (2<<lowres)-1;  // mask of MV bits that stay fractional at this lowres level
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        // fold qpel MVs down to hpel; lowres cannot represent qpel anyway
        motion_x/=2;
        motion_y/=2;
    }

    // split MV into fractional phase (sx/sy) and integer lowres-pel displacement
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    // if the (w+1)x(h+1) source footprint may cross the padded picture edge,
    // build a padded copy in edge_emu_buffer and read from that instead
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    // rescale the fractional phase to the 1/8-pel range the MC functions expect
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    if(field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
1644
/**
 * Apply one MPEG motion vector to all three components (lowres variant).
 *
 * Computes luma and chroma source positions from a single full-resolution
 * MV (chroma derivation depends on s->out_format: H.263-style rounding,
 * H.261 full-pel chroma, or plain MPEG halving), performs edge emulation
 * when the source footprint leaves the padded picture, and runs the
 * per-plane lowres MC functions.
 *
 * @param field_based  1 when referencing a single field of a frame picture
 * @param bottom_field 1 to write into the bottom field of dest
 * @param field_select 1 to read from the bottom field of the reference
 * @param h            block height in lowres luma pixels
 * @param mb_y         macroblock row (already adjusted by the caller for
 *                     field pictures, see the mb_y>>1 call sites)
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    const int block_s= 8>>lowres;      // lowres size of one 8x8 block
    const int s_mask= (2<<lowres)-1;   // MV bits that remain fractional at this lowres level
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        // compensate for the vertical phase offset between the two fields
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    // split the luma MV into fractional phase and integer source position
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    if (s->out_format == FMT_H263) {
        // H.263: chroma fraction keeps the luma's odd bit for correct rounding
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
        uvsrc_y =    mb_y*block_s               + (my >> lowres);
    } else {
        // MPEG-1/2 style: chroma MV is half the luma MV
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    // edge emulation when the 17x17 luma (9x9 chroma) footprint can leave
    // the padded picture; chroma shares the tail of edge_emu_buffer
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        // read from the bottom field of the (interleaved) reference
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    // rescale fractional phases to the 1/8-pel range the MC functions expect
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
1742
1743 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1744                                      uint8_t *dest_cb, uint8_t *dest_cr,
1745                                      uint8_t **ref_picture,
1746                                      h264_chroma_mc_func *pix_op,
1747                                      int mx, int my){
1748     const int lowres= s->avctx->lowres;
1749     const int op_index= FFMIN(lowres, 2);
1750     const int block_s= 8>>lowres;
1751     const int s_mask= (2<<lowres)-1;
1752     const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1753     const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1754     int emu=0, src_x, src_y, offset, sx, sy;
1755     uint8_t *ptr;
1756
1757     if(s->quarter_sample){
1758         mx/=2;
1759         my/=2;
1760     }
1761
1762     /* In case of 8X8, we construct a single chroma motion vector
1763        with a special rounding */
1764     mx= ff_h263_round_chroma(mx);
1765     my= ff_h263_round_chroma(my);
1766
1767     sx= mx & s_mask;
1768     sy= my & s_mask;
1769     src_x = s->mb_x*block_s + (mx >> (lowres+1));
1770     src_y = s->mb_y*block_s + (my >> (lowres+1));
1771
1772     offset = src_y * s->uvlinesize + src_x;
1773     ptr = ref_picture[1] + offset;
1774     if(s->flags&CODEC_FLAG_EMU_EDGE){
1775         if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1776            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1777             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1778             ptr= s->edge_emu_buffer;
1779             emu=1;
1780         }
1781     }
1782     sx= (sx << 2) >> lowres;
1783     sy= (sy << 2) >> lowres;
1784     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1785
1786     ptr = ref_picture[2] + offset;
1787     if(emu){
1788         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1789         ptr= s->edge_emu_buffer;
1790     }
1791     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1792 }
1793
/**
 * motion compensation of a single macroblock (lowres variant)
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;  // lowres size of one 8x8 block

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        // single MV for the whole macroblock
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        // one MV per 8x8 luma block; chroma uses the (rounded) sum of all four
        mx = 0;
        my = 0;
            for(i=0;i<4;i++) {
                hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                            ref_picture[0], 0, 0,
                            (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                            block_s, block_s, pix_op,
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            // frame picture with one MV per field
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            // field picture: a same-parity reference in a non-B, non-first
            // field actually lives in the current picture's buffer
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
                ref_picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        // two MVs, one per 16x8 half of the macroblock (field pictures)
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            // same logic as above: same-parity reference of the second field
            // is the current picture
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        // dual-prime: predictions from both parities are averaged
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
    break;
    default: assert(0);
    }
}
1919
1920 /**
1921  * find the lowest MB row referenced in the MVs
1922  */
1923 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1924 {
1925     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1926     int my, off, i, mvs;
1927
1928     if (s->picture_structure != PICT_FRAME) goto unhandled;
1929
1930     switch (s->mv_type) {
1931         case MV_TYPE_16X16:
1932             mvs = 1;
1933             break;
1934         case MV_TYPE_16X8:
1935             mvs = 2;
1936             break;
1937         case MV_TYPE_8X8:
1938             mvs = 4;
1939             break;
1940         default:
1941             goto unhandled;
1942     }
1943
1944     for (i = 0; i < mvs; i++) {
1945         my = s->mv[dir][i][1]<<qpel_shift;
1946         my_max = FFMAX(my_max, my);
1947         my_min = FFMIN(my_min, my);
1948     }
1949
1950     off = (FFMAX(-my_min, my_max) + 63) >> 6;
1951
1952     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1953 unhandled:
1954     return s->mb_height-1;
1955 }
1956
/* put block[] to dest[]: dequantize the intra block in place, then
   IDCT it and store (overwrite) the result at dest */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
1964
1965 /* add block[] to dest[] */
1966 static inline void add_dct(MpegEncContext *s,
1967                            DCTELEM *block, int i, uint8_t *dest, int line_size)
1968 {
1969     if (s->block_last_index[i] >= 0) {
1970         s->dsp.idct_add (dest, line_size, block);
1971     }
1972 }
1973
1974 static inline void add_dequant_dct(MpegEncContext *s,
1975                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1976 {
1977     if (s->block_last_index[i] >= 0) {
1978         s->dct_unquantize_inter(s, block, i, qscale);
1979
1980         s->dsp.idct_add (dest, line_size, block);
1981     }
1982 }
1983
1984 /**
1985  * cleans dc, ac, coded_block for the current non intra MB
1986  */
1987 void ff_clean_intra_table_entries(MpegEncContext *s)
1988 {
1989     int wrap = s->b8_stride;
1990     int xy = s->block_index[0];
1991
1992     s->dc_val[0][xy           ] =
1993     s->dc_val[0][xy + 1       ] =
1994     s->dc_val[0][xy     + wrap] =
1995     s->dc_val[0][xy + 1 + wrap] = 1024;
1996     /* ac pred */
1997     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1998     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1999     if (s->msmpeg4_version>=3) {
2000         s->coded_block[xy           ] =
2001         s->coded_block[xy + 1       ] =
2002         s->coded_block[xy     + wrap] =
2003         s->coded_block[xy + 1 + wrap] = 0;
2004     }
2005     /* chroma */
2006     wrap = s->mb_stride;
2007     xy = s->mb_x + s->mb_y * wrap;
2008     s->dc_val[1][xy] =
2009     s->dc_val[2][xy] = 1024;
2010     /* ac pred */
2011     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2012     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2013
2014     s->mbintra_table[xy]= 0;
2015 }
2016
2017 /* generic function called after a macroblock has been parsed by the
2018    decoder or after it has been encoded by the encoder.
2019
2020    Important variables used:
2021    s->mb_intra : true if intra macroblock
2022    s->mv_dir   : motion vector direction
2023    s->mv_type  : motion vector type
2024    s->mv       : motion vector
2025    s->interlaced_dct : true if interlaced dct used (mpeg2)
2026  */
2027 static av_always_inline
2028 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2029                             int lowres_flag, int is_mpeg12)
2030 {
2031     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2032     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2033         ff_xvmc_decode_mb(s);//xvmc uses pblocks
2034         return;
2035     }
2036
2037     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2038        /* save DCT coefficients */
2039        int i,j;
2040        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2041        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2042        for(i=0; i<6; i++){
2043            for(j=0; j<64; j++){
2044                *dct++ = block[i][s->dsp.idct_permutation[j]];
2045                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2046            }
2047            av_log(s->avctx, AV_LOG_DEBUG, "\n");
2048        }
2049     }
2050
2051     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2052
2053     /* update DC predictors for P macroblocks */
2054     if (!s->mb_intra) {
2055         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2056             if(s->mbintra_table[mb_xy])
2057                 ff_clean_intra_table_entries(s);
2058         } else {
2059             s->last_dc[0] =
2060             s->last_dc[1] =
2061             s->last_dc[2] = 128 << s->intra_dc_precision;
2062         }
2063     }
2064     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2065         s->mbintra_table[mb_xy]=1;
2066
2067     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2068         uint8_t *dest_y, *dest_cb, *dest_cr;
2069         int dct_linesize, dct_offset;
2070         op_pixels_func (*op_pix)[4];
2071         qpel_mc_func (*op_qpix)[16];
2072         const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2073         const int uvlinesize = s->current_picture.f.linesize[1];
2074         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2075         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2076
2077         /* avoid copy if macroblock skipped in last frame too */
2078         /* skip only during decoding as we might trash the buffers during encoding a bit */
2079         if(!s->encoding){
2080             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2081             const int age = s->current_picture.f.age;
2082
2083             assert(age);
2084
2085             if (s->mb_skipped) {
2086                 s->mb_skipped= 0;
2087                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2088
2089                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2090                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2091
2092                 /* if previous was skipped too, then nothing to do !  */
2093                 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2094                     return;
2095                 }
2096             } else if(!s->current_picture.f.reference) {
2097                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2098                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2099             } else{
2100                 *mbskip_ptr = 0; /* not skipped */
2101             }
2102         }
2103
2104         dct_linesize = linesize << s->interlaced_dct;
2105         dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2106
2107         if(readable){
2108             dest_y=  s->dest[0];
2109             dest_cb= s->dest[1];
2110             dest_cr= s->dest[2];
2111         }else{
2112             dest_y = s->b_scratchpad;
2113             dest_cb= s->b_scratchpad+16*linesize;
2114             dest_cr= s->b_scratchpad+32*linesize;
2115         }
2116
2117         if (!s->mb_intra) {
2118             /* motion handling */
2119             /* decoding or more than one mb_type (MC was already done otherwise) */
2120             if(!s->encoding){
2121
2122                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2123                     if (s->mv_dir & MV_DIR_FORWARD) {
2124                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2125                     }
2126                     if (s->mv_dir & MV_DIR_BACKWARD) {
2127                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2128                     }
2129                 }
2130
2131                 if(lowres_flag){
2132                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2133
2134                     if (s->mv_dir & MV_DIR_FORWARD) {
2135                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2136                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2137                     }
2138                     if (s->mv_dir & MV_DIR_BACKWARD) {
2139                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2140                     }
2141                 }else{
2142                     op_qpix= s->me.qpel_put;
2143                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2144                         op_pix = s->dsp.put_pixels_tab;
2145                     }else{
2146                         op_pix = s->dsp.put_no_rnd_pixels_tab;
2147                     }
2148                     if (s->mv_dir & MV_DIR_FORWARD) {
2149                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2150                         op_pix = s->dsp.avg_pixels_tab;
2151                         op_qpix= s->me.qpel_avg;
2152                     }
2153                     if (s->mv_dir & MV_DIR_BACKWARD) {
2154                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2155                     }
2156                 }
2157             }
2158
2159             /* skip dequant / idct if we are really late ;) */
2160             if(s->avctx->skip_idct){
2161                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2162                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2163                    || s->avctx->skip_idct >= AVDISCARD_ALL)
2164                     goto skip_idct;
2165             }
2166
2167             /* add dct residue */
2168             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2169                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2170                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2171                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2172                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2173                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2174
2175                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2176                     if (s->chroma_y_shift){
2177                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2178                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2179                     }else{
2180                         dct_linesize >>= 1;
2181                         dct_offset >>=1;
2182                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2183                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2184                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2185                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2186                     }
2187                 }
2188             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2189                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
2190                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
2191                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
2192                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2193
2194                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2195                     if(s->chroma_y_shift){//Chroma420
2196                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
2197                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
2198                     }else{
2199                         //chroma422
2200                         dct_linesize = uvlinesize << s->interlaced_dct;
2201                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2202
2203                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
2204                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
2205                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2206                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2207                         if(!s->chroma_x_shift){//Chroma444
2208                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2209                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2210                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2211                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2212                         }
2213                     }
2214                 }//fi gray
2215             }
2216             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2217                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2218             }
2219         } else {
2220             /* dct only in intra block */
2221             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2222                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2223                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2224                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2225                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2226
2227                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2228                     if(s->chroma_y_shift){
2229                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2230                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2231                     }else{
2232                         dct_offset >>=1;
2233                         dct_linesize >>=1;
2234                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2235                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2236                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2237                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2238                     }
2239                 }
2240             }else{
2241                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
2242                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
2243                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
2244                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2245
2246                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2247                     if(s->chroma_y_shift){
2248                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2249                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2250                     }else{
2251
2252                         dct_linesize = uvlinesize << s->interlaced_dct;
2253                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2254
2255                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
2256                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
2257                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2258                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2259                         if(!s->chroma_x_shift){//Chroma444
2260                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
2261                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
2262                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2263                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2264                         }
2265                     }
2266                 }//gray
2267             }
2268         }
2269 skip_idct:
2270         if(!readable){
2271             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
2272             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2273             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2274         }
2275     }
2276 }
2277
2278 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2279 #if !CONFIG_SMALL
2280     if(s->out_format == FMT_MPEG1) {
2281         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2282         else                 MPV_decode_mb_internal(s, block, 0, 1);
2283     } else
2284 #endif
2285     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2286     else                  MPV_decode_mb_internal(s, block, 0, 0);
2287 }
2288
/**
 * Pad the edges of a just-decoded horizontal band and hand it to the
 * application via the draw_horiz_band callback, if one is set.
 *
 * @param y first luma row of the band (in field rows for field pictures)
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
    const int field_pic= s->picture_structure != PICT_FRAME;
    if(field_pic){
        /* field rows are interleaved in the frame buffer: convert field
         * coordinates to frame coordinates */
        h <<= 1;
        y <<= 1;
    }

    /* replicate the band's border pixels into the edge area, so that later
     * motion compensation may read outside the visible picture; skipped for
     * hwaccel-managed surfaces and when the user supplies edge-emulated
     * buffers (CODEC_FLAG_EMU_EDGE) */
    if (!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.f.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        int sides = 0, edge_h;
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        /* only pad the top/bottom edge when this band actually touches it */
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;

        /* clip so the last band never pads past the picture bottom */
        edge_h= FFMIN(h, s->v_edge_pos - y);

        s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
                          s->linesize,           s->h_edge_pos,         edge_h,
                          EDGE_WIDTH,            EDGE_WIDTH,            sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
        s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
                          s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
                          EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
    }

    h= FFMIN(h, s->avctx->height - y);

    /* for field pictures, only report once both fields are in, unless the
     * application explicitly allows per-field delivery */
    if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (s->avctx->draw_horiz_band) {
        AVFrame *src;
        int offset[4];

        /* in coded order (B-frames / low delay) the band comes from the frame
         * being decoded; otherwise display order means the previous picture */
        if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
            src= (AVFrame*)s->current_picture_ptr;
        else if(s->last_picture_ptr)
            src= (AVFrame*)s->last_picture_ptr;
        else
            return;

        if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
            offset[0]=
            offset[1]=
            offset[2]=
            offset[3]= 0;
        }else{
            /* point the offsets at the start of this band in each plane */
            offset[0]= y * s->linesize;
            offset[1]=
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
            offset[3]= 0;
        }

        emms_c();

        s->avctx->draw_horiz_band(s->avctx, src, offset,
                                  y, s->picture_structure, h);
    }
}
2358
/**
 * Initialize the per-macroblock block indices and destination plane
 * pointers for the macroblock at (s->mb_x, s->mb_y).
 */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4 - s->avctx->lowres;  // log2 of the macroblock size (16 >> lowres)

    /* indices of the four 8x8 luma blocks in the b8-stride grid;
     * -2/-1 select the two columns belonging to the previous MB */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* chroma blocks live past the luma area, addressed with mb_stride;
     * NOTE(review): the "+ 1" / "+ mb_height + 2" offsets presumably skip
     * padding rows between the planes — confirm against the allocator */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    /* start one MB to the left; callers advance the pointers per MB */
    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
        s->dest[0] += s->mb_y *   linesize << mb_size;
        s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: mb_y counts both fields, so halve it */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
2390
2391 void ff_mpeg_flush(AVCodecContext *avctx){
2392     int i;
2393     MpegEncContext *s = avctx->priv_data;
2394
2395     if(s==NULL || s->picture==NULL)
2396         return;
2397
2398     for(i=0; i<s->picture_count; i++){
2399        if (s->picture[i].f.data[0] &&
2400            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2401             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2402         free_frame_buffer(s, &s->picture[i]);
2403     }
2404     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2405
2406     s->mb_x= s->mb_y= 0;
2407
2408     s->parse_context.state= -1;
2409     s->parse_context.frame_start_found= 0;
2410     s->parse_context.overread= 0;
2411     s->parse_context.overread_index= 0;
2412     s->parse_context.index= 0;
2413     s->parse_context.last_index= 0;
2414     s->bitstream_buffer_size=0;
2415     s->pp_time=0;
2416 }
2417
2418 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2419                                    DCTELEM *block, int n, int qscale)
2420 {
2421     int i, level, nCoeffs;
2422     const uint16_t *quant_matrix;
2423
2424     nCoeffs= s->block_last_index[n];
2425
2426     if (n < 4)
2427         block[0] = block[0] * s->y_dc_scale;
2428     else
2429         block[0] = block[0] * s->c_dc_scale;
2430     /* XXX: only mpeg1 */
2431     quant_matrix = s->intra_matrix;
2432     for(i=1;i<=nCoeffs;i++) {
2433         int j= s->intra_scantable.permutated[i];
2434         level = block[j];
2435         if (level) {
2436             if (level < 0) {
2437                 level = -level;
2438                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2439                 level = (level - 1) | 1;
2440                 level = -level;
2441             } else {
2442                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2443                 level = (level - 1) | 1;
2444             }
2445             block[j] = level;
2446         }
2447     }
2448 }
2449
2450 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2451                                    DCTELEM *block, int n, int qscale)
2452 {
2453     int i, level, nCoeffs;
2454     const uint16_t *quant_matrix;
2455
2456     nCoeffs= s->block_last_index[n];
2457
2458     quant_matrix = s->inter_matrix;
2459     for(i=0; i<=nCoeffs; i++) {
2460         int j= s->intra_scantable.permutated[i];
2461         level = block[j];
2462         if (level) {
2463             if (level < 0) {
2464                 level = -level;
2465                 level = (((level << 1) + 1) * qscale *
2466                          ((int) (quant_matrix[j]))) >> 4;
2467                 level = (level - 1) | 1;
2468                 level = -level;
2469             } else {
2470                 level = (((level << 1) + 1) * qscale *
2471                          ((int) (quant_matrix[j]))) >> 4;
2472                 level = (level - 1) | 1;
2473             }
2474             block[j] = level;
2475         }
2476     }
2477 }
2478
2479 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2480                                    DCTELEM *block, int n, int qscale)
2481 {
2482     int i, level, nCoeffs;
2483     const uint16_t *quant_matrix;
2484
2485     if(s->alternate_scan) nCoeffs= 63;
2486     else nCoeffs= s->block_last_index[n];
2487
2488     if (n < 4)
2489         block[0] = block[0] * s->y_dc_scale;
2490     else
2491         block[0] = block[0] * s->c_dc_scale;
2492     quant_matrix = s->intra_matrix;
2493     for(i=1;i<=nCoeffs;i++) {
2494         int j= s->intra_scantable.permutated[i];
2495         level = block[j];
2496         if (level) {
2497             if (level < 0) {
2498                 level = -level;
2499                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2500                 level = -level;
2501             } else {
2502                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2503             }
2504             block[j] = level;
2505         }
2506     }
2507 }
2508
2509 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2510                                    DCTELEM *block, int n, int qscale)
2511 {
2512     int i, level, nCoeffs;
2513     const uint16_t *quant_matrix;
2514     int sum=-1;
2515
2516     if(s->alternate_scan) nCoeffs= 63;
2517     else nCoeffs= s->block_last_index[n];
2518
2519     if (n < 4)
2520         block[0] = block[0] * s->y_dc_scale;
2521     else
2522         block[0] = block[0] * s->c_dc_scale;
2523     quant_matrix = s->intra_matrix;
2524     for(i=1;i<=nCoeffs;i++) {
2525         int j= s->intra_scantable.permutated[i];
2526         level = block[j];
2527         if (level) {
2528             if (level < 0) {
2529                 level = -level;
2530                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2531                 level = -level;
2532             } else {
2533                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2534             }
2535             block[j] = level;
2536             sum+=level;
2537         }
2538     }
2539     block[63]^=sum&1;
2540 }
2541
2542 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2543                                    DCTELEM *block, int n, int qscale)
2544 {
2545     int i, level, nCoeffs;
2546     const uint16_t *quant_matrix;
2547     int sum=-1;
2548
2549     if(s->alternate_scan) nCoeffs= 63;
2550     else nCoeffs= s->block_last_index[n];
2551
2552     quant_matrix = s->inter_matrix;
2553     for(i=0; i<=nCoeffs; i++) {
2554         int j= s->intra_scantable.permutated[i];
2555         level = block[j];
2556         if (level) {
2557             if (level < 0) {
2558                 level = -level;
2559                 level = (((level << 1) + 1) * qscale *
2560                          ((int) (quant_matrix[j]))) >> 4;
2561                 level = -level;
2562             } else {
2563                 level = (((level << 1) + 1) * qscale *
2564                          ((int) (quant_matrix[j]))) >> 4;
2565             }
2566             block[j] = level;
2567             sum+=level;
2568         }
2569     }
2570     block[63]^=sum&1;
2571 }
2572
2573 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2574                                   DCTELEM *block, int n, int qscale)
2575 {
2576     int i, level, qmul, qadd;
2577     int nCoeffs;
2578
2579     assert(s->block_last_index[n]>=0);
2580
2581     qmul = qscale << 1;
2582
2583     if (!s->h263_aic) {
2584         if (n < 4)
2585             block[0] = block[0] * s->y_dc_scale;
2586         else
2587             block[0] = block[0] * s->c_dc_scale;
2588         qadd = (qscale - 1) | 1;
2589     }else{
2590         qadd = 0;
2591     }
2592     if(s->ac_pred)
2593         nCoeffs=63;
2594     else
2595         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2596
2597     for(i=1; i<=nCoeffs; i++) {
2598         level = block[i];
2599         if (level) {
2600             if (level < 0) {
2601                 level = level * qmul - qadd;
2602             } else {
2603                 level = level * qmul + qadd;
2604             }
2605             block[i] = level;
2606         }
2607     }
2608 }
2609
2610 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2611                                   DCTELEM *block, int n, int qscale)
2612 {
2613     int i, level, qmul, qadd;
2614     int nCoeffs;
2615
2616     assert(s->block_last_index[n]>=0);
2617
2618     qadd = (qscale - 1) | 1;
2619     qmul = qscale << 1;
2620
2621     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2622
2623     for(i=0; i<=nCoeffs; i++) {
2624         level = block[i];
2625         if (level) {
2626             if (level < 0) {
2627                 level = level * qmul - qadd;
2628             } else {
2629                 level = level * qmul + qadd;
2630             }
2631             block[i] = level;
2632         }
2633     }
2634 }
2635
2636 /**
2637  * set qscale and update qscale dependent variables.
2638  */
2639 void ff_set_qscale(MpegEncContext * s, int qscale)
2640 {
2641     if (qscale < 1)
2642         qscale = 1;
2643     else if (qscale > 31)
2644         qscale = 31;
2645
2646     s->qscale = qscale;
2647     s->chroma_qscale= s->chroma_qscale_table[qscale];
2648
2649     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2650     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2651 }
2652
2653 void MPV_report_decode_progress(MpegEncContext *s)
2654 {
2655     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2656         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
2657 }