]> git.sesse.net Git - ffmpeg/blob - libavcodec/mpegvideo.c
Exit with error if MPV_common_init() is called with PIX_FMT_NONE.
[ffmpeg] / libavcodec / mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file libavcodec/mpegvideo.c
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "avcodec.h"
31 #include "dsputil.h"
32 #include "mpegvideo.h"
33 #include "mpegvideo_common.h"
34 #include "mjpegenc.h"
35 #include "msmpeg4.h"
36 #include "faandct.h"
37 #include "xvmc_internal.h"
38 #include <limits.h>
39
40 //#undef NDEBUG
41 //#include <assert.h>
42
43 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
44                                    DCTELEM *block, int n, int qscale);
45 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
46                                    DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
48                                    DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
50                                    DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
52                                    DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
54                                   DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
56                                   DCTELEM *block, int n, int qscale);
57
58
59 /* enable all paranoid tests for rounding, overflows, etc... */
60 //#define PARANOID
61
62 //#define DEBUG
63
64
/* Default luma->chroma qscale mapping: the identity, i.e. chroma uses the
 * same quantizer as luma for every qscale 0..31. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
69
/* MPEG-1 DC coefficient scale: a constant 8 for every entry
 * (presumably indexed by qscale — all 128 values are identical). */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
77
78
/**
 * Scans [p, end) for an MPEG-style start code (the byte sequence 00 00 01 xx).
 * @param state rolling 32-bit history of the most recent bytes seen; it lets
 *              the search resume correctly when a start code straddles two
 *              buffers. On return it holds the last 4 bytes consumed (the
 *              full start code when one was found).
 * @return pointer just past the xx byte of the start code, or end if no
 *         start code completed within this buffer.
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* Feed up to 3 bytes through the history first: tmp == 0x100 means the
     * previous three bytes (possibly from an earlier buffer) were 00 00 01,
     * so the byte just consumed is the xx of a start code. */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* Skip ahead up to 3 bytes at a time: p[-1] > 1 rules out a 00 00 01
     * ending at p, p+1 or p+2; the smaller steps narrow it down until
     * p[-3..-1] is exactly 00 00 01, then break with p just past the 01. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;
            break;
        }
    }

    /* Record the last 4 bytes (00 00 01 xx on success) for the next call. */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
108
/**
 * Initializes the unquantization function pointers and scantables shared by
 * encoder and decoder. Portable C implementations are installed first; the
 * architecture-specific init below may replace them with optimized versions.
 */
int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

#if   HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
152
/**
 * Makes a shallow, field-by-field copy of src into dst and tags dst as a
 * copy (FF_BUFFER_TYPE_COPY); the data pointers are shared with src.
 */
void ff_copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}
157
/**
 * Allocates a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared=0, otherwise
 * the caller-provided planes are kept and only the side tables are allocated.
 * @return 0 on success, -1 on failure (any partially acquired frame buffer
 *         is released again).
 */
int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;
    const int b4_array_size= s->b4_stride*s->mb_height*4;
    int i;
    int r= -1;

    if(shared){
        /* caller supplies the pixel planes; just tag the picture */
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;
    }else{
        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
            return -1;
        }

        /* all pictures must share one stride; a get_buffer() that changes
           it mid-stream is unusable */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        if(pic->linesize[1] != pic->linesize[2]){
            av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
            s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
            return -1;
        }

        s->linesize  = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];
    }

    /* allocate the per-picture side tables only once per slot */
    if(pic->qscale_table==NULL){
        if (s->encoding) {
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        }

        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t))
        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
        if(s->out_format == FMT_H264){
            /* H.264 stores motion vectors at 4x4 granularity */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            /* 8x8 granularity is enough for the other codecs */
            for(i=0; i<2; i++){
                CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
                pic->motion_val[i]= pic->motion_val_base[i]+4;
                CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
            }
            pic->motion_subsample_log2= 3;
        }
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            CHECKED_ALLOCZ(pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6)
        }
        pic->qstride= s->mb_stride;
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
    }

    /* It might be nicer if the application would keep track of these
     * but it would require an API change. */
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.

    return 0;
fail: //for the CHECKED_ALLOCZ macro
    if(r>=0)
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    return -1;
}
246
/**
 * Deallocates a picture: releases the frame buffer (unless the pixels are
 * externally owned, i.e. shared) and frees every side table allocated by
 * alloc_picture().
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    int i;

    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);
    pic->mb_type= NULL; // pointed into mb_type_base, now dangling
    for(i=0; i<2; i++){
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->ref_index[i]);
    }

    /* shared pictures never owned their planes; just forget the pointers */
    if(pic->type == FF_BUFFER_TYPE_SHARED){
        for(i=0; i<4; i++){
            pic->base[i]=
            pic->data[i]= NULL;
        }
        pic->type= 0;
    }
}
279
/**
 * Allocates the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, ME/RD/OBMC scratchpads and DCT blocks.
 * @return 0 on success, -1 on allocation failure; partially allocated
 *         buffers are freed later through MPV_common_end().
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*21*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

     //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    CHECKED_ALLOCZ(s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t))
    /* the four scratchpads below all alias the single me.scratchpad
       allocation (obmc at a +16 offset) */
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
        if(s->avctx->noise_reduction){
            CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
        }
    }
    CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM))
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = (short *)(&s->block[i]);
    }
    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
310
311 static void free_duplicate_context(MpegEncContext *s){
312     if(s==NULL) return;
313
314     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
315     av_freep(&s->me.scratchpad);
316     s->me.temp=
317     s->rd_scratchpad=
318     s->b_scratchpad=
319     s->obmc_scratchpad= NULL;
320
321     av_freep(&s->dct_error_sum);
322     av_freep(&s->me.map);
323     av_freep(&s->me.score_map);
324     av_freep(&s->blocks);
325     s->block= NULL;
326 }
327
328 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
329 #define COPY(a) bak->a= src->a
330     COPY(allocated_edge_emu_buffer);
331     COPY(edge_emu_buffer);
332     COPY(me.scratchpad);
333     COPY(me.temp);
334     COPY(rd_scratchpad);
335     COPY(b_scratchpad);
336     COPY(obmc_scratchpad);
337     COPY(me.map);
338     COPY(me.score_map);
339     COPY(blocks);
340     COPY(block);
341     COPY(start_mb_y);
342     COPY(end_mb_y);
343     COPY(me.map_generation);
344     COPY(pb);
345     COPY(dct_error_sum);
346     COPY(dct_count[0]);
347     COPY(dct_count[1]);
348 #undef COPY
349 }
350
351 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
352     MpegEncContext bak;
353     int i;
354     //FIXME copy only needed parts
355 //START_TIMER
356     backup_duplicate_context(&bak, dst);
357     memcpy(dst, src, sizeof(MpegEncContext));
358     backup_duplicate_context(dst, &bak);
359     for(i=0;i<12;i++){
360         dst->pblocks[i] = (short *)(&dst->block[i]);
361     }
362 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
363 }
364
365 /**
366  * sets the given MpegEncContext to common defaults (same for encoding and decoding).
367  * the changed fields will not depend upon the prior state of the MpegEncContext.
368  */
369 void MPV_common_defaults(MpegEncContext *s){
370     s->y_dc_scale_table=
371     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
372     s->chroma_qscale_table= ff_default_chroma_qscale_table;
373     s->progressive_frame= 1;
374     s->progressive_sequence= 1;
375     s->picture_structure= PICT_FRAME;
376
377     s->coded_picture_number = 0;
378     s->picture_number = 0;
379     s->input_picture_number = 0;
380
381     s->picture_in_gop_number = 0;
382
383     s->f_code = 1;
384     s->b_code = 1;
385 }
386
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 * Currently identical to the common defaults; decoder-specific defaults
 * would be added here.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
394
395 /**
396  * init common structure for both encoder and decoder.
397  * this assumes that some variables like width/height are already set
398  */
399 int MPV_common_init(MpegEncContext *s)
400 {
401     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
402
403     s->mb_height = (s->height + 15) / 16;
404
405     if(s->avctx->pix_fmt == PIX_FMT_NONE){
406         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
407         return -1;
408     }
409
410     if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
411         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
412         return -1;
413     }
414
415     if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
416         return -1;
417
418     dsputil_init(&s->dsp, s->avctx);
419     ff_dct_common_init(s);
420
421     s->flags= s->avctx->flags;
422     s->flags2= s->avctx->flags2;
423
424     s->mb_width  = (s->width  + 15) / 16;
425     s->mb_stride = s->mb_width + 1;
426     s->b8_stride = s->mb_width*2 + 1;
427     s->b4_stride = s->mb_width*4 + 1;
428     mb_array_size= s->mb_height * s->mb_stride;
429     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
430
431     /* set chroma shifts */
432     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
433                                                     &(s->chroma_y_shift) );
434
435     /* set default edge pos, will be overriden in decode_header if needed */
436     s->h_edge_pos= s->mb_width*16;
437     s->v_edge_pos= s->mb_height*16;
438
439     s->mb_num = s->mb_width * s->mb_height;
440
441     s->block_wrap[0]=
442     s->block_wrap[1]=
443     s->block_wrap[2]=
444     s->block_wrap[3]= s->b8_stride;
445     s->block_wrap[4]=
446     s->block_wrap[5]= s->mb_stride;
447
448     y_size = s->b8_stride * (2 * s->mb_height + 1);
449     c_size = s->mb_stride * (s->mb_height + 1);
450     yc_size = y_size + 2 * c_size;
451
452     /* convert fourcc to upper case */
453     s->codec_tag=          toupper( s->avctx->codec_tag     &0xFF)
454                         + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
455                         + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
456                         + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
457
458     s->stream_codec_tag=          toupper( s->avctx->stream_codec_tag     &0xFF)
459                                + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
460                                + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
461                                + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
462
463     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
464
465     CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
466     for(y=0; y<s->mb_height; y++){
467         for(x=0; x<s->mb_width; x++){
468             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
469         }
470     }
471     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
472
473     if (s->encoding) {
474         /* Allocate MV tables */
475         CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
476         CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
477         CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
478         CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
479         CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
480         CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
481         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
482         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
483         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
484         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
485         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
486         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
487
488         if(s->msmpeg4_version){
489             CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
490         }
491         CHECKED_ALLOCZ(s->avctx->stats_out, 256);
492
493         /* Allocate MB type table */
494         CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint16_t)) //needed for encoding
495
496         CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
497
498         CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
499         CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
500         CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
501         CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
502         CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
503         CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
504
505         if(s->avctx->noise_reduction){
506             CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
507         }
508     }
509     CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))
510
511     CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
512
513     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
514         /* interlaced direct mode decoding tables */
515             for(i=0; i<2; i++){
516                 int j, k;
517                 for(j=0; j<2; j++){
518                     for(k=0; k<2; k++){
519                         CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k]     , mv_table_size * 2 * sizeof(int16_t))
520                         s->b_field_mv_table[i][j][k]    = s->b_field_mv_table_base[i][j][k]     + s->mb_stride + 1;
521                     }
522                     CHECKED_ALLOCZ(s->b_field_select_table[i][j]     , mb_array_size * 2 * sizeof(uint8_t))
523                     CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j]     , mv_table_size * 2 * sizeof(int16_t))
524                     s->p_field_mv_table[i][j]    = s->p_field_mv_table_base[i][j]     + s->mb_stride + 1;
525                 }
526                 CHECKED_ALLOCZ(s->p_field_select_table[i]      , mb_array_size * 2 * sizeof(uint8_t))
527             }
528     }
529     if (s->out_format == FMT_H263) {
530         /* ac values */
531         CHECKED_ALLOCZ(s->ac_val_base, yc_size * sizeof(int16_t) * 16);
532         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
533         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
534         s->ac_val[2] = s->ac_val[1] + c_size;
535
536         /* cbp values */
537         CHECKED_ALLOCZ(s->coded_block_base, y_size);
538         s->coded_block= s->coded_block_base + s->b8_stride + 1;
539
540         /* cbp, ac_pred, pred_dir */
541         CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
542         CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
543     }
544
545     if (s->h263_pred || s->h263_plus || !s->encoding) {
546         /* dc values */
547         //MN: we need these for error resilience of intra-frames
548         CHECKED_ALLOCZ(s->dc_val_base, yc_size * sizeof(int16_t));
549         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
550         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
551         s->dc_val[2] = s->dc_val[1] + c_size;
552         for(i=0;i<yc_size;i++)
553             s->dc_val_base[i] = 1024;
554     }
555
556     /* which mb is a intra block */
557     CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
558     memset(s->mbintra_table, 1, mb_array_size);
559
560     /* init macroblock skip table */
561     CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
562     //Note the +1 is for a quicker mpeg4 slice_end detection
563     CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
564
565     s->parse_context.state= -1;
566     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
567        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
568        s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
569        s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
570     }
571
572     s->context_initialized = 1;
573
574     s->thread_context[0]= s;
575     threads = s->avctx->thread_count;
576
577     for(i=1; i<threads; i++){
578         s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
579         memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
580     }
581
582     for(i=0; i<threads; i++){
583         if(init_duplicate_context(s->thread_context[i], s) < 0)
584            goto fail;
585         s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
586         s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
587     }
588
589     return 0;
590  fail:
591     MPV_common_end(s);
592     return -1;
593 }
594
/* free everything allocated by MPV_common_init() (and the duplicate thread
   contexts); common to both encoder and decoder */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* free per-thread scratch buffers, then the duplicate contexts
       themselves (slot 0 is s itself and must not be freed) */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* encoder-side MV tables; the non-base pointers point into the base
       allocations and are just reset */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->ac_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* release every picture's frame buffer and side tables */
    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
678
/**
 * Builds the max_level[], max_run[] and index_run[] lookup tables of an
 * RLTable from its (run,level) code list, separately for the "not last
 * coefficient" and "last coefficient" halves of the table.
 * @param static_store if non-NULL, a static buffer the three tables are
 *                     placed in (and initialization happens only once);
 *                     otherwise each table is allocated with av_malloc().
 */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        /* codes [0, rl->last) are "not last", [rl->last, rl->n) are "last" */
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* index_run entries default to rl->n, meaning "no code for this run" */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* NOTE(review): the av_malloc() results below are not checked before
           the memcpy(); on OOM this crashes. The function returns void so it
           cannot report failure — verify callers pass static_store. */
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
729
/**
 * Precomputes, for every qscale 0..31, a combined run/level/length table
 * (rl_vlc) from the already-initialized VLC of the RLTable, so the decoder
 * can fetch dequantized levels in a single lookup.
 */
void init_vlc_rl(RLTable *rl)
{
    int i, q;

    for(q=0; q<32; q++){
        /* dequantization: level * qmul + qadd (identity for q==0) */
        int qmul= q*2;
        int qadd= (q-1)|1;

        if(q==0){
            qmul=1;
            qadd=0;
        }
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ // illegal code
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ //more bits needed
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ //esc
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192; // flags a "last coefficient" code
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}
769
/**
 * Finds an unused slot in s->picture[].
 * @param shared if set, only slots never used for an internal buffer
 *               (type==0) qualify; otherwise previously-used slots are
 *               tried first, then never-used ones.
 * @return index of a free slot; aborts if none exists (see below).
 */
int ff_find_unused_picture(MpegEncContext *s, int shared){
    int i;

    if(shared){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
        }
    }else{
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
        }
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL) return i;
        }
    }

    av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
    /* We could return -1, but the codec would crash trying to draw into a
     * non-existing frame anyway. This is safer than waiting for a random crash.
     * Also the return of this is never useful, an encoder must only allocate
     * as much as allowed in the specification. This has no relationship to how
     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
     * enough for such valid streams).
     * Plus, a decoder has to check stream validity and remove frames if too
     * many reference frames are around. Waiting for "OOM" is not correct at
     * all. Similarly, missing reference frames have to be replaced by
     * interpolated/MC frames, anything else is a bug in the codec ...
     */
    abort();
    return -1;
}
801
802 static void update_noise_reduction(MpegEncContext *s){
803     int intra, i;
804
805     for(intra=0; intra<2; intra++){
806         if(s->dct_count[intra] > (1<<16)){
807             for(i=0; i<64; i++){
808                 s->dct_error_sum[intra][i] >>=1;
809             }
810             s->dct_count[intra] >>= 1;
811         }
812
813         for(i=0; i<64; i++){
814             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
815         }
816     }
817 }
818
819 /**
820  * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
821  */
822 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
823 {
824     int i;
825     AVFrame *pic;
826     s->mb_skipped = 0;
827
828     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
829
830     /* mark&release old frames */
831     if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
832       if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
833         avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
834
835         /* release forgotten pictures */
836         /* if(mpeg124/h263) */
837         if(!s->encoding){
838             for(i=0; i<MAX_PICTURE_COUNT; i++){
839                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
840                     av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
841                     avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
842                 }
843             }
844         }
845       }
846     }
847 alloc:
848     if(!s->encoding){
849         /* release non reference frames */
850         for(i=0; i<MAX_PICTURE_COUNT; i++){
851             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
852                 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
853             }
854         }
855
856         if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
857             pic= (AVFrame*)s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
858         else{
859             i= ff_find_unused_picture(s, 0);
860             pic= (AVFrame*)&s->picture[i];
861         }
862
863         pic->reference= 0;
864         if (!s->dropable){
865             if (s->codec_id == CODEC_ID_H264)
866                 pic->reference = s->picture_structure;
867             else if (s->pict_type != FF_B_TYPE)
868                 pic->reference = 3;
869         }
870
871         pic->coded_picture_number= s->coded_picture_number++;
872
873         if( alloc_picture(s, (Picture*)pic, 0) < 0)
874             return -1;
875
876         s->current_picture_ptr= (Picture*)pic;
877         s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
878         s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
879     }
880
881     s->current_picture_ptr->pict_type= s->pict_type;
882 //    if(s->flags && CODEC_FLAG_QSCALE)
883   //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
884     s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
885
886     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
887
888     if (s->pict_type != FF_B_TYPE) {
889         s->last_picture_ptr= s->next_picture_ptr;
890         if(!s->dropable)
891             s->next_picture_ptr= s->current_picture_ptr;
892     }
893 /*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
894         s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
895         s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
896         s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
897         s->pict_type, s->dropable);*/
898
899     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
900     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
901
902     if(s->pict_type != FF_I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && !s->dropable && s->codec_id != CODEC_ID_H264){
903         av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
904         assert(s->pict_type != FF_B_TYPE); //these should have been dropped if we don't have a reference
905         goto alloc;
906     }
907
908     assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
909
910     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
911         int i;
912         for(i=0; i<4; i++){
913             if(s->picture_structure == PICT_BOTTOM_FIELD){
914                  s->current_picture.data[i] += s->current_picture.linesize[i];
915             }
916             s->current_picture.linesize[i] *= 2;
917             s->last_picture.linesize[i] *=2;
918             s->next_picture.linesize[i] *=2;
919         }
920     }
921
922     s->hurry_up= s->avctx->hurry_up;
923     s->error_recognition= avctx->error_recognition;
924
925     /* set dequantizer, we can't do it during init as it might change for mpeg4
926        and we can't do it in the header decode as init is not called for mpeg4 there yet */
927     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
928         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
929         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
930     }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
931         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
932         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
933     }else{
934         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
935         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
936     }
937
938     if(s->dct_error_sum){
939         assert(s->avctx->noise_reduction && s->encoding);
940
941         update_noise_reduction(s);
942     }
943
944     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
945         return ff_xvmc_field_start(s, avctx);
946
947     return 0;
948 }
949
/* generic function for encode/decode called after a frame has been coded/decoded.
 * Draws the padding edges needed for unrestricted motion vectors, records
 * per-type state for the next frame, releases non-reference frames when
 * encoding, and publishes the finished frame in avctx->coded_frame. */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
    //just to make sure that all data is rendered.
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if(!(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            /* chroma edges are half the luma size (4:2:0 subsampling here
               — NOTE(review): assumes 2x2 chroma shift, verify for 4:2:2) */
            s->dsp.draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    /* leave MMX state clean for any float code that follows */
    emms_c();

    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=FF_B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
1001
1002 /**
1003  * draws an line from (ex, ey) -> (sx, sy).
1004  * @param w width of the image
1005  * @param h height of the image
1006  * @param stride stride/linesize of the image
1007  * @param color color of the arrow
1008  */
1009 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1010     int x, y, fr, f;
1011
1012     sx= av_clip(sx, 0, w-1);
1013     sy= av_clip(sy, 0, h-1);
1014     ex= av_clip(ex, 0, w-1);
1015     ey= av_clip(ey, 0, h-1);
1016
1017     buf[sy*stride + sx]+= color;
1018
1019     if(FFABS(ex - sx) > FFABS(ey - sy)){
1020         if(sx > ex){
1021             FFSWAP(int, sx, ex);
1022             FFSWAP(int, sy, ey);
1023         }
1024         buf+= sx + sy*stride;
1025         ex-= sx;
1026         f= ((ey-sy)<<16)/ex;
1027         for(x= 0; x <= ex; x++){
1028             y = (x*f)>>16;
1029             fr= (x*f)&0xFFFF;
1030             buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
1031             buf[(y+1)*stride + x]+= (color*         fr )>>16;
1032         }
1033     }else{
1034         if(sy > ey){
1035             FFSWAP(int, sx, ex);
1036             FFSWAP(int, sy, ey);
1037         }
1038         buf+= sx + sy*stride;
1039         ey-= sy;
1040         if(ey) f= ((ex-sx)<<16)/ey;
1041         else   f= 0;
1042         for(y= 0; y <= ey; y++){
1043             x = (y*f)>>16;
1044             fr= (y*f)&0xFFFF;
1045             buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
1046             buf[y*stride + x+1]+= (color*         fr )>>16;
1047         }
1048     }
1049 }
1050
1051 /**
1052  * draws an arrow from (ex, ey) -> (sx, sy).
1053  * @param w width of the image
1054  * @param h height of the image
1055  * @param stride stride/linesize of the image
1056  * @param color color of the arrow
1057  */
1058 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1059     int dx,dy;
1060
1061     sx= av_clip(sx, -100, w+100);
1062     sy= av_clip(sy, -100, h+100);
1063     ex= av_clip(ex, -100, w+100);
1064     ey= av_clip(ey, -100, h+100);
1065
1066     dx= ex - sx;
1067     dy= ey - sy;
1068
1069     if(dx*dx + dy*dy > 3*3){
1070         int rx=  dx + dy;
1071         int ry= -dx + dy;
1072         int length= ff_sqrt((rx*rx + ry*ry)<<8);
1073
1074         //FIXME subpixel accuracy
1075         rx= ROUNDED_DIV(rx*3<<4, length);
1076         ry= ROUNDED_DIV(ry*3<<4, length);
1077
1078         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1079         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1080     }
1081     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1082 }
1083
1084 /**
1085  * prints debuging info for the given picture.
1086  */
1087 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1088
1089     if(!pict || !pict->mb_type) return;
1090
1091     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1092         int x,y;
1093
1094         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1095         switch (pict->pict_type) {
1096             case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1097             case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1098             case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1099             case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1100             case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1101             case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1102         }
1103         for(y=0; y<s->mb_height; y++){
1104             for(x=0; x<s->mb_width; x++){
1105                 if(s->avctx->debug&FF_DEBUG_SKIP){
1106                     int count= s->mbskip_table[x + y*s->mb_stride];
1107                     if(count>9) count=9;
1108                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1109                 }
1110                 if(s->avctx->debug&FF_DEBUG_QP){
1111                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1112                 }
1113                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1114                     int mb_type= pict->mb_type[x + y*s->mb_stride];
1115                     //Type & MV direction
1116                     if(IS_PCM(mb_type))
1117                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1118                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1119                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1120                     else if(IS_INTRA4x4(mb_type))
1121                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1122                     else if(IS_INTRA16x16(mb_type))
1123                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1124                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1125                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1126                     else if(IS_DIRECT(mb_type))
1127                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1128                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1129                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1130                     else if(IS_GMC(mb_type))
1131                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1132                     else if(IS_SKIP(mb_type))
1133                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1134                     else if(!USES_LIST(mb_type, 1))
1135                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1136                     else if(!USES_LIST(mb_type, 0))
1137                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1138                     else{
1139                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1140                         av_log(s->avctx, AV_LOG_DEBUG, "X");
1141                     }
1142
1143                     //segmentation
1144                     if(IS_8X8(mb_type))
1145                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1146                     else if(IS_16X8(mb_type))
1147                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1148                     else if(IS_8X16(mb_type))
1149                         av_log(s->avctx, AV_LOG_DEBUG, "|");
1150                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1151                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1152                     else
1153                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1154
1155
1156                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1157                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1158                     else
1159                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1160                 }
1161 //                av_log(s->avctx, AV_LOG_DEBUG, " ");
1162             }
1163             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1164         }
1165     }
1166
1167     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1168         const int shift= 1 + s->quarter_sample;
1169         int mb_y;
1170         uint8_t *ptr;
1171         int i;
1172         int h_chroma_shift, v_chroma_shift, block_height;
1173         const int width = s->avctx->width;
1174         const int height= s->avctx->height;
1175         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1176         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1177         s->low_delay=0; //needed to see the vectors without trashing the buffers
1178
1179         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1180         for(i=0; i<3; i++){
1181             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1182             pict->data[i]= s->visualization_buffer[i];
1183         }
1184         pict->type= FF_BUFFER_TYPE_COPY;
1185         ptr= pict->data[0];
1186         block_height = 16>>v_chroma_shift;
1187
1188         for(mb_y=0; mb_y<s->mb_height; mb_y++){
1189             int mb_x;
1190             for(mb_x=0; mb_x<s->mb_width; mb_x++){
1191                 const int mb_index= mb_x + mb_y*s->mb_stride;
1192                 if((s->avctx->debug_mv) && pict->motion_val){
1193                   int type;
1194                   for(type=0; type<3; type++){
1195                     int direction = 0;
1196                     switch (type) {
1197                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1198                                 continue;
1199                               direction = 0;
1200                               break;
1201                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1202                                 continue;
1203                               direction = 0;
1204                               break;
1205                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1206                                 continue;
1207                               direction = 1;
1208                               break;
1209                     }
1210                     if(!USES_LIST(pict->mb_type[mb_index], direction))
1211                         continue;
1212
1213                     if(IS_8X8(pict->mb_type[mb_index])){
1214                       int i;
1215                       for(i=0; i<4; i++){
1216                         int sx= mb_x*16 + 4 + 8*(i&1);
1217                         int sy= mb_y*16 + 4 + 8*(i>>1);
1218                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1219                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1220                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1221                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1222                       }
1223                     }else if(IS_16X8(pict->mb_type[mb_index])){
1224                       int i;
1225                       for(i=0; i<2; i++){
1226                         int sx=mb_x*16 + 8;
1227                         int sy=mb_y*16 + 4 + 8*i;
1228                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1229                         int mx=(pict->motion_val[direction][xy][0]>>shift);
1230                         int my=(pict->motion_val[direction][xy][1]>>shift);
1231
1232                         if(IS_INTERLACED(pict->mb_type[mb_index]))
1233                             my*=2;
1234
1235                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1236                       }
1237                     }else if(IS_8X16(pict->mb_type[mb_index])){
1238                       int i;
1239                       for(i=0; i<2; i++){
1240                         int sx=mb_x*16 + 4 + 8*i;
1241                         int sy=mb_y*16 + 8;
1242                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1243                         int mx=(pict->motion_val[direction][xy][0]>>shift);
1244                         int my=(pict->motion_val[direction][xy][1]>>shift);
1245
1246                         if(IS_INTERLACED(pict->mb_type[mb_index]))
1247                             my*=2;
1248
1249                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1250                       }
1251                     }else{
1252                       int sx= mb_x*16 + 8;
1253                       int sy= mb_y*16 + 8;
1254                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1255                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1256                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1257                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1258                     }
1259                   }
1260                 }
1261                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1262                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1263                     int y;
1264                     for(y=0; y<block_height; y++){
1265                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1266                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1267                     }
1268                 }
1269                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1270                     int mb_type= pict->mb_type[mb_index];
1271                     uint64_t u,v;
1272                     int y;
1273 #define COLOR(theta, r)\
1274 u= (int)(128 + r*cos(theta*3.141592/180));\
1275 v= (int)(128 + r*sin(theta*3.141592/180));
1276
1277
1278                     u=v=128;
1279                     if(IS_PCM(mb_type)){
1280                         COLOR(120,48)
1281                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1282                         COLOR(30,48)
1283                     }else if(IS_INTRA4x4(mb_type)){
1284                         COLOR(90,48)
1285                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1286 //                        COLOR(120,48)
1287                     }else if(IS_DIRECT(mb_type)){
1288                         COLOR(150,48)
1289                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1290                         COLOR(170,48)
1291                     }else if(IS_GMC(mb_type)){
1292                         COLOR(190,48)
1293                     }else if(IS_SKIP(mb_type)){
1294 //                        COLOR(180,48)
1295                     }else if(!USES_LIST(mb_type, 1)){
1296                         COLOR(240,48)
1297                     }else if(!USES_LIST(mb_type, 0)){
1298                         COLOR(0,48)
1299                     }else{
1300                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1301                         COLOR(300,48)
1302                     }
1303
1304                     u*= 0x0101010101010101ULL;
1305                     v*= 0x0101010101010101ULL;
1306                     for(y=0; y<block_height; y++){
1307                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1308                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
1309                     }
1310
1311                     //segmentation
1312                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1313                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1314                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1315                     }
1316                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1317                         for(y=0; y<16; y++)
1318                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1319                     }
1320                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1321                         int dm= 1 << (mv_sample_log2-2);
1322                         for(i=0; i<4; i++){
1323                             int sx= mb_x*16 + 8*(i&1);
1324                             int sy= mb_y*16 + 8*(i>>1);
1325                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1326                             //FIXME bidir
1327                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1328                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1329                                 for(y=0; y<8; y++)
1330                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1331                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1332                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1333                         }
1334                     }
1335
1336                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1337                         // hmm
1338                     }
1339                 }
1340                 s->mbskip_table[mb_index]=0;
1341             }
1342         }
1343     }
1344 }
1345
1346 static inline int hpel_motion_lowres(MpegEncContext *s,
1347                                   uint8_t *dest, uint8_t *src,
1348                                   int field_based, int field_select,
1349                                   int src_x, int src_y,
1350                                   int width, int height, int stride,
1351                                   int h_edge_pos, int v_edge_pos,
1352                                   int w, int h, h264_chroma_mc_func *pix_op,
1353                                   int motion_x, int motion_y)
1354 {
1355     const int lowres= s->avctx->lowres;
1356     const int s_mask= (2<<lowres)-1;
1357     int emu=0;
1358     int sx, sy;
1359
1360     if(s->quarter_sample){
1361         motion_x/=2;
1362         motion_y/=2;
1363     }
1364
1365     sx= motion_x & s_mask;
1366     sy= motion_y & s_mask;
1367     src_x += motion_x >> (lowres+1);
1368     src_y += motion_y >> (lowres+1);
1369
1370     src += src_y * stride + src_x;
1371
1372     if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
1373        || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1374         ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1375                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1376         src= s->edge_emu_buffer;
1377         emu=1;
1378     }
1379
1380     sx <<= 2 - lowres;
1381     sy <<= 2 - lowres;
1382     if(field_select)
1383         src += s->linesize;
1384     pix_op[lowres](dest, src, stride, h, sx, sy);
1385     return emu;
1386 }
1387
/* apply one mpeg motion vector to the three components.
 * Handles frame and field based prediction, per-format chroma vector
 * derivation (H.263, H.261, MPEG-1/2 style) and edge emulation; operates
 * on the reduced-resolution planes when avctx->lowres is set. */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;          // luma block size at this lowres level
    const int s_mask= (2<<lowres)-1;       // mask selecting the sub-pel phase bits
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    /* split the luma vector into integer position and sub-pel phase */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    /* derive the chroma vector per output format */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
        uvsrc_y = s->mb_y*block_s               + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* reference area reaches outside the picture: copy through the
       edge emulation buffer for all three planes */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* rescale sub-pel phases to the units expected by the MC primitives;
       luma uses the next finer pix_op level (2*block_s wide) */
    sx <<= 2 - lowres;
    sy <<= 2 - lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx <<= 2 - lowres;
        uvsy <<= 2 - lowres;
        pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
1484
1485 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1486                                      uint8_t *dest_cb, uint8_t *dest_cr,
1487                                      uint8_t **ref_picture,
1488                                      h264_chroma_mc_func *pix_op,
1489                                      int mx, int my){
1490     const int lowres= s->avctx->lowres;
1491     const int block_s= 8>>lowres;
1492     const int s_mask= (2<<lowres)-1;
1493     const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1494     const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1495     int emu=0, src_x, src_y, offset, sx, sy;
1496     uint8_t *ptr;
1497
1498     if(s->quarter_sample){
1499         mx/=2;
1500         my/=2;
1501     }
1502
1503     /* In case of 8X8, we construct a single chroma motion vector
1504        with a special rounding */
1505     mx= ff_h263_round_chroma(mx);
1506     my= ff_h263_round_chroma(my);
1507
1508     sx= mx & s_mask;
1509     sy= my & s_mask;
1510     src_x = s->mb_x*block_s + (mx >> (lowres+1));
1511     src_y = s->mb_y*block_s + (my >> (lowres+1));
1512
1513     offset = src_y * s->uvlinesize + src_x;
1514     ptr = ref_picture[1] + offset;
1515     if(s->flags&CODEC_FLAG_EMU_EDGE){
1516         if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1517            || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1518             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1519             ptr= s->edge_emu_buffer;
1520             emu=1;
1521         }
1522     }
1523     sx <<= 2 - lowres;
1524     sy <<= 2 - lowres;
1525     pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1526
1527     ptr = ref_picture[2] + offset;
1528     if(emu){
1529         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1530         ptr= s->edge_emu_buffer;
1531     }
1532     pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1533 }
1534
/**
 * motion compensation of a single macroblock (lowres variant)
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;   // 8x8 block size in the downscaled picture

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; mx/my accumulate their sum so chroma can use a
           single derived vector */
        mx = 0;
        my = 0;
            for(i=0;i<4;i++) {
                hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                            ref_picture[0], 0, 0,
                            (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                            block_s, block_s, pix_op,
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s);
        } else {
            /* field picture referencing the opposite-parity field of the
               frame currently being decoded: take it from current_picture */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                /* reference lives in the picture being decoded */
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s);

            /* advance destinations to the lower 16x8 half */
            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: predictions from both field parities are averaged */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
    break;
    default: assert(0);
    }
}
1660
/* Dequantize an intra block[] and write (put) its IDCT to dest[]. */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
1668
1669 /* add block[] to dest[] */
1670 static inline void add_dct(MpegEncContext *s,
1671                            DCTELEM *block, int i, uint8_t *dest, int line_size)
1672 {
1673     if (s->block_last_index[i] >= 0) {
1674         s->dsp.idct_add (dest, line_size, block);
1675     }
1676 }
1677
1678 static inline void add_dequant_dct(MpegEncContext *s,
1679                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1680 {
1681     if (s->block_last_index[i] >= 0) {
1682         s->dct_unquantize_inter(s, block, i, qscale);
1683
1684         s->dsp.idct_add (dest, line_size, block);
1685     }
1686 }
1687
1688 /**
1689  * cleans dc, ac, coded_block for the current non intra MB
1690  */
1691 void ff_clean_intra_table_entries(MpegEncContext *s)
1692 {
1693     int wrap = s->b8_stride;
1694     int xy = s->block_index[0];
1695
1696     s->dc_val[0][xy           ] =
1697     s->dc_val[0][xy + 1       ] =
1698     s->dc_val[0][xy     + wrap] =
1699     s->dc_val[0][xy + 1 + wrap] = 1024;
1700     /* ac pred */
1701     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1702     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1703     if (s->msmpeg4_version>=3) {
1704         s->coded_block[xy           ] =
1705         s->coded_block[xy + 1       ] =
1706         s->coded_block[xy     + wrap] =
1707         s->coded_block[xy + 1 + wrap] = 0;
1708     }
1709     /* chroma */
1710     wrap = s->mb_stride;
1711     xy = s->mb_x + s->mb_y * wrap;
1712     s->dc_val[1][xy] =
1713     s->dc_val[2][xy] = 1024;
1714     /* ac pred */
1715     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1716     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1717
1718     s->mbintra_table[xy]= 0;
1719 }
1720
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir   : motion vector direction
   s->mv_type  : motion vector type
   s->mv       : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    int mb_x, mb_y;
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients */
       int i,j;
       DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
       for(i=0; i<6; i++)
           for(j=0; j<64; j++)
               *dct++ = block[i][s->dsp.idct_permutation[j]];
    }

    s->current_picture.qscale_table[mb_xy]= s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            /* reset DC predictors to the neutral value */
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize= s->current_picture.linesize[1];
        const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age= s->current_picture.age;

            assert(age);

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=FF_I_TYPE);

                (*mbskip_ptr) ++; /* indicate that this time we skipped it */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;

                /* if previous was skipped too, then nothing to do !  */
                if (*mbskip_ptr >= age && s->current_picture.reference){
                    return;
                }
            } else if(!s->current_picture.reference){
                (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT interleaves the two fields, doubling the stride */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* not readable: reconstruct into the scratchpad and copy later */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){
                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
                        /* bidirectional: the backward prediction is averaged in */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
                        /* bidirectional: the backward prediction is averaged in */
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->hurry_up>1) goto skip_idct;
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                /* codecs whose unquantize was not already done while parsing */
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 - two chroma blocks per plane */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                /* blocks are already dequantized; only the IDCT add remains */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 - two chroma blocks per plane */
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* mpeg1/2: blocks were dequantized during parsing, plain IDCT put */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* copy from the scratchpad to the real destination */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
1971
1972 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
1973 #if !CONFIG_SMALL
1974     if(s->out_format == FMT_MPEG1) {
1975         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
1976         else                 MPV_decode_mb_internal(s, block, 0, 1);
1977     } else
1978 #endif
1979     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
1980     else                  MPV_decode_mb_internal(s, block, 0, 0);
1981 }
1982
1983 /**
1984  *
1985  * @param h is the normal height, this will be reduced automatically if needed for the last row
1986  */
1987 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
1988     if (s->avctx->draw_horiz_band) {
1989         AVFrame *src;
1990         int offset[4];
1991
1992         if(s->picture_structure != PICT_FRAME){
1993             h <<= 1;
1994             y <<= 1;
1995             if(s->first_field  && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
1996         }
1997
1998         h= FFMIN(h, s->avctx->height - y);
1999
2000         if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2001             src= (AVFrame*)s->current_picture_ptr;
2002         else if(s->last_picture_ptr)
2003             src= (AVFrame*)s->last_picture_ptr;
2004         else
2005             return;
2006
2007         if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2008             offset[0]=
2009             offset[1]=
2010             offset[2]=
2011             offset[3]= 0;
2012         }else{
2013             offset[0]= y * s->linesize;
2014             offset[1]=
2015             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2016             offset[3]= 0;
2017         }
2018
2019         emms_c();
2020
2021         s->avctx->draw_horiz_band(s->avctx, src, offset,
2022                                   y, s->picture_structure, h);
2023     }
2024 }
2025
2026 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2027     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2028     const int uvlinesize= s->current_picture.linesize[1];
2029     const int mb_size= 4 - s->avctx->lowres;
2030
2031     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
2032     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
2033     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2034     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2035     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2036     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2037     //block_index is not used by mpeg2, so it is not affected by chroma_format
2038
2039     s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2040     s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2041     s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2042
2043     if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2044     {
2045         s->dest[0] += s->mb_y *   linesize << mb_size;
2046         s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2047         s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2048     }
2049 }
2050
2051 void ff_mpeg_flush(AVCodecContext *avctx){
2052     int i;
2053     MpegEncContext *s = avctx->priv_data;
2054
2055     if(s==NULL || s->picture==NULL)
2056         return;
2057
2058     for(i=0; i<MAX_PICTURE_COUNT; i++){
2059        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2060                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
2061         avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
2062     }
2063     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2064
2065     s->mb_x= s->mb_y= 0;
2066
2067     s->parse_context.state= -1;
2068     s->parse_context.frame_start_found= 0;
2069     s->parse_context.overread= 0;
2070     s->parse_context.overread_index= 0;
2071     s->parse_context.index= 0;
2072     s->parse_context.last_index= 0;
2073     s->bitstream_buffer_size=0;
2074     s->pp_time=0;
2075 }
2076
2077 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2078                                    DCTELEM *block, int n, int qscale)
2079 {
2080     int i, level, nCoeffs;
2081     const uint16_t *quant_matrix;
2082
2083     nCoeffs= s->block_last_index[n];
2084
2085     if (n < 4)
2086         block[0] = block[0] * s->y_dc_scale;
2087     else
2088         block[0] = block[0] * s->c_dc_scale;
2089     /* XXX: only mpeg1 */
2090     quant_matrix = s->intra_matrix;
2091     for(i=1;i<=nCoeffs;i++) {
2092         int j= s->intra_scantable.permutated[i];
2093         level = block[j];
2094         if (level) {
2095             if (level < 0) {
2096                 level = -level;
2097                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2098                 level = (level - 1) | 1;
2099                 level = -level;
2100             } else {
2101                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2102                 level = (level - 1) | 1;
2103             }
2104             block[j] = level;
2105         }
2106     }
2107 }
2108
2109 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2110                                    DCTELEM *block, int n, int qscale)
2111 {
2112     int i, level, nCoeffs;
2113     const uint16_t *quant_matrix;
2114
2115     nCoeffs= s->block_last_index[n];
2116
2117     quant_matrix = s->inter_matrix;
2118     for(i=0; i<=nCoeffs; i++) {
2119         int j= s->intra_scantable.permutated[i];
2120         level = block[j];
2121         if (level) {
2122             if (level < 0) {
2123                 level = -level;
2124                 level = (((level << 1) + 1) * qscale *
2125                          ((int) (quant_matrix[j]))) >> 4;
2126                 level = (level - 1) | 1;
2127                 level = -level;
2128             } else {
2129                 level = (((level << 1) + 1) * qscale *
2130                          ((int) (quant_matrix[j]))) >> 4;
2131                 level = (level - 1) | 1;
2132             }
2133             block[j] = level;
2134         }
2135     }
2136 }
2137
2138 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2139                                    DCTELEM *block, int n, int qscale)
2140 {
2141     int i, level, nCoeffs;
2142     const uint16_t *quant_matrix;
2143
2144     if(s->alternate_scan) nCoeffs= 63;
2145     else nCoeffs= s->block_last_index[n];
2146
2147     if (n < 4)
2148         block[0] = block[0] * s->y_dc_scale;
2149     else
2150         block[0] = block[0] * s->c_dc_scale;
2151     quant_matrix = s->intra_matrix;
2152     for(i=1;i<=nCoeffs;i++) {
2153         int j= s->intra_scantable.permutated[i];
2154         level = block[j];
2155         if (level) {
2156             if (level < 0) {
2157                 level = -level;
2158                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2159                 level = -level;
2160             } else {
2161                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2162             }
2163             block[j] = level;
2164         }
2165     }
2166 }
2167
2168 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2169                                    DCTELEM *block, int n, int qscale)
2170 {
2171     int i, level, nCoeffs;
2172     const uint16_t *quant_matrix;
2173     int sum=-1;
2174
2175     if(s->alternate_scan) nCoeffs= 63;
2176     else nCoeffs= s->block_last_index[n];
2177
2178     if (n < 4)
2179         block[0] = block[0] * s->y_dc_scale;
2180     else
2181         block[0] = block[0] * s->c_dc_scale;
2182     quant_matrix = s->intra_matrix;
2183     for(i=1;i<=nCoeffs;i++) {
2184         int j= s->intra_scantable.permutated[i];
2185         level = block[j];
2186         if (level) {
2187             if (level < 0) {
2188                 level = -level;
2189                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2190                 level = -level;
2191             } else {
2192                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2193             }
2194             block[j] = level;
2195             sum+=level;
2196         }
2197     }
2198     block[63]^=sum&1;
2199 }
2200
2201 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2202                                    DCTELEM *block, int n, int qscale)
2203 {
2204     int i, level, nCoeffs;
2205     const uint16_t *quant_matrix;
2206     int sum=-1;
2207
2208     if(s->alternate_scan) nCoeffs= 63;
2209     else nCoeffs= s->block_last_index[n];
2210
2211     quant_matrix = s->inter_matrix;
2212     for(i=0; i<=nCoeffs; i++) {
2213         int j= s->intra_scantable.permutated[i];
2214         level = block[j];
2215         if (level) {
2216             if (level < 0) {
2217                 level = -level;
2218                 level = (((level << 1) + 1) * qscale *
2219                          ((int) (quant_matrix[j]))) >> 4;
2220                 level = -level;
2221             } else {
2222                 level = (((level << 1) + 1) * qscale *
2223                          ((int) (quant_matrix[j]))) >> 4;
2224             }
2225             block[j] = level;
2226             sum+=level;
2227         }
2228     }
2229     block[63]^=sum&1;
2230 }
2231
2232 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2233                                   DCTELEM *block, int n, int qscale)
2234 {
2235     int i, level, qmul, qadd;
2236     int nCoeffs;
2237
2238     assert(s->block_last_index[n]>=0);
2239
2240     qmul = qscale << 1;
2241
2242     if (!s->h263_aic) {
2243         if (n < 4)
2244             block[0] = block[0] * s->y_dc_scale;
2245         else
2246             block[0] = block[0] * s->c_dc_scale;
2247         qadd = (qscale - 1) | 1;
2248     }else{
2249         qadd = 0;
2250     }
2251     if(s->ac_pred)
2252         nCoeffs=63;
2253     else
2254         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2255
2256     for(i=1; i<=nCoeffs; i++) {
2257         level = block[i];
2258         if (level) {
2259             if (level < 0) {
2260                 level = level * qmul - qadd;
2261             } else {
2262                 level = level * qmul + qadd;
2263             }
2264             block[i] = level;
2265         }
2266     }
2267 }
2268
2269 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2270                                   DCTELEM *block, int n, int qscale)
2271 {
2272     int i, level, qmul, qadd;
2273     int nCoeffs;
2274
2275     assert(s->block_last_index[n]>=0);
2276
2277     qadd = (qscale - 1) | 1;
2278     qmul = qscale << 1;
2279
2280     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2281
2282     for(i=0; i<=nCoeffs; i++) {
2283         level = block[i];
2284         if (level) {
2285             if (level < 0) {
2286                 level = level * qmul - qadd;
2287             } else {
2288                 level = level * qmul + qadd;
2289             }
2290             block[i] = level;
2291         }
2292     }
2293 }
2294
2295 /**
2296  * set qscale and update qscale dependent variables.
2297  */
2298 void ff_set_qscale(MpegEncContext * s, int qscale)
2299 {
2300     if (qscale < 1)
2301         qscale = 1;
2302     else if (qscale > 31)
2303         qscale = 31;
2304
2305     s->qscale = qscale;
2306     s->chroma_qscale= s->chroma_qscale_table[qscale];
2307
2308     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2309     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2310 }