1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "idctdsp.h"
38 #include "internal.h"
39 #include "mathops.h"
40 #include "mpeg_er.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "mjpegenc.h"
45 #include "msmpeg4.h"
46 #include "qpeldsp.h"
47 #include "xvmc_internal.h"
48 #include "thread.h"
49 #include "wmv2.h"
50 #include <limits.h>
51
52 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
53                                    int16_t *block, int n, int qscale)
54 {
55     int i, level, nCoeffs;
56     const uint16_t *quant_matrix;
57
58     nCoeffs= s->block_last_index[n];
59
60     if (n < 4)
61         block[0] = block[0] * s->y_dc_scale;
62     else
63         block[0] = block[0] * s->c_dc_scale;
64     /* XXX: only MPEG-1 */
65     quant_matrix = s->intra_matrix;
66     for(i=1;i<=nCoeffs;i++) {
67         int j= s->intra_scantable.permutated[i];
68         level = block[j];
69         if (level) {
70             if (level < 0) {
71                 level = -level;
72                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
73                 level = (level - 1) | 1;
74                 level = -level;
75             } else {
76                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
77                 level = (level - 1) | 1;
78             }
79             block[j] = level;
80         }
81     }
82 }
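/* Worked example for the AC path above (illustrative numbers, not taken from
 * any particular stream): a quantized level of 5 with qscale 4 and an intra
 * matrix entry of 16 reconstructs as (5 * 4 * 16) >> 3 = 40, and the
 * "(level - 1) | 1" step then forces the result odd, giving 39.  MPEG-1 uses
 * this per-coefficient oddification instead of MPEG-2 style mismatch control. */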
83
84 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
85                                    int16_t *block, int n, int qscale)
86 {
87     int i, level, nCoeffs;
88     const uint16_t *quant_matrix;
89
90     nCoeffs= s->block_last_index[n];
91
92     quant_matrix = s->inter_matrix;
93     for(i=0; i<=nCoeffs; i++) {
94         int j= s->intra_scantable.permutated[i];
95         level = block[j];
96         if (level) {
97             if (level < 0) {
98                 level = -level;
99                 level = (((level << 1) + 1) * qscale *
100                          ((int) (quant_matrix[j]))) >> 4;
101                 level = (level - 1) | 1;
102                 level = -level;
103             } else {
104                 level = (((level << 1) + 1) * qscale *
105                          ((int) (quant_matrix[j]))) >> 4;
106                 level = (level - 1) | 1;
107             }
108             block[j] = level;
109         }
110     }
111 }
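/* Worked example (illustrative numbers): a quantized inter level of 3 with
 * qscale 4 and a matrix entry of 16 reconstructs as
 * ((2 * 3 + 1) * 4 * 16) >> 4 = 28, which "(level - 1) | 1" rounds to the odd
 * value 27.  Unlike the intra path, there is no separate DC scaling here. */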
112
113 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
114                                    int16_t *block, int n, int qscale)
115 {
116     int i, level, nCoeffs;
117     const uint16_t *quant_matrix;
118
119     if(s->alternate_scan) nCoeffs= 63;
120     else nCoeffs= s->block_last_index[n];
121
122     if (n < 4)
123         block[0] = block[0] * s->y_dc_scale;
124     else
125         block[0] = block[0] * s->c_dc_scale;
126     quant_matrix = s->intra_matrix;
127     for(i=1;i<=nCoeffs;i++) {
128         int j= s->intra_scantable.permutated[i];
129         level = block[j];
130         if (level) {
131             if (level < 0) {
132                 level = -level;
133                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
134                 level = -level;
135             } else {
136                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
137             }
138             block[j] = level;
139         }
140     }
141 }
142
143 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
144                                    int16_t *block, int n, int qscale)
145 {
146     int i, level, nCoeffs;
147     const uint16_t *quant_matrix;
148     int sum=-1;
149
150     if(s->alternate_scan) nCoeffs= 63;
151     else nCoeffs= s->block_last_index[n];
152
153     if (n < 4)
154         block[0] = block[0] * s->y_dc_scale;
155     else
156         block[0] = block[0] * s->c_dc_scale;
157     quant_matrix = s->intra_matrix;
158     for(i=1;i<=nCoeffs;i++) {
159         int j= s->intra_scantable.permutated[i];
160         level = block[j];
161         if (level) {
162             if (level < 0) {
163                 level = -level;
164                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
165                 level = -level;
166             } else {
167                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
168             }
169             block[j] = level;
170             sum+=level;
171         }
172     }
173     block[63]^=sum&1;
174 }
175
176 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
177                                    int16_t *block, int n, int qscale)
178 {
179     int i, level, nCoeffs;
180     const uint16_t *quant_matrix;
181     int sum=-1;
182
183     if(s->alternate_scan) nCoeffs= 63;
184     else nCoeffs= s->block_last_index[n];
185
186     quant_matrix = s->inter_matrix;
187     for(i=0; i<=nCoeffs; i++) {
188         int j= s->intra_scantable.permutated[i];
189         level = block[j];
190         if (level) {
191             if (level < 0) {
192                 level = -level;
193                 level = (((level << 1) + 1) * qscale *
194                          ((int) (quant_matrix[j]))) >> 4;
195                 level = -level;
196             } else {
197                 level = (((level << 1) + 1) * qscale *
198                          ((int) (quant_matrix[j]))) >> 4;
199             }
200             block[j] = level;
201             sum+=level;
202         }
203     }
204     block[63]^=sum&1;
205 }
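/* The "sum" bookkeeping and "block[63] ^= sum & 1" in the two functions above
 * implement MPEG-2 mismatch control: the parity of the reconstructed
 * coefficients is tracked and, depending on it, the least significant bit of
 * the last coefficient is toggled so that slightly different IDCT
 * implementations cannot drift apart.  (Rough description; the exact rule is
 * in the MPEG-2 spec.) */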
206
207 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
208                                   int16_t *block, int n, int qscale)
209 {
210     int i, level, qmul, qadd;
211     int nCoeffs;
212
213     assert(s->block_last_index[n]>=0);
214
215     qmul = qscale << 1;
216
217     if (!s->h263_aic) {
218         if (n < 4)
219             block[0] = block[0] * s->y_dc_scale;
220         else
221             block[0] = block[0] * s->c_dc_scale;
222         qadd = (qscale - 1) | 1;
223     }else{
224         qadd = 0;
225     }
226     if(s->ac_pred)
227         nCoeffs=63;
228     else
229         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
230
231     for(i=1; i<=nCoeffs; i++) {
232         level = block[i];
233         if (level) {
234             if (level < 0) {
235                 level = level * qmul - qadd;
236             } else {
237                 level = level * qmul + qadd;
238             }
239             block[i] = level;
240         }
241     }
242 }
243
244 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
245                                   int16_t *block, int n, int qscale)
246 {
247     int i, level, qmul, qadd;
248     int nCoeffs;
249
250     assert(s->block_last_index[n]>=0);
251
252     qadd = (qscale - 1) | 1;
253     qmul = qscale << 1;
254
255     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
256
257     for(i=0; i<=nCoeffs; i++) {
258         level = block[i];
259         if (level) {
260             if (level < 0) {
261                 level = level * qmul - qadd;
262             } else {
263                 level = level * qmul + qadd;
264             }
265             block[i] = level;
266         }
267     }
268 }
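/* Worked example for the two H.263 dequantizers above (illustrative numbers):
 * with qscale 5, qmul = 10 and qadd = (5 - 1) | 1 = 5, so a level of 2 becomes
 * 2 * 10 + 5 = 25 and a level of -2 becomes -25.  The intra variant skips qadd
 * (and the DC scaling) when advanced intra coding (h263_aic) is in use. */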
269
270 /* init common dct for both encoder and decoder */
271 static av_cold int dct_init(MpegEncContext *s)
272 {
273     ff_blockdsp_init(&s->bdsp, s->avctx);
274     ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
275     ff_mpegvideodsp_init(&s->mdsp);
276     ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
277
278     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
279     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
280     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
281     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
282     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
283     if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
284         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
285     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
286
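    /* The architecture-specific init functions below may, where available,
     * replace the C dequantizer pointers set above with optimized versions;
     * the C implementations remain the reference behaviour. */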
287     if (HAVE_INTRINSICS_NEON)
288         ff_mpv_common_init_neon(s);
289
290     if (ARCH_ARM)
291         ff_mpv_common_init_arm(s);
292     if (ARCH_PPC)
293         ff_mpv_common_init_ppc(s);
294     if (ARCH_X86)
295         ff_mpv_common_init_x86(s);
296
297     return 0;
298 }
299
300 av_cold void ff_mpv_idct_init(MpegEncContext *s)
301 {
302     ff_idctdsp_init(&s->idsp, s->avctx);
303
304     /* load & permute the scantables;
305      * note: only WMV uses different ones
306      */
307     if (s->alternate_scan) {
308         ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
309         ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
310     } else {
311         ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
312         ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
313     }
314     ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
315     ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
316 }
317
318 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
319 {
320     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0,
321                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
322                             s->mb_stride, s->mb_height, s->b8_stride,
323                             &s->linesize, &s->uvlinesize);
324 }
325
326 static int init_duplicate_context(MpegEncContext *s)
327 {
328     int y_size = s->b8_stride * (2 * s->mb_height + 1);
329     int c_size = s->mb_stride * (s->mb_height + 1);
330     int yc_size = y_size + 2 * c_size;
331     int i;
332
333     s->sc.edge_emu_buffer =
334     s->me.scratchpad   =
335     s->me.temp         =
336     s->sc.rd_scratchpad   =
337     s->sc.b_scratchpad    =
338     s->sc.obmc_scratchpad = NULL;
339
340     if (s->encoding) {
341         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
342                           ME_MAP_SIZE * sizeof(uint32_t), fail)
343         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
344                           ME_MAP_SIZE * sizeof(uint32_t), fail)
345         if (s->noise_reduction) {
346             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
347                               2 * 64 * sizeof(int), fail)
348         }
349     }
350     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
351     s->block = s->blocks[0];
352
353     for (i = 0; i < 12; i++) {
354         s->pblocks[i] = &s->block[i];
355     }
356     if (s->avctx->codec_tag == AV_RL32("VCR2")) {
357         // exchange uv
358         int16_t (*tmp)[64];
359         tmp           = s->pblocks[4];
360         s->pblocks[4] = s->pblocks[5];
361         s->pblocks[5] = tmp;
362     }
363
364     if (s->out_format == FMT_H263) {
365         /* ac values */
366         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
367                           yc_size * sizeof(int16_t) * 16, fail);
368         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
369         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
370         s->ac_val[2] = s->ac_val[1] + c_size;
371     }
372
373     return 0;
374 fail:
375     return -1; // freed later via ff_mpv_common_end()
376 }
377
378 static void free_duplicate_context(MpegEncContext *s)
379 {
380     if (!s)
381         return;
382
383     av_freep(&s->sc.edge_emu_buffer);
384     av_freep(&s->me.scratchpad);
385     s->me.temp =
386     s->sc.rd_scratchpad =
387     s->sc.b_scratchpad =
388     s->sc.obmc_scratchpad = NULL;
389
390     av_freep(&s->dct_error_sum);
391     av_freep(&s->me.map);
392     av_freep(&s->me.score_map);
393     av_freep(&s->blocks);
394     av_freep(&s->ac_val_base);
395     s->block = NULL;
396 }
397
398 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
399 {
400 #define COPY(a) bak->a = src->a
401     COPY(sc.edge_emu_buffer);
402     COPY(me.scratchpad);
403     COPY(me.temp);
404     COPY(sc.rd_scratchpad);
405     COPY(sc.b_scratchpad);
406     COPY(sc.obmc_scratchpad);
407     COPY(me.map);
408     COPY(me.score_map);
409     COPY(blocks);
410     COPY(block);
411     COPY(start_mb_y);
412     COPY(end_mb_y);
413     COPY(me.map_generation);
414     COPY(pb);
415     COPY(dct_error_sum);
416     COPY(dct_count[0]);
417     COPY(dct_count[1]);
418     COPY(ac_val_base);
419     COPY(ac_val[0]);
420     COPY(ac_val[1]);
421     COPY(ac_val[2]);
422 #undef COPY
423 }
424
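/* Copy the shared stream state from *src into *dst while keeping dst's own
 * per-thread allocations: backup_duplicate_context() saves dst's scratch and
 * block pointers, the memcpy() overwrites everything, and the second call
 * restores the saved pointers.  This is what lets slice threads track the
 * main context without sharing scratch buffers. */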
425 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
426 {
427     MpegEncContext bak;
428     int i, ret;
429     // FIXME copy only needed parts
430     // START_TIMER
431     backup_duplicate_context(&bak, dst);
432     memcpy(dst, src, sizeof(MpegEncContext));
433     backup_duplicate_context(dst, &bak);
434     for (i = 0; i < 12; i++) {
435         dst->pblocks[i] = &dst->block[i];
436     }
437     if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
438         // exchange uv
439         int16_t (*tmp)[64];
440         tmp             = dst->pblocks[4];
441         dst->pblocks[4] = dst->pblocks[5];
442         dst->pblocks[5] = tmp;
443     }
444     if (!dst->sc.edge_emu_buffer &&
445         (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
446                                        &dst->sc, dst->linesize)) < 0) {
447         av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
448                "scratch buffers.\n");
449         return ret;
450     }
451     // STOP_TIMER("update_duplicate_context")
452     // about 10k cycles / 0.01 sec for 1000 frames at 1 GHz with 2 threads
453     return 0;
454 }
455
456 int ff_mpeg_update_thread_context(AVCodecContext *dst,
457                                   const AVCodecContext *src)
458 {
459     int i, ret;
460     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
461
462     if (dst == src || !s1->context_initialized)
463         return 0;
464
465     // FIXME can parameters change on I-frames?
466     // in that case dst may need a reinit
467     if (!s->context_initialized) {
468         int err;
469         memcpy(s, s1, sizeof(MpegEncContext));
470
471         s->avctx                 = dst;
472         s->bitstream_buffer      = NULL;
473         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
474
475         ff_mpv_idct_init(s);
476         if ((err = ff_mpv_common_init(s)) < 0)
477             return err;
478     }
479
480     if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
481         int err;
482         s->context_reinit = 0;
483         s->height = s1->height;
484         s->width  = s1->width;
485         if ((err = ff_mpv_common_frame_size_change(s)) < 0)
486             return err;
487     }
488
489     s->avctx->coded_height  = s1->avctx->coded_height;
490     s->avctx->coded_width   = s1->avctx->coded_width;
491     s->avctx->width         = s1->avctx->width;
492     s->avctx->height        = s1->avctx->height;
493
494     s->coded_picture_number = s1->coded_picture_number;
495     s->picture_number       = s1->picture_number;
496
497     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
498         ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
499         if (s1->picture[i].f->buf[0] &&
500             (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
501             return ret;
502     }
503
504 #define UPDATE_PICTURE(pic)\
505 do {\
506     ff_mpeg_unref_picture(s->avctx, &s->pic);\
507     if (s1->pic.f->buf[0])\
508         ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
509     else\
510         ret = ff_update_picture_tables(&s->pic, &s1->pic);\
511     if (ret < 0)\
512         return ret;\
513 } while (0)
514
515     UPDATE_PICTURE(current_picture);
516     UPDATE_PICTURE(last_picture);
517     UPDATE_PICTURE(next_picture);
518
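/* REBASE_PICTURE translates a Picture pointer that points into the source
 * thread's picture[] array into the corresponding slot of this thread's
 * array, using the array index (pic - old_ctx->picture); pointers outside
 * that array map to NULL. */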
519 #define REBASE_PICTURE(pic, new_ctx, old_ctx)                                 \
520     ((pic && pic >= old_ctx->picture &&                                       \
521       pic < old_ctx->picture + MAX_PICTURE_COUNT) ?                           \
522         &new_ctx->picture[pic - old_ctx->picture] : NULL)
523
524     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
525     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
526     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
527
528     // Error/bug resilience
529     s->next_p_frame_damaged = s1->next_p_frame_damaged;
530     s->workaround_bugs      = s1->workaround_bugs;
531
532     // MPEG-4 timing info
533     memcpy(&s->last_time_base, &s1->last_time_base,
534            (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
535            (char *) &s1->last_time_base);
536
537     // B-frame info
538     s->max_b_frames = s1->max_b_frames;
539     s->low_delay    = s1->low_delay;
540     s->droppable    = s1->droppable;
541
542     // DivX handling (doesn't work)
543     s->divx_packed  = s1->divx_packed;
544
545     if (s1->bitstream_buffer) {
546         if (s1->bitstream_buffer_size +
547             AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
548             av_fast_malloc(&s->bitstream_buffer,
549                            &s->allocated_bitstream_buffer_size,
550                            s1->allocated_bitstream_buffer_size);
551         s->bitstream_buffer_size = s1->bitstream_buffer_size;
552         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
553                s1->bitstream_buffer_size);
554         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
555                AV_INPUT_BUFFER_PADDING_SIZE);
556     }
557
558     // linesize-dependent scratch buffer allocation
559     if (!s->sc.edge_emu_buffer)
560         if (s1->linesize) {
561             if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
562                                         &s->sc, s1->linesize) < 0) {
563                 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
564                        "scratch buffers.\n");
565                 return AVERROR(ENOMEM);
566             }
567         } else {
568             av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
569                    "be allocated due to unknown size.\n");
570             return AVERROR_BUG;
571         }
572
573     // MPEG-2/interlacing info
574     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
575            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
576
577     if (!s1->first_field) {
578         s->last_pict_type = s1->pict_type;
579         if (s1->current_picture_ptr)
580             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
581     }
582
583     return 0;
584 }
585
586 /**
587  * Set the given MpegEncContext to common defaults
588  * (same for encoding and decoding).
589  * The changed fields will not depend upon the
590  * prior state of the MpegEncContext.
591  */
592 void ff_mpv_common_defaults(MpegEncContext *s)
593 {
594     s->y_dc_scale_table      =
595     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
596     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
597     s->progressive_frame     = 1;
598     s->progressive_sequence  = 1;
599     s->picture_structure     = PICT_FRAME;
600
601     s->coded_picture_number  = 0;
602     s->picture_number        = 0;
603
604     s->f_code                = 1;
605     s->b_code                = 1;
606
607     s->slice_context_count   = 1;
608 }
609
610 /**
611  * Set the given MpegEncContext to defaults for decoding.
612  * The changed fields will not depend upon
613  * the prior state of the MpegEncContext.
614  */
615 void ff_mpv_decode_defaults(MpegEncContext *s)
616 {
617     ff_mpv_common_defaults(s);
618 }
619
620 /**
621  * Initialize and allocate MpegEncContext fields dependent on the resolution.
622  */
623 static int init_context_frame(MpegEncContext *s)
624 {
625     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
626
627     s->mb_width   = (s->width + 15) / 16;
628     s->mb_stride  = s->mb_width + 1;
629     s->b8_stride  = s->mb_width * 2 + 1;
630     mb_array_size = s->mb_height * s->mb_stride;
631     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
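    /* Illustrative numbers: for QCIF (176x144, so mb_height = 9 as set by the
     * caller), mb_width = (176 + 15) / 16 = 11, mb_stride = 12, b8_stride = 23,
     * mb_array_size = 9 * 12 = 108 and mv_table_size = (9 + 2) * 12 + 1 = 133. */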
632
633     /* set default edge pos, will be overridden
634      * in decode_header if needed */
635     s->h_edge_pos = s->mb_width * 16;
636     s->v_edge_pos = s->mb_height * 16;
637
638     s->mb_num     = s->mb_width * s->mb_height;
639
640     s->block_wrap[0] =
641     s->block_wrap[1] =
642     s->block_wrap[2] =
643     s->block_wrap[3] = s->b8_stride;
644     s->block_wrap[4] =
645     s->block_wrap[5] = s->mb_stride;
646
647     y_size  = s->b8_stride * (2 * s->mb_height + 1);
648     c_size  = s->mb_stride * (s->mb_height + 1);
649     yc_size = y_size + 2   * c_size;
650
651     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
652                       fail); // error resilience code looks cleaner with this
653     for (y = 0; y < s->mb_height; y++)
654         for (x = 0; x < s->mb_width; x++)
655             s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
656
657     s->mb_index2xy[s->mb_height * s->mb_width] =
658         (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
659
660     if (s->encoding) {
661         /* Allocate MV tables */
662         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
663                           mv_table_size * 2 * sizeof(int16_t), fail);
664         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
665                           mv_table_size * 2 * sizeof(int16_t), fail);
666         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
667                           mv_table_size * 2 * sizeof(int16_t), fail);
668         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
669                           mv_table_size * 2 * sizeof(int16_t), fail);
670         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
671                           mv_table_size * 2 * sizeof(int16_t), fail);
672         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
673                           mv_table_size * 2 * sizeof(int16_t), fail);
674         s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
675         s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
676         s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
677         s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
678                                    s->mb_stride + 1;
679         s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
680                                    s->mb_stride + 1;
681         s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
682
683         /* Allocate MB type table */
684         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
685                           sizeof(uint16_t), fail); // needed for encoding
686
687         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
688                           sizeof(int), fail);
689
690         FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
691                          mb_array_size * sizeof(float), fail);
692         FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
693                          mb_array_size * sizeof(float), fail);
694
695     }
696
697     if (s->codec_id == AV_CODEC_ID_MPEG4 ||
698         (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
699         /* interlaced direct mode decoding tables */
700         for (i = 0; i < 2; i++) {
701             int j, k;
702             for (j = 0; j < 2; j++) {
703                 for (k = 0; k < 2; k++) {
704                     FF_ALLOCZ_OR_GOTO(s->avctx,
705                                       s->b_field_mv_table_base[i][j][k],
706                                       mv_table_size * 2 * sizeof(int16_t),
707                                       fail);
708                     s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
709                                                    s->mb_stride + 1;
710                 }
711                 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
712                                   mb_array_size * 2 * sizeof(uint8_t), fail);
713                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
714                                   mv_table_size * 2 * sizeof(int16_t), fail);
715                 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
716                                             + s->mb_stride + 1;
717             }
718             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
719                               mb_array_size * 2 * sizeof(uint8_t), fail);
720         }
721     }
722     if (s->out_format == FMT_H263) {
723         /* cbp values */
724         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
725         s->coded_block = s->coded_block_base + s->b8_stride + 1;
726
727         /* cbp, ac_pred, pred_dir */
728         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
729                           mb_array_size * sizeof(uint8_t), fail);
730         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
731                           mb_array_size * sizeof(uint8_t), fail);
732     }
733
734     if (s->h263_pred || s->h263_plus || !s->encoding) {
735         /* dc values */
736     // MN: we need these for error resilience of intra frames
737         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
738                           yc_size * sizeof(int16_t), fail);
739         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
740         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
741         s->dc_val[2] = s->dc_val[1] + c_size;
742         for (i = 0; i < yc_size; i++)
743             s->dc_val_base[i] = 1024;
744     }
745
746     /* which MBs are intra blocks */
747     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
748     memset(s->mbintra_table, 1, mb_array_size);
749
750     /* init macroblock skip table */
751     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
752     // Note: the + 1 is for a quicker MPEG-4 slice_end detection
753
754     return ff_mpeg_er_init(s);
755 fail:
756     return AVERROR(ENOMEM);
757 }
758
759 /**
760  * Initialize the common structure for both encoder and decoder.
761  * This assumes that some variables like width/height are already set.
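 *
 * Hypothetical call-order sketch for a decoder (illustrative only; the
 * idct/common init ordering mirrors ff_mpeg_update_thread_context() above,
 * other details may differ between callers):
 *
 *     MpegEncContext *s = avctx->priv_data;
 *     ff_mpv_decode_defaults(s);
 *     s->avctx  = avctx;
 *     s->width  = avctx->coded_width;
 *     s->height = avctx->coded_height;
 *     ff_mpv_idct_init(s);
 *     if (ff_mpv_common_init(s) < 0)
 *         return -1;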
762  */
763 av_cold int ff_mpv_common_init(MpegEncContext *s)
764 {
765     int i;
766     int nb_slices = (HAVE_THREADS &&
767                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
768                     s->avctx->thread_count : 1;
769
770     if (s->encoding && s->avctx->slices)
771         nb_slices = s->avctx->slices;
772
773     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
774         s->mb_height = (s->height + 31) / 32 * 2;
775     else
776         s->mb_height = (s->height + 15) / 16;
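    /* Illustrative numbers: for height 720 the interlaced MPEG-2 case yields
     * (720 + 31) / 32 * 2 = 46 macroblock rows instead of the progressive
     * (720 + 15) / 16 = 45, i.e. the row count is rounded up to an even value
     * so that each field covers a whole number of macroblock rows. */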
777
778     if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
779         av_log(s->avctx, AV_LOG_ERROR,
780                "decoding to AV_PIX_FMT_NONE is not supported.\n");
781         return -1;
782     }
783
784     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
785         int max_slices;
786         if (s->mb_height)
787             max_slices = FFMIN(MAX_THREADS, s->mb_height);
788         else
789             max_slices = MAX_THREADS;
790         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
791                " reducing to %d\n", nb_slices, max_slices);
792         nb_slices = max_slices;
793     }
794
795     if ((s->width || s->height) &&
796         av_image_check_size(s->width, s->height, 0, s->avctx))
797         return -1;
798
799     dct_init(s);
800
801     /* set chroma shifts */
802     av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
803                                      &s->chroma_x_shift,
804                                      &s->chroma_y_shift);
805
806     /* convert fourcc to upper case */
807     s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
808
809     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
810                       MAX_PICTURE_COUNT * sizeof(Picture), fail);
811     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
812         s->picture[i].f = av_frame_alloc();
813         if (!s->picture[i].f)
814             goto fail;
815     }
816     memset(&s->next_picture, 0, sizeof(s->next_picture));
817     memset(&s->last_picture, 0, sizeof(s->last_picture));
818     memset(&s->current_picture, 0, sizeof(s->current_picture));
819     memset(&s->new_picture, 0, sizeof(s->new_picture));
820     s->next_picture.f = av_frame_alloc();
821     if (!s->next_picture.f)
822         goto fail;
823     s->last_picture.f = av_frame_alloc();
824     if (!s->last_picture.f)
825         goto fail;
826     s->current_picture.f = av_frame_alloc();
827     if (!s->current_picture.f)
828         goto fail;
829     s->new_picture.f = av_frame_alloc();
830     if (!s->new_picture.f)
831         goto fail;
832
833     if (s->width && s->height) {
834         if (init_context_frame(s))
835             goto fail;
836
837         s->parse_context.state = -1;
838     }
839
840     s->context_initialized = 1;
841     s->thread_context[0]   = s;
842
843     if (s->width && s->height) {
844         if (nb_slices > 1) {
845             for (i = 1; i < nb_slices; i++) {
846                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
847                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
848             }
849
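            /* The rounded division below spreads the macroblock rows nearly
             * evenly across the slices: e.g. (illustrative numbers)
             * mb_height = 45 with 4 slices gives row boundaries 0, 11, 23, 34
             * and 45, i.e. slices of 11, 12, 11 and 11 rows. */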
850             for (i = 0; i < nb_slices; i++) {
851                 if (init_duplicate_context(s->thread_context[i]) < 0)
852                     goto fail;
853                 s->thread_context[i]->start_mb_y =
854                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
855                 s->thread_context[i]->end_mb_y   =
856                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
857             }
858         } else {
859             if (init_duplicate_context(s) < 0)
860                 goto fail;
861             s->start_mb_y = 0;
862             s->end_mb_y   = s->mb_height;
863         }
864         s->slice_context_count = nb_slices;
865     }
866
867     return 0;
868  fail:
869     ff_mpv_common_end(s);
870     return -1;
871 }
872
873 /**
874  * Free and reset the MpegEncContext fields that depend on the resolution.
875  * Used during resolution changes to avoid a full reinitialization of the
876  * codec.
877  */
878 static void free_context_frame(MpegEncContext *s)
879 {
880     int i, j, k;
881
882     av_freep(&s->mb_type);
883     av_freep(&s->p_mv_table_base);
884     av_freep(&s->b_forw_mv_table_base);
885     av_freep(&s->b_back_mv_table_base);
886     av_freep(&s->b_bidir_forw_mv_table_base);
887     av_freep(&s->b_bidir_back_mv_table_base);
888     av_freep(&s->b_direct_mv_table_base);
889     s->p_mv_table            = NULL;
890     s->b_forw_mv_table       = NULL;
891     s->b_back_mv_table       = NULL;
892     s->b_bidir_forw_mv_table = NULL;
893     s->b_bidir_back_mv_table = NULL;
894     s->b_direct_mv_table     = NULL;
895     for (i = 0; i < 2; i++) {
896         for (j = 0; j < 2; j++) {
897             for (k = 0; k < 2; k++) {
898                 av_freep(&s->b_field_mv_table_base[i][j][k]);
899                 s->b_field_mv_table[i][j][k] = NULL;
900             }
901             av_freep(&s->b_field_select_table[i][j]);
902             av_freep(&s->p_field_mv_table_base[i][j]);
903             s->p_field_mv_table[i][j] = NULL;
904         }
905         av_freep(&s->p_field_select_table[i]);
906     }
907
908     av_freep(&s->dc_val_base);
909     av_freep(&s->coded_block_base);
910     av_freep(&s->mbintra_table);
911     av_freep(&s->cbp_table);
912     av_freep(&s->pred_dir_table);
913
914     av_freep(&s->mbskip_table);
915
916     av_freep(&s->er.error_status_table);
917     av_freep(&s->er.er_temp_buffer);
918     av_freep(&s->mb_index2xy);
919     av_freep(&s->lambda_table);
920     av_freep(&s->cplx_tab);
921     av_freep(&s->bits_tab);
922
923     s->linesize = s->uvlinesize = 0;
924 }
925
926 int ff_mpv_common_frame_size_change(MpegEncContext *s)
927 {
928     int i, err = 0;
929
930     if (s->slice_context_count > 1) {
931         for (i = 0; i < s->slice_context_count; i++) {
932             free_duplicate_context(s->thread_context[i]);
933         }
934         for (i = 1; i < s->slice_context_count; i++) {
935             av_freep(&s->thread_context[i]);
936         }
937     } else
938         free_duplicate_context(s);
939
940     free_context_frame(s);
941
942     if (s->picture)
943         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
944                 s->picture[i].needs_realloc = 1;
945         }
946
947     s->last_picture_ptr         =
948     s->next_picture_ptr         =
949     s->current_picture_ptr      = NULL;
950
951     // init
952     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
953         s->mb_height = (s->height + 31) / 32 * 2;
954     else
955         s->mb_height = (s->height + 15) / 16;
956
957     if ((s->width || s->height) &&
958         (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
959         goto fail;
960
961     if ((err = init_context_frame(s)))
962         goto fail;
963
964     s->thread_context[0]   = s;
965
966     if (s->width && s->height) {
967         int nb_slices = s->slice_context_count;
968         if (nb_slices > 1) {
969             for (i = 1; i < nb_slices; i++) {
970                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
971                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
972             }
973
974             for (i = 0; i < nb_slices; i++) {
975                 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
976                     goto fail;
977                 s->thread_context[i]->start_mb_y =
978                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
979                 s->thread_context[i]->end_mb_y   =
980                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
981             }
982         } else {
983             if (init_duplicate_context(s) < 0)
984                 goto fail;
985             s->start_mb_y = 0;
986             s->end_mb_y   = s->mb_height;
987         }
988         s->slice_context_count = nb_slices;
989     }
990
991     return 0;
992  fail:
993     ff_mpv_common_end(s);
994     return err;
995 }
996
997 /* free the common structure for both encoder and decoder */
998 void ff_mpv_common_end(MpegEncContext *s)
999 {
1000     int i;
1001
1002     if (s->slice_context_count > 1) {
1003         for (i = 0; i < s->slice_context_count; i++) {
1004             free_duplicate_context(s->thread_context[i]);
1005         }
1006         for (i = 1; i < s->slice_context_count; i++) {
1007             av_freep(&s->thread_context[i]);
1008         }
1009         s->slice_context_count = 1;
1010     } else free_duplicate_context(s);
1011
1012     av_freep(&s->parse_context.buffer);
1013     s->parse_context.buffer_size = 0;
1014
1015     av_freep(&s->bitstream_buffer);
1016     s->allocated_bitstream_buffer_size = 0;
1017
1018     if (s->picture) {
1019         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1020             ff_free_picture_tables(&s->picture[i]);
1021             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1022             av_frame_free(&s->picture[i].f);
1023         }
1024     }
1025     av_freep(&s->picture);
1026     ff_free_picture_tables(&s->last_picture);
1027     ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1028     av_frame_free(&s->last_picture.f);
1029     ff_free_picture_tables(&s->current_picture);
1030     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1031     av_frame_free(&s->current_picture.f);
1032     ff_free_picture_tables(&s->next_picture);
1033     ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1034     av_frame_free(&s->next_picture.f);
1035     ff_free_picture_tables(&s->new_picture);
1036     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1037     av_frame_free(&s->new_picture.f);
1038
1039     free_context_frame(s);
1040
1041     s->context_initialized      = 0;
1042     s->last_picture_ptr         =
1043     s->next_picture_ptr         =
1044     s->current_picture_ptr      = NULL;
1045     s->linesize = s->uvlinesize = 0;
1046 }
1047
1048 /**
1049  * Generic function called after decoding
1050  * the header and before a frame is decoded.
1051  */
1052 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1053 {
1054     int i, ret;
1055     Picture *pic;
1056     s->mb_skipped = 0;
1057
1058     /* mark & release old frames */
1059     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1060         s->last_picture_ptr != s->next_picture_ptr &&
1061         s->last_picture_ptr->f->buf[0]) {
1062         ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1063     }
1064
1065     /* release forgotten pictures */
1066     /* if (MPEG-1/2/4 / H.263) */
1067     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1068         if (&s->picture[i] != s->last_picture_ptr &&
1069             &s->picture[i] != s->next_picture_ptr &&
1070             s->picture[i].reference && !s->picture[i].needs_realloc) {
1071             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1072         }
1073     }
1074
1075     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1076
1077     /* release non reference frames */
1078     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1079         if (!s->picture[i].reference)
1080             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1081     }
1082
1083     if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1084         // we already have an unused image
1085         // (maybe it was set before reading the header)
1086         pic = s->current_picture_ptr;
1087     } else {
1088         i   = ff_find_unused_picture(s->avctx, s->picture, 0);
1089         if (i < 0) {
1090             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1091             return i;
1092         }
1093         pic = &s->picture[i];
1094     }
1095
1096     pic->reference = 0;
1097     if (!s->droppable) {
1098         if (s->pict_type != AV_PICTURE_TYPE_B)
1099             pic->reference = 3;
1100     }
1101
1102     pic->f->coded_picture_number = s->coded_picture_number++;
1103
1104     if (alloc_picture(s, pic, 0) < 0)
1105         return -1;
1106
1107     s->current_picture_ptr = pic;
1108     // FIXME use only the vars from current_pic
1109     s->current_picture_ptr->f->top_field_first = s->top_field_first;
1110     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1111         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1112         if (s->picture_structure != PICT_FRAME)
1113             s->current_picture_ptr->f->top_field_first =
1114                 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1115     }
1116     s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1117                                                  !s->progressive_sequence;
1118     s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1119
1120     s->current_picture_ptr->f->pict_type = s->pict_type;
1121     // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
1122     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1123     s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1124
1125     if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1126                                    s->current_picture_ptr)) < 0)
1127         return ret;
1128
1129     if (s->pict_type != AV_PICTURE_TYPE_B) {
1130         s->last_picture_ptr = s->next_picture_ptr;
1131         if (!s->droppable)
1132             s->next_picture_ptr = s->current_picture_ptr;
1133     }
1134     ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1135             s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1136             s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
1137             s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
1138             s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1139             s->pict_type, s->droppable);
1140
1141     if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1142         (s->pict_type != AV_PICTURE_TYPE_I ||
1143          s->picture_structure != PICT_FRAME)) {
1144         int h_chroma_shift, v_chroma_shift;
1145         av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1146                                          &h_chroma_shift, &v_chroma_shift);
1147         if (s->pict_type != AV_PICTURE_TYPE_I)
1148             av_log(avctx, AV_LOG_ERROR,
1149                    "warning: first frame is not a keyframe\n");
1150         else if (s->picture_structure != PICT_FRAME)
1151             av_log(avctx, AV_LOG_INFO,
1152                    "allocate dummy last picture for field based first keyframe\n");
1153
1154         /* Allocate a dummy frame */
1155         i = ff_find_unused_picture(s->avctx, s->picture, 0);
1156         if (i < 0) {
1157             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1158             return i;
1159         }
1160         s->last_picture_ptr = &s->picture[i];
1161
1162         s->last_picture_ptr->reference   = 3;
1163         s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1164
1165         if (alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1166             s->last_picture_ptr = NULL;
1167             return -1;
1168         }
1169
1170         memset(s->last_picture_ptr->f->data[0], 0,
1171                avctx->height * s->last_picture_ptr->f->linesize[0]);
1172         memset(s->last_picture_ptr->f->data[1], 0x80,
1173                (avctx->height >> v_chroma_shift) *
1174                s->last_picture_ptr->f->linesize[1]);
1175         memset(s->last_picture_ptr->f->data[2], 0x80,
1176                (avctx->height >> v_chroma_shift) *
1177                s->last_picture_ptr->f->linesize[2]);
1178
1179         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1180         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1181     }
1182     if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1183         s->pict_type == AV_PICTURE_TYPE_B) {
1184         /* Allocate a dummy frame */
1185         i = ff_find_unused_picture(s->avctx, s->picture, 0);
1186         if (i < 0) {
1187             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1188             return i;
1189         }
1190         s->next_picture_ptr = &s->picture[i];
1191
1192         s->next_picture_ptr->reference   = 3;
1193         s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1194
1195         if (alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1196             s->next_picture_ptr = NULL;
1197             return -1;
1198         }
1199         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1200         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1201     }
1202
1203     if (s->last_picture_ptr) {
1204         ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1205         if (s->last_picture_ptr->f->buf[0] &&
1206             (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1207                                        s->last_picture_ptr)) < 0)
1208             return ret;
1209     }
1210     if (s->next_picture_ptr) {
1211         ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1212         if (s->next_picture_ptr->f->buf[0] &&
1213             (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1214                                        s->next_picture_ptr)) < 0)
1215             return ret;
1216     }
1217
1218     if (s->pict_type != AV_PICTURE_TYPE_I &&
1219         !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1220         av_log(s, AV_LOG_ERROR,
1221                "Non-reference picture received and no reference available\n");
1222         return AVERROR_INVALIDDATA;
1223     }
1224
1225     if (s->picture_structure!= PICT_FRAME) {
1226         int i;
1227         for (i = 0; i < 4; i++) {
1228             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1229                 s->current_picture.f->data[i] +=
1230                     s->current_picture.f->linesize[i];
1231             }
1232             s->current_picture.f->linesize[i] *= 2;
1233             s->last_picture.f->linesize[i]    *= 2;
1234             s->next_picture.f->linesize[i]    *= 2;
1235         }
1236     }
1237
1238     /* Set the dequantizer; we can't do it during init, as it might change
1239      * for MPEG-4, and we can't do it in the header decode, as init is not
1240      * called for MPEG-4 there yet. */
1241     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1242         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1243         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1244     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1245         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1246         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1247     } else {
1248         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1249         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1250     }
1251
1252 #if FF_API_XVMC
1253 FF_DISABLE_DEPRECATION_WARNINGS
1254     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1255         return ff_xvmc_field_start(s, avctx);
1256 FF_ENABLE_DEPRECATION_WARNINGS
1257 #endif /* FF_API_XVMC */
1258
1259     return 0;
1260 }
1261
1262 /* called after a frame has been decoded. */
1263 void ff_mpv_frame_end(MpegEncContext *s)
1264 {
1265 #if FF_API_XVMC
1266 FF_DISABLE_DEPRECATION_WARNINGS
1267     /* redraw edges for the frame if decoding didn't complete */
1268     // just to make sure that all data is rendered.
1269     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1270         ff_xvmc_field_end(s);
1271     } else
1272 FF_ENABLE_DEPRECATION_WARNINGS
1273 #endif /* FF_API_XVMC */
1274
1275     emms_c();
1276
1277     if (s->current_picture.reference)
1278         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1279 }
1280
1281 /**
1282  * Print debugging info for the given picture.
1283  */
1284 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1285 {
1286     AVFrame *pict;
1287     if (s->avctx->hwaccel || !p || !p->mb_type)
1288         return;
1289     pict = p->f;
1290
1291     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1292         int x,y;
1293
1294         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1295         switch (pict->pict_type) {
1296         case AV_PICTURE_TYPE_I:
1297             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1298             break;
1299         case AV_PICTURE_TYPE_P:
1300             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1301             break;
1302         case AV_PICTURE_TYPE_B:
1303             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1304             break;
1305         case AV_PICTURE_TYPE_S:
1306             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1307             break;
1308         case AV_PICTURE_TYPE_SI:
1309             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1310             break;
1311         case AV_PICTURE_TYPE_SP:
1312             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1313             break;
1314         }
1315         for (y = 0; y < s->mb_height; y++) {
1316             for (x = 0; x < s->mb_width; x++) {
1317                 if (s->avctx->debug & FF_DEBUG_SKIP) {
1318                     int count = s->mbskip_table[x + y * s->mb_stride];
1319                     if (count > 9)
1320                         count = 9;
1321                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1322                 }
1323                 if (s->avctx->debug & FF_DEBUG_QP) {
1324                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1325                            p->qscale_table[x + y * s->mb_stride]);
1326                 }
1327                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1328                     int mb_type = p->mb_type[x + y * s->mb_stride];
1329                     // Type & MV direction
1330                     if (IS_PCM(mb_type))
1331                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1332                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1333                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1334                     else if (IS_INTRA4x4(mb_type))
1335                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1336                     else if (IS_INTRA16x16(mb_type))
1337                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1338                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1339                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1340                     else if (IS_DIRECT(mb_type))
1341                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1342                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1343                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1344                     else if (IS_GMC(mb_type))
1345                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1346                     else if (IS_SKIP(mb_type))
1347                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1348                     else if (!USES_LIST(mb_type, 1))
1349                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1350                     else if (!USES_LIST(mb_type, 0))
1351                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1352                     else {
1353                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1354                         av_log(s->avctx, AV_LOG_DEBUG, "X");
1355                     }
1356
1357                     // segmentation
1358                     if (IS_8X8(mb_type))
1359                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1360                     else if (IS_16X8(mb_type))
1361                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1362                     else if (IS_8X16(mb_type))
1363                         av_log(s->avctx, AV_LOG_DEBUG, "|");
1364                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1365                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1366                     else
1367                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1368
1369
1370                     if (IS_INTERLACED(mb_type))
1371                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1372                     else
1373                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1374                 }
1375             }
1376             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1377         }
1378     }
1379 }
1380
1381 /**
1382  * find the lowest MB row referenced in the MVs
1383  */
1384 static int lowest_referenced_row(MpegEncContext *s, int dir)
1385 {
1386     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1387     int my, off, i, mvs;
1388
1389     if (s->picture_structure != PICT_FRAME || s->mcsel)
1390         goto unhandled;
1391
1392     switch (s->mv_type) {
1393         case MV_TYPE_16X16:
1394             mvs = 1;
1395             break;
1396         case MV_TYPE_16X8:
1397             mvs = 2;
1398             break;
1399         case MV_TYPE_8X8:
1400             mvs = 4;
1401             break;
1402         default:
1403             goto unhandled;
1404     }
1405
1406     for (i = 0; i < mvs; i++) {
1407         my = s->mv[dir][i][1]<<qpel_shift;
1408         my_max = FFMAX(my_max, my);
1409         my_min = FFMIN(my_min, my);
1410     }
1411
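    /* Vertical MVs are in quarter-pel units at this point (half-pel codecs
     * were shifted above), so ">> 6" converts to 16-pixel macroblock rows
     * (64 quarter-pel units per row) and "+ 63" rounds up.  Illustrative
     * example: a half-pel MV of 35 (17.5 pixels) becomes 70, and
     * (70 + 63) >> 6 = 2 extra rows. */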
1412     off = (FFMAX(-my_min, my_max) + 63) >> 6;
1413
1414     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1415 unhandled:
1416     return s->mb_height-1;
1417 }
1418
1419 /* put block[] to dest[] */
1420 static inline void put_dct(MpegEncContext *s,
1421                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1422 {
1423     s->dct_unquantize_intra(s, block, i, qscale);
1424     s->idsp.idct_put(dest, line_size, block);
1425 }
1426
1427 /* add block[] to dest[] */
1428 static inline void add_dct(MpegEncContext *s,
1429                            int16_t *block, int i, uint8_t *dest, int line_size)
1430 {
1431     if (s->block_last_index[i] >= 0) {
1432         s->idsp.idct_add(dest, line_size, block);
1433     }
1434 }
1435
1436 static inline void add_dequant_dct(MpegEncContext *s,
1437                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1438 {
1439     if (s->block_last_index[i] >= 0) {
1440         s->dct_unquantize_inter(s, block, i, qscale);
1441
1442         s->idsp.idct_add(dest, line_size, block);
1443     }
1444 }
1445
1446 /**
1447  * Clean dc, ac, coded_block for the current non-intra MB.
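 * The DC predictors are reset to 1024, which (assuming the usual H.263/MPEG-4
 * convention) is the same "no prediction" value dc_val is filled with in
 * init_context_frame().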
1448  */
1449 void ff_clean_intra_table_entries(MpegEncContext *s)
1450 {
1451     int wrap = s->b8_stride;
1452     int xy = s->block_index[0];
1453
1454     s->dc_val[0][xy           ] =
1455     s->dc_val[0][xy + 1       ] =
1456     s->dc_val[0][xy     + wrap] =
1457     s->dc_val[0][xy + 1 + wrap] = 1024;
1458     /* ac pred */
1459     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1460     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1461     if (s->msmpeg4_version>=3) {
1462         s->coded_block[xy           ] =
1463         s->coded_block[xy + 1       ] =
1464         s->coded_block[xy     + wrap] =
1465         s->coded_block[xy + 1 + wrap] = 0;
1466     }
1467     /* chroma */
1468     wrap = s->mb_stride;
1469     xy = s->mb_x + s->mb_y * wrap;
1470     s->dc_val[1][xy] =
1471     s->dc_val[2][xy] = 1024;
1472     /* ac pred */
1473     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1474     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1475
1476     s->mbintra_table[xy]= 0;
1477 }
1478
1479 /* Generic function called after a macroblock has been parsed by the
1480    decoder or after it has been encoded by the encoder.
1481
1482    Important variables used:
1483    s->mb_intra : true if intra macroblock
1484    s->mv_dir   : motion vector direction
1485    s->mv_type  : motion vector type
1486    s->mv       : motion vector
1487    s->interlaced_dct : true if interlaced DCT is used (MPEG-2)
1488  */
1489 static av_always_inline
1490 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1491                             int is_mpeg12)
1492 {
1493     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1494
1495 #if FF_API_XVMC
1496 FF_DISABLE_DEPRECATION_WARNINGS
1497     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1498         ff_xvmc_decode_mb(s); // XvMC uses pblocks
1499         return;
1500     }
1501 FF_ENABLE_DEPRECATION_WARNINGS
1502 #endif /* FF_API_XVMC */
1503
1504     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1505        /* print DCT coefficients */
1506        int i,j;
1507        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1508        for(i=0; i<6; i++){
1509            for(j=0; j<64; j++){
1510                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1511                       block[i][s->idsp.idct_permutation[j]]);
1512            }
1513            av_log(s->avctx, AV_LOG_DEBUG, "\n");
1514        }
1515     }
1516
1517     s->current_picture.qscale_table[mb_xy] = s->qscale;
1518
1519     /* update DC predictors for P macroblocks */
1520     if (!s->mb_intra) {
1521         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1522             if(s->mbintra_table[mb_xy])
1523                 ff_clean_intra_table_entries(s);
1524         } else {
1525             s->last_dc[0] =
1526             s->last_dc[1] =
1527             s->last_dc[2] = 128 << s->intra_dc_precision;
1528         }
1529     }
1530     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1531         s->mbintra_table[mb_xy]=1;
1532
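    /* Reconstruct the macroblock below unless we are encoding a picture that
       will never be referenced (intra-only or B-frame) and no reconstructed
       output is needed (no PSNR, no RD macroblock decision). */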
1533     if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) ||
1534         !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1535           s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1536         uint8_t *dest_y, *dest_cb, *dest_cr;
1537         int dct_linesize, dct_offset;
1538         op_pixels_func (*op_pix)[4];
1539         qpel_mc_func (*op_qpix)[16];
1540         const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1541         const int uvlinesize = s->current_picture.f->linesize[1];
1542         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1543         const int block_size = 8;
1544
1545         /* Avoid the copy if the macroblock was skipped in the previous frame too. */
1546         /* Only maintain the skip table while decoding; the encoder may partially trash these buffers. */
1547         if(!s->encoding){
1548             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1549
1550             if (s->mb_skipped) {
1551                 s->mb_skipped= 0;
1552                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1553                 *mbskip_ptr = 1;
1554             } else if(!s->current_picture.reference) {
1555                 *mbskip_ptr = 1;
1556             } else{
1557                 *mbskip_ptr = 0; /* not skipped */
1558             }
1559         }
1560
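        /* With an interlaced (field) DCT the two 8-line halves of the MB are
           interleaved: use twice the stride and start the bottom blocks one
           line down instead of eight lines down. */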
1561         dct_linesize = linesize << s->interlaced_dct;
1562         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
1563
1564         if(readable){
1565             dest_y=  s->dest[0];
1566             dest_cb= s->dest[1];
1567             dest_cr= s->dest[2];
1568         }else{
1569             dest_y = s->sc.b_scratchpad;
1570             dest_cb= s->sc.b_scratchpad+16*linesize;
1571             dest_cr= s->sc.b_scratchpad+32*linesize;
1572         }
1573
1574         if (!s->mb_intra) {
1575             /* motion handling */
1576             /* Motion compensation is only done here while decoding; the encoder has already performed it. */
1577             if(!s->encoding){
1578
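                /* With frame threading the reference pictures may still be
                   decoding; wait until they have progressed past the lowest
                   row our motion vectors can reference. */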
1579                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1580                     if (s->mv_dir & MV_DIR_FORWARD) {
1581                         ff_thread_await_progress(&s->last_picture_ptr->tf,
1582                                                  lowest_referenced_row(s, 0),
1583                                                  0);
1584                     }
1585                     if (s->mv_dir & MV_DIR_BACKWARD) {
1586                         ff_thread_await_progress(&s->next_picture_ptr->tf,
1587                                                  lowest_referenced_row(s, 1),
1588                                                  0);
1589                     }
1590                 }
1591
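                /* Pick the prediction functions: B-frames always use rounded
                   halfpel interpolation, while for P-frames some codecs
                   alternate the rounding between frames (no_rounding). */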
1592                 op_qpix= s->me.qpel_put;
1593                 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1594                     op_pix = s->hdsp.put_pixels_tab;
1595                 }else{
1596                     op_pix = s->hdsp.put_no_rnd_pixels_tab;
1597                 }
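                /* Forward prediction writes the block; if backward prediction
                   follows (bidirectional MB), it is averaged on top of it. */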
1598                 if (s->mv_dir & MV_DIR_FORWARD) {
1599                     ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
1600                     op_pix = s->hdsp.avg_pixels_tab;
1601                     op_qpix= s->me.qpel_avg;
1602                 }
1603                 if (s->mv_dir & MV_DIR_BACKWARD) {
1604                     ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
1605                 }
1606             }
1607
1608             /* skip dequant / idct if we are really late ;) */
1609             if(s->avctx->skip_idct){
1610                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1611                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1612                    || s->avctx->skip_idct >= AVDISCARD_ALL)
1613                     goto skip_idct;
1614             }
1615
1616             /* add dct residue */
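            /* The codecs excluded below already dequantize while decoding the
               bitstream, so only idct_add() remains for them; everything else
               (and the encoder) still needs dct_unquantize_inter() here. */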
1617             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
1618                                 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
1619                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
1620                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
1621                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
1622                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1623
1624                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1625                     if (s->chroma_y_shift){
1626                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1627                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1628                     }else{
1629                         dct_linesize >>= 1;
1630                         dct_offset >>=1;
1631                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
1632                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
1633                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1634                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1635                     }
1636                 }
1637             } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
1638                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
1639                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
1640                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
1641                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1642
1643                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1644                     if(s->chroma_y_shift){//Chroma420
1645                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
1646                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
1647                     }else{
1648                         //chroma422
1649                         dct_linesize = uvlinesize << s->interlaced_dct;
1650                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1651
1652                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
1653                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
1654                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1655                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1656                         if(!s->chroma_x_shift){//Chroma444
1657                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1658                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1659                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
1660                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
1661                         }
1662                     }
1663                 } // end of AV_CODEC_FLAG_GRAY check
1664             }
1665             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
1666                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1667             }
1668         } else {
1669             /* dct only in intra block */
1670             if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
1671                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
1672                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
1673                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
1674                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1675
1676                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1677                     if(s->chroma_y_shift){
1678                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1679                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1680                     }else{
1681                         dct_offset >>=1;
1682                         dct_linesize >>=1;
1683                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
1684                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
1685                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1686                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1687                     }
1688                 }
1689             }else{
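                /* MPEG-1/2 intra blocks are already dequantized by the slice
                   decoder, so only the inverse transform is needed here. */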
1690                 s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
1691                 s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
1692                 s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
1693                 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1694
1695                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1696                     if(s->chroma_y_shift){
1697                         s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1698                         s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1699                     }else{
1700
1701                         dct_linesize = uvlinesize << s->interlaced_dct;
1702                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1703
1704                         s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
1705                         s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
1706                         s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1707                         s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1708                         if(!s->chroma_x_shift){//Chroma444
1709                             s->idsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
1710                             s->idsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
1711                             s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
1712                             s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
1713                         }
1714                     }
1715                 } // end of AV_CODEC_FLAG_GRAY check
1716             }
1717         }
1718 skip_idct:
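        /* If the destination was not readable the MB was rendered into the
           scratchpad; copy the finished block into the current picture now. */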
1719         if(!readable){
1720             s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
1721             s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1722             s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1723         }
1724     }
1725 }
1726
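/* mpv_decode_mb_internal() is av_always_inline with a constant is_mpeg12, so
   unless the build optimizes for size two specialized versions are generated
   and the codec-type branches vanish from the per-macroblock path. */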
1727 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
1728 {
1729 #if !CONFIG_SMALL
1730     if(s->out_format == FMT_MPEG1) {
1731         mpv_decode_mb_internal(s, block, 1);
1732     } else
1733 #endif
1734         mpv_decode_mb_internal(s, block, 0);
1735 }
1736
1737 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
1738 {
1739     ff_draw_horiz_band(s->avctx, s->current_picture.f,
1740                        s->last_picture.f, y, h, s->picture_structure,
1741                        s->first_field, s->low_delay);
1742 }
1743
1744 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
1745     const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1746     const int uvlinesize = s->current_picture.f->linesize[1];
1747     const int mb_size= 4;
1748
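    /* block_index[0..3] address the four luma 8x8 blocks of the MB in
       b8_stride units; [4] and [5] address the chroma blocks, which are stored
       after the luma plane in the shared prediction tables. Everything is set
       up one (macro)block to the left; ff_update_block_index() steps it onto
       the current MB as the row is scanned. */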
1749     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
1750     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
1751     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
1752     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
1753     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1754     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1755     // block_index is not used by MPEG-2, so it is not affected by chroma_format
1756
1757     s->dest[0] = s->current_picture.f->data[0] + (s->mb_x - 1) * (1 << mb_size);
1758     s->dest[1] = s->current_picture.f->data[1] + (s->mb_x - 1) * (1 << (mb_size - s->chroma_x_shift));
1759     s->dest[2] = s->current_picture.f->data[2] + (s->mb_x - 1) * (1 << (mb_size - s->chroma_x_shift));
1760
1761     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
1762     {
1763         if(s->picture_structure==PICT_FRAME){
1764             s->dest[0] += s->mb_y *   linesize << mb_size;
1765             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1766             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1767         }else{
1768             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
1769             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1770             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1771             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
1772         }
1773     }
1774 }
1775
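/* Drop all buffered pictures and reset the parser state; called when the
   decoder is flushed, e.g. after a seek. */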
1776 void ff_mpeg_flush(AVCodecContext *avctx){
1777     int i;
1778     MpegEncContext *s = avctx->priv_data;
1779
1780     if (!s || !s->picture)
1781         return;
1782
1783     for (i = 0; i < MAX_PICTURE_COUNT; i++)
1784         ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1785     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
1786
1787     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1788     ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1789     ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1790
1791     s->mb_x= s->mb_y= 0;
1792
1793     s->parse_context.state= -1;
1794     s->parse_context.frame_start_found= 0;
1795     s->parse_context.overread= 0;
1796     s->parse_context.overread_index= 0;
1797     s->parse_context.index= 0;
1798     s->parse_context.last_index= 0;
1799     s->bitstream_buffer_size=0;
1800     s->pp_time=0;
1801 }
1802
1803 /**
1804  * Set qscale and update the qscale-dependent variables.
1805  */
1806 void ff_set_qscale(MpegEncContext * s, int qscale)
1807 {
1808     if (qscale < 1)
1809         qscale = 1;
1810     else if (qscale > 31)
1811         qscale = 31;
1812
1813     s->qscale = qscale;
1814     s->chroma_qscale= s->chroma_qscale_table[qscale];
1815
1816     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1817     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
1818 }
1819
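/* Report per-row progress for frame-threaded decoding. B-frames are never used
   as references, so nobody waits on them; partitioned or errored frames only
   report once the whole frame is done. */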
1820 void ff_mpv_report_decode_progress(MpegEncContext *s)
1821 {
1822     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
1823         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
1824 }