1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "idctdsp.h"
38 #include "internal.h"
39 #include "mathops.h"
40 #include "mpeg_er.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "mjpegenc.h"
45 #include "msmpeg4.h"
46 #include "qpeldsp.h"
47 #include "thread.h"
48 #include "wmv2.h"
49 #include <limits.h>
50
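/*
 * MPEG-1 intra dequantization: level' = (level * qscale * matrix[j]) / 8,
 * with the result forced odd via (level - 1) | 1 to limit IDCT mismatch
 * accumulation.
 */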
51 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
52                                    int16_t *block, int n, int qscale)
53 {
54     int i, level, nCoeffs;
55     const uint16_t *quant_matrix;
56
57     nCoeffs= s->block_last_index[n];
58
59     if (n < 4)
60         block[0] = block[0] * s->y_dc_scale;
61     else
62         block[0] = block[0] * s->c_dc_scale;
63     /* XXX: only MPEG-1 */
64     quant_matrix = s->intra_matrix;
65     for(i=1;i<=nCoeffs;i++) {
66         int j= s->intra_scantable.permutated[i];
67         level = block[j];
68         if (level) {
69             if (level < 0) {
70                 level = -level;
71                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
72                 level = (level - 1) | 1;
73                 level = -level;
74             } else {
75                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
76                 level = (level - 1) | 1;
77             }
78             block[j] = level;
79         }
80     }
81 }
82
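/*
 * MPEG-1 inter dequantization: level' = ((2 * level + 1) * qscale * matrix[j]) / 16,
 * also forced odd; the loop starts at 0 because inter blocks have no
 * separately coded DC coefficient.
 */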
83 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
84                                    int16_t *block, int n, int qscale)
85 {
86     int i, level, nCoeffs;
87     const uint16_t *quant_matrix;
88
89     nCoeffs= s->block_last_index[n];
90
91     quant_matrix = s->inter_matrix;
92     for(i=0; i<=nCoeffs; i++) {
93         int j= s->intra_scantable.permutated[i];
94         level = block[j];
95         if (level) {
96             if (level < 0) {
97                 level = -level;
98                 level = (((level << 1) + 1) * qscale *
99                          ((int) (quant_matrix[j]))) >> 4;
100                 level = (level - 1) | 1;
101                 level = -level;
102             } else {
103                 level = (((level << 1) + 1) * qscale *
104                          ((int) (quant_matrix[j]))) >> 4;
105                 level = (level - 1) | 1;
106             }
107             block[j] = level;
108         }
109     }
110 }
111
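/*
 * MPEG-2 intra dequantization: same scaling as MPEG-1 but without the
 * oddification; the spec's per-block mismatch control is only done in the
 * bitexact variant below.
 */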
112 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
113                                    int16_t *block, int n, int qscale)
114 {
115     int i, level, nCoeffs;
116     const uint16_t *quant_matrix;
117
118     if(s->alternate_scan) nCoeffs= 63;
119     else nCoeffs= s->block_last_index[n];
120
121     if (n < 4)
122         block[0] = block[0] * s->y_dc_scale;
123     else
124         block[0] = block[0] * s->c_dc_scale;
125     quant_matrix = s->intra_matrix;
126     for(i=1;i<=nCoeffs;i++) {
127         int j= s->intra_scantable.permutated[i];
128         level = block[j];
129         if (level) {
130             if (level < 0) {
131                 level = -level;
132                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
133                 level = -level;
134             } else {
135                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
136             }
137             block[j] = level;
138         }
139     }
140 }
141
142 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
143                                    int16_t *block, int n, int qscale)
144 {
145     int i, level, nCoeffs;
146     const uint16_t *quant_matrix;
147     int sum=-1;
148
149     if(s->alternate_scan) nCoeffs= 63;
150     else nCoeffs= s->block_last_index[n];
151
152     if (n < 4)
153         block[0] = block[0] * s->y_dc_scale;
154     else
155         block[0] = block[0] * s->c_dc_scale;
156     quant_matrix = s->intra_matrix;
157     for(i=1;i<=nCoeffs;i++) {
158         int j= s->intra_scantable.permutated[i];
159         level = block[j];
160         if (level) {
161             if (level < 0) {
162                 level = -level;
163                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
164                 level = -level;
165             } else {
166                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
167             }
168             block[j] = level;
169             sum+=level;
170         }
171     }
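    /* MPEG-2 mismatch control: keep the coefficient sum odd by toggling the
     * LSB of the last coefficient (sum starts at -1). */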
172     block[63]^=sum&1;
173 }
174
175 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
176                                    int16_t *block, int n, int qscale)
177 {
178     int i, level, nCoeffs;
179     const uint16_t *quant_matrix;
180     int sum=-1;
181
182     if(s->alternate_scan) nCoeffs= 63;
183     else nCoeffs= s->block_last_index[n];
184
185     quant_matrix = s->inter_matrix;
186     for(i=0; i<=nCoeffs; i++) {
187         int j= s->intra_scantable.permutated[i];
188         level = block[j];
189         if (level) {
190             if (level < 0) {
191                 level = -level;
192                 level = (((level << 1) + 1) * qscale *
193                          ((int) (quant_matrix[j]))) >> 4;
194                 level = -level;
195             } else {
196                 level = (((level << 1) + 1) * qscale *
197                          ((int) (quant_matrix[j]))) >> 4;
198             }
199             block[j] = level;
200             sum+=level;
201         }
202     }
203     block[63]^=sum&1;
204 }
205
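/*
 * H.263 / MPEG-4 dequantization: level' = level * 2*qscale +/- qadd, where
 * qadd = (qscale - 1) | 1 (and 0 when advanced intra coding is in use).
 */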
206 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
207                                   int16_t *block, int n, int qscale)
208 {
209     int i, level, qmul, qadd;
210     int nCoeffs;
211
212     assert(s->block_last_index[n]>=0);
213
214     qmul = qscale << 1;
215
216     if (!s->h263_aic) {
217         if (n < 4)
218             block[0] = block[0] * s->y_dc_scale;
219         else
220             block[0] = block[0] * s->c_dc_scale;
221         qadd = (qscale - 1) | 1;
222     }else{
223         qadd = 0;
224     }
225     if(s->ac_pred)
226         nCoeffs=63;
227     else
228         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
229
230     for(i=1; i<=nCoeffs; i++) {
231         level = block[i];
232         if (level) {
233             if (level < 0) {
234                 level = level * qmul - qadd;
235             } else {
236                 level = level * qmul + qadd;
237             }
238             block[i] = level;
239         }
240     }
241 }
242
243 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
244                                   int16_t *block, int n, int qscale)
245 {
246     int i, level, qmul, qadd;
247     int nCoeffs;
248
249     assert(s->block_last_index[n]>=0);
250
251     qadd = (qscale - 1) | 1;
252     qmul = qscale << 1;
253
254     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
255
256     for(i=0; i<=nCoeffs; i++) {
257         level = block[i];
258         if (level) {
259             if (level < 0) {
260                 level = level * qmul - qadd;
261             } else {
262                 level = level * qmul + qadd;
263             }
264             block[i] = level;
265         }
266     }
267 }
268
269 /* init common dct for both encoder and decoder */
270 static av_cold int dct_init(MpegEncContext *s)
271 {
272     ff_blockdsp_init(&s->bdsp);
273     ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
274     ff_mpegvideodsp_init(&s->mdsp);
275     ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
276
277     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
278     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
279     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
280     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
281     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
282     if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
283         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
284     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
285
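    /* The HAVE_* and ARCH_* macros are compile-time constants, so the unused
     * branches below are removed by dead-code elimination. */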
286     if (HAVE_INTRINSICS_NEON)
287         ff_mpv_common_init_neon(s);
288
289     if (ARCH_ARM)
290         ff_mpv_common_init_arm(s);
291     if (ARCH_PPC)
292         ff_mpv_common_init_ppc(s);
293     if (ARCH_X86)
294         ff_mpv_common_init_x86(s);
295
296     return 0;
297 }
298
299 av_cold void ff_mpv_idct_init(MpegEncContext *s)
300 {
301     ff_idctdsp_init(&s->idsp, s->avctx);
302
303     /* load & permute scantables
304      * note: only WMV uses different ones
305      */
306     if (s->alternate_scan) {
307         ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
308         ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
309     } else {
310         ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
311         ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
312     }
313     ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
314     ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
315 }
316
317 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
318 {
319     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0,
320                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
321                             s->mb_stride, s->mb_height, s->b8_stride,
322                             &s->linesize, &s->uvlinesize);
323 }
324
325 static int init_duplicate_context(MpegEncContext *s)
326 {
327     int y_size = s->b8_stride * (2 * s->mb_height + 1);
328     int c_size = s->mb_stride * (s->mb_height + 1);
329     int yc_size = y_size + 2 * c_size;
330     int i;
331
332     s->sc.edge_emu_buffer =
333     s->me.scratchpad   =
334     s->me.temp         =
335     s->sc.rd_scratchpad   =
336     s->sc.b_scratchpad    =
337     s->sc.obmc_scratchpad = NULL;
338
339     if (s->encoding) {
340         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
341                           ME_MAP_SIZE * sizeof(uint32_t), fail)
342         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
343                           ME_MAP_SIZE * sizeof(uint32_t), fail)
344         if (s->noise_reduction) {
345             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
346                               2 * 64 * sizeof(int), fail)
347         }
348     }
349     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
350     s->block = s->blocks[0];
351
352     for (i = 0; i < 12; i++) {
353         s->pblocks[i] = &s->block[i];
354     }
355     if (s->avctx->codec_tag == AV_RL32("VCR2")) {
356         // exchange uv
357         int16_t (*tmp)[64];
358         tmp           = s->pblocks[4];
359         s->pblocks[4] = s->pblocks[5];
360         s->pblocks[5] = tmp;
361     }
362
363     if (s->out_format == FMT_H263) {
364         /* ac values */
365         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
366                           yc_size * sizeof(int16_t) * 16, fail);
367         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
368         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
369         s->ac_val[2] = s->ac_val[1] + c_size;
370     }
371
372     return 0;
373 fail:
374     return -1; // free() through ff_mpv_common_end()
375 }
376
377 static void free_duplicate_context(MpegEncContext *s)
378 {
379     if (!s)
380         return;
381
382     av_freep(&s->sc.edge_emu_buffer);
383     av_freep(&s->me.scratchpad);
384     s->me.temp =
385     s->sc.rd_scratchpad =
386     s->sc.b_scratchpad =
387     s->sc.obmc_scratchpad = NULL;
388
389     av_freep(&s->dct_error_sum);
390     av_freep(&s->me.map);
391     av_freep(&s->me.score_map);
392     av_freep(&s->blocks);
393     av_freep(&s->ac_val_base);
394     s->block = NULL;
395 }
396
397 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
398 {
399 #define COPY(a) bak->a = src->a
400     COPY(sc.edge_emu_buffer);
401     COPY(me.scratchpad);
402     COPY(me.temp);
403     COPY(sc.rd_scratchpad);
404     COPY(sc.b_scratchpad);
405     COPY(sc.obmc_scratchpad);
406     COPY(me.map);
407     COPY(me.score_map);
408     COPY(blocks);
409     COPY(block);
410     COPY(start_mb_y);
411     COPY(end_mb_y);
412     COPY(me.map_generation);
413     COPY(pb);
414     COPY(dct_error_sum);
415     COPY(dct_count[0]);
416     COPY(dct_count[1]);
417     COPY(ac_val_base);
418     COPY(ac_val[0]);
419     COPY(ac_val[1]);
420     COPY(ac_val[2]);
421 #undef COPY
422 }
423
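/*
 * Copy the shared state from src into dst while preserving dst's per-thread
 * scratch buffers and block storage (saved and restored via
 * backup_duplicate_context()).
 */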
424 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
425 {
426     MpegEncContext bak;
427     int i, ret;
428     // FIXME copy only needed parts
429     // START_TIMER
430     backup_duplicate_context(&bak, dst);
431     memcpy(dst, src, sizeof(MpegEncContext));
432     backup_duplicate_context(dst, &bak);
433     for (i = 0; i < 12; i++) {
434         dst->pblocks[i] = &dst->block[i];
435     }
436     if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
437         // exchange uv
438         int16_t (*tmp)[64];
439         tmp             = dst->pblocks[4];
440         dst->pblocks[4] = dst->pblocks[5];
441         dst->pblocks[5] = tmp;
442     }
443     if (!dst->sc.edge_emu_buffer &&
444         (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
445                                        &dst->sc, dst->linesize)) < 0) {
446         av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
447                "scratch buffers.\n");
448         return ret;
449     }
450     // STOP_TIMER("update_duplicate_context")
451     // about 10k cycles / 0.01 sec for 1000 frames on 1 GHz with 2 threads
452     return 0;
453 }
454
455 int ff_mpeg_update_thread_context(AVCodecContext *dst,
456                                   const AVCodecContext *src)
457 {
458     int i, ret;
459     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
460
461     if (dst == src || !s1->context_initialized)
462         return 0;
463
464     // FIXME can parameters change on I-frames?
465     // in that case dst may need a reinit
466     if (!s->context_initialized) {
467         int err;
468         memcpy(s, s1, sizeof(MpegEncContext));
469
470         s->avctx                 = dst;
471         s->bitstream_buffer      = NULL;
472         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
473
474         ff_mpv_idct_init(s);
475         if ((err = ff_mpv_common_init(s)) < 0)
476             return err;
477     }
478
479     if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
480         int err;
481         s->context_reinit = 0;
482         s->height = s1->height;
483         s->width  = s1->width;
484         if ((err = ff_mpv_common_frame_size_change(s)) < 0)
485             return err;
486     }
487
488     s->avctx->coded_height  = s1->avctx->coded_height;
489     s->avctx->coded_width   = s1->avctx->coded_width;
490     s->avctx->width         = s1->avctx->width;
491     s->avctx->height        = s1->avctx->height;
492
493     s->coded_picture_number = s1->coded_picture_number;
494     s->picture_number       = s1->picture_number;
495
496     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
497         ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
498         if (s1->picture[i].f->buf[0] &&
499             (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
500             return ret;
501     }
502
503 #define UPDATE_PICTURE(pic)\
504 do {\
505     ff_mpeg_unref_picture(s->avctx, &s->pic);\
506     if (s1->pic.f->buf[0])\
507         ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
508     else\
509         ret = ff_update_picture_tables(&s->pic, &s1->pic);\
510     if (ret < 0)\
511         return ret;\
512 } while (0)
513
514     UPDATE_PICTURE(current_picture);
515     UPDATE_PICTURE(last_picture);
516     UPDATE_PICTURE(next_picture);
517
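/* Translate a Picture pointer that points into the old context's picture[]
 * array to the corresponding slot in the new context (NULL otherwise). */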
518 #define REBASE_PICTURE(pic, new_ctx, old_ctx)                                 \
519     ((pic && pic >= old_ctx->picture &&                                       \
520       pic < old_ctx->picture + MAX_PICTURE_COUNT) ?                           \
521         &new_ctx->picture[pic - old_ctx->picture] : NULL)
522
523     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
524     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
525     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
526
527     // Error/bug resilience
528     s->next_p_frame_damaged = s1->next_p_frame_damaged;
529     s->workaround_bugs      = s1->workaround_bugs;
530
531     // MPEG-4 timing info
532     memcpy(&s->last_time_base, &s1->last_time_base,
533            (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
534            (char *) &s1->last_time_base);
535
536     // B-frame info
537     s->max_b_frames = s1->max_b_frames;
538     s->low_delay    = s1->low_delay;
539     s->droppable    = s1->droppable;
540
541     // DivX handling (doesn't work)
542     s->divx_packed  = s1->divx_packed;
543
544     if (s1->bitstream_buffer) {
545         if (s1->bitstream_buffer_size +
546             AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
547             av_fast_malloc(&s->bitstream_buffer,
548                            &s->allocated_bitstream_buffer_size,
549                            s1->allocated_bitstream_buffer_size);
550         s->bitstream_buffer_size = s1->bitstream_buffer_size;
551         memcpy(s->bitstream_buffer, s1->bitstream_buffer,
552                s1->bitstream_buffer_size);
553         memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
554                AV_INPUT_BUFFER_PADDING_SIZE);
555     }
556
557     // linesize-dependent scratch buffer allocation
558     if (!s->sc.edge_emu_buffer)
559         if (s1->linesize) {
560             if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
561                                         &s->sc, s1->linesize) < 0) {
562                 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
563                        "scratch buffers.\n");
564                 return AVERROR(ENOMEM);
565             }
566         } else {
567             av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
568                    "be allocated due to unknown size.\n");
569             return AVERROR_BUG;
570         }
571
572     // MPEG-2/interlacing info
573     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
574            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
575
576     if (!s1->first_field) {
577         s->last_pict_type = s1->pict_type;
578         if (s1->current_picture_ptr)
579             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
580     }
581
582     return 0;
583 }
584
585 /**
586  * Set the given MpegEncContext to common defaults
587  * (same for encoding and decoding).
588  * The changed fields will not depend upon the
589  * prior state of the MpegEncContext.
590  */
591 void ff_mpv_common_defaults(MpegEncContext *s)
592 {
593     s->y_dc_scale_table      =
594     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
595     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
596     s->progressive_frame     = 1;
597     s->progressive_sequence  = 1;
598     s->picture_structure     = PICT_FRAME;
599
600     s->coded_picture_number  = 0;
601     s->picture_number        = 0;
602
603     s->f_code                = 1;
604     s->b_code                = 1;
605
606     s->slice_context_count   = 1;
607 }
608
609 /**
610  * Set the given MpegEncContext to defaults for decoding.
611  * The changed fields will not depend upon
612  * the prior state of the MpegEncContext.
613  */
614 void ff_mpv_decode_defaults(MpegEncContext *s)
615 {
616     ff_mpv_common_defaults(s);
617 }
618
619 /**
620  * Initialize and allocate MpegEncContext fields dependent on the resolution.
621  */
622 static int init_context_frame(MpegEncContext *s)
623 {
624     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
625
626     s->mb_width   = (s->width + 15) / 16;
627     s->mb_stride  = s->mb_width + 1;
628     s->b8_stride  = s->mb_width * 2 + 1;
629     mb_array_size = s->mb_height * s->mb_stride;
630     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
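    /* The spare column in mb_stride and the extra rows in mv_table_size act as
     * guard bands so neighbour lookups at the picture border stay in bounds. */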
631
632     /* set default edge pos, will be overridden
633      * in decode_header if needed */
634     s->h_edge_pos = s->mb_width * 16;
635     s->v_edge_pos = s->mb_height * 16;
636
637     s->mb_num     = s->mb_width * s->mb_height;
638
639     s->block_wrap[0] =
640     s->block_wrap[1] =
641     s->block_wrap[2] =
642     s->block_wrap[3] = s->b8_stride;
643     s->block_wrap[4] =
644     s->block_wrap[5] = s->mb_stride;
645
646     y_size  = s->b8_stride * (2 * s->mb_height + 1);
647     c_size  = s->mb_stride * (s->mb_height + 1);
648     yc_size = y_size + 2   * c_size;
649
650     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
651                       fail); // error resilience code looks cleaner with this
652     for (y = 0; y < s->mb_height; y++)
653         for (x = 0; x < s->mb_width; x++)
654             s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
655
656     s->mb_index2xy[s->mb_height * s->mb_width] =
657         (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
658
659     if (s->encoding) {
660         /* Allocate MV tables */
661         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
662                           mv_table_size * 2 * sizeof(int16_t), fail);
663         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
664                           mv_table_size * 2 * sizeof(int16_t), fail);
665         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
666                           mv_table_size * 2 * sizeof(int16_t), fail);
667         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
668                           mv_table_size * 2 * sizeof(int16_t), fail);
669         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
670                           mv_table_size * 2 * sizeof(int16_t), fail);
671         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
672                           mv_table_size * 2 * sizeof(int16_t), fail);
673         s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
674         s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
675         s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
676         s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
677                                    s->mb_stride + 1;
678         s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
679                                    s->mb_stride + 1;
680         s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
681
682         /* Allocate MB type table */
683         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
684                           sizeof(uint16_t), fail); // needed for encoding
685
686         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
687                           sizeof(int), fail);
688
689         FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
690                          mb_array_size * sizeof(float), fail);
691         FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
692                          mb_array_size * sizeof(float), fail);
693
694     }
695
696     if (s->codec_id == AV_CODEC_ID_MPEG4 ||
697         (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
698         /* interlaced direct mode decoding tables */
699         for (i = 0; i < 2; i++) {
700             int j, k;
701             for (j = 0; j < 2; j++) {
702                 for (k = 0; k < 2; k++) {
703                     FF_ALLOCZ_OR_GOTO(s->avctx,
704                                       s->b_field_mv_table_base[i][j][k],
705                                       mv_table_size * 2 * sizeof(int16_t),
706                                       fail);
707                     s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
708                                                    s->mb_stride + 1;
709                 }
710                 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
711                                   mb_array_size * 2 * sizeof(uint8_t), fail);
712                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
713                                   mv_table_size * 2 * sizeof(int16_t), fail);
714                 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
715                                             + s->mb_stride + 1;
716             }
717             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
718                               mb_array_size * 2 * sizeof(uint8_t), fail);
719         }
720     }
721     if (s->out_format == FMT_H263) {
722         /* cbp values */
723         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
724         s->coded_block = s->coded_block_base + s->b8_stride + 1;
725
726         /* cbp, ac_pred, pred_dir */
727         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
728                           mb_array_size * sizeof(uint8_t), fail);
729         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
730                           mb_array_size * sizeof(uint8_t), fail);
731     }
732
733     if (s->h263_pred || s->h263_plus || !s->encoding) {
734         /* dc values */
735         // MN: we need these for  error resilience of intra-frames
736         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
737                           yc_size * sizeof(int16_t), fail);
738         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
739         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
740         s->dc_val[2] = s->dc_val[1] + c_size;
741         for (i = 0; i < yc_size; i++)
742             s->dc_val_base[i] = 1024;
743     }
744
745     /* which mb is an intra block */
746     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
747     memset(s->mbintra_table, 1, mb_array_size);
748
749     /* init macroblock skip table */
750     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
751     // Note the + 1 is for  a quicker MPEG-4 slice_end detection
752
753     return ff_mpeg_er_init(s);
754 fail:
755     return AVERROR(ENOMEM);
756 }
757
758 /**
759  * Initialize the common structure for both encoder and decoder.
760  * This assumes that some variables like width/height are already set.
761  */
762 av_cold int ff_mpv_common_init(MpegEncContext *s)
763 {
764     int i;
765     int nb_slices = (HAVE_THREADS &&
766                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
767                     s->avctx->thread_count : 1;
768
769     if (s->encoding && s->avctx->slices)
770         nb_slices = s->avctx->slices;
771
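    /* Interlaced MPEG-2 needs an even number of macroblock rows, with each
     * field rounded up to whole 16-line rows separately. */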
772     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
773         s->mb_height = (s->height + 31) / 32 * 2;
774     else
775         s->mb_height = (s->height + 15) / 16;
776
777     if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
778         av_log(s->avctx, AV_LOG_ERROR,
779                "decoding to AV_PIX_FMT_NONE is not supported.\n");
780         return -1;
781     }
782
783     if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
784         int max_slices;
785         if (s->mb_height)
786             max_slices = FFMIN(MAX_THREADS, s->mb_height);
787         else
788             max_slices = MAX_THREADS;
789         av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
790                " reducing to %d\n", nb_slices, max_slices);
791         nb_slices = max_slices;
792     }
793
794     if ((s->width || s->height) &&
795         av_image_check_size(s->width, s->height, 0, s->avctx))
796         return -1;
797
798     dct_init(s);
799
800     /* set chroma shifts */
801     av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
802                                      &s->chroma_x_shift,
803                                      &s->chroma_y_shift);
804
805     /* convert fourcc to upper case */
806     s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
807
808     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
809                       MAX_PICTURE_COUNT * sizeof(Picture), fail);
810     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
811         s->picture[i].f = av_frame_alloc();
812         if (!s->picture[i].f)
813             goto fail;
814     }
815     memset(&s->next_picture, 0, sizeof(s->next_picture));
816     memset(&s->last_picture, 0, sizeof(s->last_picture));
817     memset(&s->current_picture, 0, sizeof(s->current_picture));
818     memset(&s->new_picture, 0, sizeof(s->new_picture));
819     s->next_picture.f = av_frame_alloc();
820     if (!s->next_picture.f)
821         goto fail;
822     s->last_picture.f = av_frame_alloc();
823     if (!s->last_picture.f)
824         goto fail;
825     s->current_picture.f = av_frame_alloc();
826     if (!s->current_picture.f)
827         goto fail;
828     s->new_picture.f = av_frame_alloc();
829     if (!s->new_picture.f)
830         goto fail;
831
832     if (s->width && s->height) {
833         if (init_context_frame(s))
834             goto fail;
835
836         s->parse_context.state = -1;
837     }
838
839     s->context_initialized = 1;
840     s->thread_context[0]   = s;
841
842     if (s->width && s->height) {
843         if (nb_slices > 1) {
844             for (i = 1; i < nb_slices; i++) {
845                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
846                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
847             }
848
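            /* Divide the macroblock rows as evenly as possible among the
             * slice contexts, rounding to the nearest row. */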
849             for (i = 0; i < nb_slices; i++) {
850                 if (init_duplicate_context(s->thread_context[i]) < 0)
851                     goto fail;
852                 s->thread_context[i]->start_mb_y =
853                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
854                 s->thread_context[i]->end_mb_y   =
855                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
856             }
857         } else {
858             if (init_duplicate_context(s) < 0)
859                 goto fail;
860             s->start_mb_y = 0;
861             s->end_mb_y   = s->mb_height;
862         }
863         s->slice_context_count = nb_slices;
864     }
865
866     return 0;
867  fail:
868     ff_mpv_common_end(s);
869     return -1;
870 }
871
872 /**
873  * Free and reset MpegEncContext fields depending on the resolution.
874  * Used during resolution changes to avoid a full reinitialization of the
875  * codec.
876  */
877 static void free_context_frame(MpegEncContext *s)
878 {
879     int i, j, k;
880
881     av_freep(&s->mb_type);
882     av_freep(&s->p_mv_table_base);
883     av_freep(&s->b_forw_mv_table_base);
884     av_freep(&s->b_back_mv_table_base);
885     av_freep(&s->b_bidir_forw_mv_table_base);
886     av_freep(&s->b_bidir_back_mv_table_base);
887     av_freep(&s->b_direct_mv_table_base);
888     s->p_mv_table            = NULL;
889     s->b_forw_mv_table       = NULL;
890     s->b_back_mv_table       = NULL;
891     s->b_bidir_forw_mv_table = NULL;
892     s->b_bidir_back_mv_table = NULL;
893     s->b_direct_mv_table     = NULL;
894     for (i = 0; i < 2; i++) {
895         for (j = 0; j < 2; j++) {
896             for (k = 0; k < 2; k++) {
897                 av_freep(&s->b_field_mv_table_base[i][j][k]);
898                 s->b_field_mv_table[i][j][k] = NULL;
899             }
900             av_freep(&s->b_field_select_table[i][j]);
901             av_freep(&s->p_field_mv_table_base[i][j]);
902             s->p_field_mv_table[i][j] = NULL;
903         }
904         av_freep(&s->p_field_select_table[i]);
905     }
906
907     av_freep(&s->dc_val_base);
908     av_freep(&s->coded_block_base);
909     av_freep(&s->mbintra_table);
910     av_freep(&s->cbp_table);
911     av_freep(&s->pred_dir_table);
912
913     av_freep(&s->mbskip_table);
914
915     av_freep(&s->er.error_status_table);
916     av_freep(&s->er.er_temp_buffer);
917     av_freep(&s->mb_index2xy);
918     av_freep(&s->lambda_table);
919     av_freep(&s->cplx_tab);
920     av_freep(&s->bits_tab);
921
922     s->linesize = s->uvlinesize = 0;
923 }
924
925 int ff_mpv_common_frame_size_change(MpegEncContext *s)
926 {
927     int i, err = 0;
928
929     if (s->slice_context_count > 1) {
930         for (i = 0; i < s->slice_context_count; i++) {
931             free_duplicate_context(s->thread_context[i]);
932         }
933         for (i = 1; i < s->slice_context_count; i++) {
934             av_freep(&s->thread_context[i]);
935         }
936     } else
937         free_duplicate_context(s);
938
939     free_context_frame(s);
940
941     if (s->picture)
942         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
943                 s->picture[i].needs_realloc = 1;
944         }
945
946     s->last_picture_ptr         =
947     s->next_picture_ptr         =
948     s->current_picture_ptr      = NULL;
949
950     // init
951     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
952         s->mb_height = (s->height + 31) / 32 * 2;
953     else
954         s->mb_height = (s->height + 15) / 16;
955
956     if ((s->width || s->height) &&
957         (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
958         goto fail;
959
960     if ((err = init_context_frame(s)))
961         goto fail;
962
963     s->thread_context[0]   = s;
964
965     if (s->width && s->height) {
966         int nb_slices = s->slice_context_count;
967         if (nb_slices > 1) {
968             for (i = 1; i < nb_slices; i++) {
969                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
970                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
971             }
972
973             for (i = 0; i < nb_slices; i++) {
974                 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
975                     goto fail;
976                 s->thread_context[i]->start_mb_y =
977                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
978                 s->thread_context[i]->end_mb_y   =
979                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
980             }
981         } else {
982             if (init_duplicate_context(s) < 0)
983                 goto fail;
984             s->start_mb_y = 0;
985             s->end_mb_y   = s->mb_height;
986         }
987         s->slice_context_count = nb_slices;
988     }
989
990     return 0;
991  fail:
992     ff_mpv_common_end(s);
993     return err;
994 }
995
996 /* free the common structure used by both encoder and decoder */
997 void ff_mpv_common_end(MpegEncContext *s)
998 {
999     int i;
1000
1001     if (s->slice_context_count > 1) {
1002         for (i = 0; i < s->slice_context_count; i++) {
1003             free_duplicate_context(s->thread_context[i]);
1004         }
1005         for (i = 1; i < s->slice_context_count; i++) {
1006             av_freep(&s->thread_context[i]);
1007         }
1008         s->slice_context_count = 1;
1009     } else free_duplicate_context(s);
1010
1011     av_freep(&s->parse_context.buffer);
1012     s->parse_context.buffer_size = 0;
1013
1014     av_freep(&s->bitstream_buffer);
1015     s->allocated_bitstream_buffer_size = 0;
1016
1017     if (s->picture) {
1018         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1019             ff_free_picture_tables(&s->picture[i]);
1020             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1021             av_frame_free(&s->picture[i].f);
1022         }
1023     }
1024     av_freep(&s->picture);
1025     ff_free_picture_tables(&s->last_picture);
1026     ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1027     av_frame_free(&s->last_picture.f);
1028     ff_free_picture_tables(&s->current_picture);
1029     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1030     av_frame_free(&s->current_picture.f);
1031     ff_free_picture_tables(&s->next_picture);
1032     ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1033     av_frame_free(&s->next_picture.f);
1034     ff_free_picture_tables(&s->new_picture);
1035     ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1036     av_frame_free(&s->new_picture.f);
1037
1038     free_context_frame(s);
1039
1040     s->context_initialized      = 0;
1041     s->last_picture_ptr         =
1042     s->next_picture_ptr         =
1043     s->current_picture_ptr      = NULL;
1044     s->linesize = s->uvlinesize = 0;
1045 }
1046
1047 /**
1048  * Generic function called after decoding
1049  * the header and before a frame is decoded.
1050  */
1051 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1052 {
1053     int i, ret;
1054     Picture *pic;
1055     s->mb_skipped = 0;
1056
1057     /* mark & release old frames */
1058     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1059         s->last_picture_ptr != s->next_picture_ptr &&
1060         s->last_picture_ptr->f->buf[0]) {
1061         ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1062     }
1063
1064     /* release forgotten pictures */
1065     /* if (MPEG-1/2/4 / H.263) */
1066     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1067         if (&s->picture[i] != s->last_picture_ptr &&
1068             &s->picture[i] != s->next_picture_ptr &&
1069             s->picture[i].reference && !s->picture[i].needs_realloc) {
1070             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1071         }
1072     }
1073
1074     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1075
1076     /* release non reference frames */
1077     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1078         if (!s->picture[i].reference)
1079             ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1080     }
1081
1082     if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1083         // we already have an unused image
1084         // (maybe it was set before reading the header)
1085         pic = s->current_picture_ptr;
1086     } else {
1087         i   = ff_find_unused_picture(s->avctx, s->picture, 0);
1088         if (i < 0) {
1089             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1090             return i;
1091         }
1092         pic = &s->picture[i];
1093     }
1094
1095     pic->reference = 0;
1096     if (!s->droppable) {
1097         if (s->pict_type != AV_PICTURE_TYPE_B)
1098             pic->reference = 3;
1099     }
1100
1101     pic->f->coded_picture_number = s->coded_picture_number++;
1102
1103     if (alloc_picture(s, pic, 0) < 0)
1104         return -1;
1105
1106     s->current_picture_ptr = pic;
1107     // FIXME use only the vars from current_pic
1108     s->current_picture_ptr->f->top_field_first = s->top_field_first;
1109     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1110         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1111         if (s->picture_structure != PICT_FRAME)
1112             s->current_picture_ptr->f->top_field_first =
1113                 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1114     }
1115     s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1116                                                  !s->progressive_sequence;
1117     s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1118
1119     s->current_picture_ptr->f->pict_type = s->pict_type;
1120     // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
1121     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1122     s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1123
1124     if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1125                                    s->current_picture_ptr)) < 0)
1126         return ret;
1127
1128     if (s->pict_type != AV_PICTURE_TYPE_B) {
1129         s->last_picture_ptr = s->next_picture_ptr;
1130         if (!s->droppable)
1131             s->next_picture_ptr = s->current_picture_ptr;
1132     }
1133     ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1134             s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1135             s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
1136             s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
1137             s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1138             s->pict_type, s->droppable);
1139
1140     if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1141         (s->pict_type != AV_PICTURE_TYPE_I ||
1142          s->picture_structure != PICT_FRAME)) {
1143         int h_chroma_shift, v_chroma_shift;
1144         av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1145                                          &h_chroma_shift, &v_chroma_shift);
1146         if (s->pict_type != AV_PICTURE_TYPE_I)
1147             av_log(avctx, AV_LOG_ERROR,
1148                    "warning: first frame is no keyframe\n");
1149         else if (s->picture_structure != PICT_FRAME)
1150             av_log(avctx, AV_LOG_INFO,
1151                    "allocate dummy last picture for field based first keyframe\n");
1152
1153         /* Allocate a dummy frame */
1154         i = ff_find_unused_picture(s->avctx, s->picture, 0);
1155         if (i < 0) {
1156             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1157             return i;
1158         }
1159         s->last_picture_ptr = &s->picture[i];
1160
1161         s->last_picture_ptr->reference   = 3;
1162         s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1163
1164         if (alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1165             s->last_picture_ptr = NULL;
1166             return -1;
1167         }
1168
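        /* Fill the dummy reference with black luma and neutral (0x80) chroma. */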
1169         memset(s->last_picture_ptr->f->data[0], 0,
1170                avctx->height * s->last_picture_ptr->f->linesize[0]);
1171         memset(s->last_picture_ptr->f->data[1], 0x80,
1172                (avctx->height >> v_chroma_shift) *
1173                s->last_picture_ptr->f->linesize[1]);
1174         memset(s->last_picture_ptr->f->data[2], 0x80,
1175                (avctx->height >> v_chroma_shift) *
1176                s->last_picture_ptr->f->linesize[2]);
1177
1178         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1179         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1180     }
1181     if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1182         s->pict_type == AV_PICTURE_TYPE_B) {
1183         /* Allocate a dummy frame */
1184         i = ff_find_unused_picture(s->avctx, s->picture, 0);
1185         if (i < 0) {
1186             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1187             return i;
1188         }
1189         s->next_picture_ptr = &s->picture[i];
1190
1191         s->next_picture_ptr->reference   = 3;
1192         s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1193
1194         if (alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1195             s->next_picture_ptr = NULL;
1196             return -1;
1197         }
1198         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1199         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1200     }
1201
1202     if (s->last_picture_ptr) {
1203         ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1204         if (s->last_picture_ptr->f->buf[0] &&
1205             (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1206                                        s->last_picture_ptr)) < 0)
1207             return ret;
1208     }
1209     if (s->next_picture_ptr) {
1210         ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1211         if (s->next_picture_ptr->f->buf[0] &&
1212             (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1213                                        s->next_picture_ptr)) < 0)
1214             return ret;
1215     }
1216
1217     if (s->pict_type != AV_PICTURE_TYPE_I &&
1218         !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1219         av_log(s, AV_LOG_ERROR,
1220                "Non-reference picture received and no reference available\n");
1221         return AVERROR_INVALIDDATA;
1222     }
1223
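    /* For field pictures, address every second line of the frame buffers:
     * the bottom field starts one line down and all linesizes are doubled. */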
1224     if (s->picture_structure!= PICT_FRAME) {
1225         int i;
1226         for (i = 0; i < 4; i++) {
1227             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1228                 s->current_picture.f->data[i] +=
1229                     s->current_picture.f->linesize[i];
1230             }
1231             s->current_picture.f->linesize[i] *= 2;
1232             s->last_picture.f->linesize[i]    *= 2;
1233             s->next_picture.f->linesize[i]    *= 2;
1234         }
1235     }
1236
1237     /* set dequantizer, we can't do it during init as
1238      * it might change for MPEG-4 and we can't do it in the header
1239      * decode as init is not called for MPEG-4 there yet */
1240     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1241         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1242         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1243     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1244         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1245         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1246     } else {
1247         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1248         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1249     }
1250
1251     return 0;
1252 }
1253
1254 /* called after a frame has been decoded. */
1255 void ff_mpv_frame_end(MpegEncContext *s)
1256 {
1257     emms_c();
1258
1259     if (s->current_picture.reference)
1260         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1261 }
1262
1263 /**
1264  * Print debugging info for the given picture.
1265  */
1266 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1267 {
1268     AVFrame *pict;
1269     if (s->avctx->hwaccel || !p || !p->mb_type)
1270         return;
1271     pict = p->f;
1272
1273     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1274         int x,y;
1275
1276         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1277         switch (pict->pict_type) {
1278         case AV_PICTURE_TYPE_I:
1279             av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1280             break;
1281         case AV_PICTURE_TYPE_P:
1282             av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1283             break;
1284         case AV_PICTURE_TYPE_B:
1285             av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1286             break;
1287         case AV_PICTURE_TYPE_S:
1288             av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1289             break;
1290         case AV_PICTURE_TYPE_SI:
1291             av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1292             break;
1293         case AV_PICTURE_TYPE_SP:
1294             av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1295             break;
1296         }
1297         for (y = 0; y < s->mb_height; y++) {
1298             for (x = 0; x < s->mb_width; x++) {
1299                 if (s->avctx->debug & FF_DEBUG_SKIP) {
1300                     int count = s->mbskip_table[x + y * s->mb_stride];
1301                     if (count > 9)
1302                         count = 9;
1303                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1304                 }
1305                 if (s->avctx->debug & FF_DEBUG_QP) {
1306                     av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1307                            p->qscale_table[x + y * s->mb_stride]);
1308                 }
1309                 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1310                     int mb_type = p->mb_type[x + y * s->mb_stride];
1311                     // Type & MV direction
1312                     if (IS_PCM(mb_type))
1313                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1314                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1315                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1316                     else if (IS_INTRA4x4(mb_type))
1317                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1318                     else if (IS_INTRA16x16(mb_type))
1319                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1320                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1321                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1322                     else if (IS_DIRECT(mb_type))
1323                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1324                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1325                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1326                     else if (IS_GMC(mb_type))
1327                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1328                     else if (IS_SKIP(mb_type))
1329                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1330                     else if (!USES_LIST(mb_type, 1))
1331                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1332                     else if (!USES_LIST(mb_type, 0))
1333                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1334                     else {
1335                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1336                         av_log(s->avctx, AV_LOG_DEBUG, "X");
1337                     }
1338
1339                     // segmentation
1340                     if (IS_8X8(mb_type))
1341                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1342                     else if (IS_16X8(mb_type))
1343                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1344                     else if (IS_8X16(mb_type))
1345                         av_log(s->avctx, AV_LOG_DEBUG, "|");
1346                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1347                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1348                     else
1349                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1350
1351
1352                     if (IS_INTERLACED(mb_type))
1353                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1354                     else
1355                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1356                 }
1357             }
1358             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1359         }
1360     }
1361 }
1362
1363 /**
1364  * find the lowest MB row referenced in the MVs
1365  */
1366 static int lowest_referenced_row(MpegEncContext *s, int dir)
1367 {
1368     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1369     int my, off, i, mvs;
1370
1371     if (s->picture_structure != PICT_FRAME || s->mcsel)
1372         goto unhandled;
1373
1374     switch (s->mv_type) {
1375         case MV_TYPE_16X16:
1376             mvs = 1;
1377             break;
1378         case MV_TYPE_16X8:
1379             mvs = 2;
1380             break;
1381         case MV_TYPE_8X8:
1382             mvs = 4;
1383             break;
1384         default:
1385             goto unhandled;
1386     }
1387
1388     for (i = 0; i < mvs; i++) {
1389         my = s->mv[dir][i][1]<<qpel_shift;
1390         my_max = FFMAX(my_max, my);
1391         my_min = FFMIN(my_min, my);
1392     }
1393
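    /* Convert the largest vertical displacement (in quarter-pel units) into
     * macroblock rows, rounding up. */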
1394     off = (FFMAX(-my_min, my_max) + 63) >> 6;
1395
1396     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1397 unhandled:
1398     return s->mb_height-1;
1399 }
1400
1401 /* put block[] to dest[] */
1402 static inline void put_dct(MpegEncContext *s,
1403                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1404 {
1405     s->dct_unquantize_intra(s, block, i, qscale);
1406     s->idsp.idct_put(dest, line_size, block);
1407 }
1408
1409 /* add block[] to dest[] */
1410 static inline void add_dct(MpegEncContext *s,
1411                            int16_t *block, int i, uint8_t *dest, int line_size)
1412 {
1413     if (s->block_last_index[i] >= 0) {
1414         s->idsp.idct_add(dest, line_size, block);
1415     }
1416 }
1417
1418 static inline void add_dequant_dct(MpegEncContext *s,
1419                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1420 {
1421     if (s->block_last_index[i] >= 0) {
1422         s->dct_unquantize_inter(s, block, i, qscale);
1423
1424         s->idsp.idct_add(dest, line_size, block);
1425     }
1426 }
1427
1428 /**
1429  * Clean dc, ac, coded_block for the current non-intra MB.
1430  */
1431 void ff_clean_intra_table_entries(MpegEncContext *s)
1432 {
1433     int wrap = s->b8_stride;
1434     int xy = s->block_index[0];
1435
1436     s->dc_val[0][xy           ] =
1437     s->dc_val[0][xy + 1       ] =
1438     s->dc_val[0][xy     + wrap] =
1439     s->dc_val[0][xy + 1 + wrap] = 1024;
1440     /* ac pred */
1441     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1442     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1443     if (s->msmpeg4_version>=3) {
1444         s->coded_block[xy           ] =
1445         s->coded_block[xy + 1       ] =
1446         s->coded_block[xy     + wrap] =
1447         s->coded_block[xy + 1 + wrap] = 0;
1448     }
1449     /* chroma */
1450     wrap = s->mb_stride;
1451     xy = s->mb_x + s->mb_y * wrap;
1452     s->dc_val[1][xy] =
1453     s->dc_val[2][xy] = 1024;
1454     /* ac pred */
1455     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1456     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1457
1458     s->mbintra_table[xy]= 0;
1459 }
1460
1461 /* generic function called after a macroblock has been parsed by the
1462    decoder or after it has been encoded by the encoder.
1463
1464    Important variables used:
1465    s->mb_intra : true if intra macroblock
1466    s->mv_dir   : motion vector direction
1467    s->mv_type  : motion vector type
1468    s->mv       : motion vector
1469    s->interlaced_dct : true if interlaced dct used (mpeg2)
1470  */
1471 static av_always_inline
1472 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1473                             int is_mpeg12)
1474 {
1475     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1476
1477     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1478        /* print DCT coefficients */
1479        int i,j;
1480        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1481        for(i=0; i<6; i++){
1482            for(j=0; j<64; j++){
1483                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1484                       block[i][s->idsp.idct_permutation[j]]);
1485            }
1486            av_log(s->avctx, AV_LOG_DEBUG, "\n");
1487        }
1488     }
1489
1490     s->current_picture.qscale_table[mb_xy] = s->qscale;
1491
1492     /* update DC predictors for P macroblocks */
1493     if (!s->mb_intra) {
1494         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1495             if(s->mbintra_table[mb_xy])
1496                 ff_clean_intra_table_entries(s);
1497         } else {
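            /* MPEG-1/2 style DC prediction: reset the predictors to the
             * mid-level value, scaled by the intra DC precision. */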
1498             s->last_dc[0] =
1499             s->last_dc[1] =
1500             s->last_dc[2] = 128 << s->intra_dc_precision;
1501         }
1502     }
1503     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1504         s->mbintra_table[mb_xy]=1;
1505
1506     if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) ||
1507         !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1508           s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1509         uint8_t *dest_y, *dest_cb, *dest_cr;
1510         int dct_linesize, dct_offset;
1511         op_pixels_func (*op_pix)[4];
1512         qpel_mc_func (*op_qpix)[16];
1513         const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1514         const int uvlinesize = s->current_picture.f->linesize[1];
1515         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1516         const int block_size = 8;
1517
1518         /* avoid the copy if the macroblock was skipped in the last frame too */
1519         /* track skips only while decoding, as the encoder may trash the buffers a bit */
1520         if(!s->encoding){
1521             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1522
1523             if (s->mb_skipped) {
1524                 s->mb_skipped= 0;
1525                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1526                 *mbskip_ptr = 1;
1527             } else if(!s->current_picture.reference) {
1528                 *mbskip_ptr = 1;
1529             } else{
1530                 *mbskip_ptr = 0; /* not skipped */
1531             }
1532         }
1533
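        /* With interlaced DCT each 8x8 luma block covers alternating (field)
         * lines of the MB, so rows are addressed with twice the frame stride
         * and the lower blocks start only one frame line further down. */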
1534         dct_linesize = linesize << s->interlaced_dct;
1535         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
1536
1537         if(readable){
1538             dest_y=  s->dest[0];
1539             dest_cb= s->dest[1];
1540             dest_cr= s->dest[2];
1541         }else{
1542             dest_y = s->sc.b_scratchpad;
1543             dest_cb= s->sc.b_scratchpad+16*linesize;
1544             dest_cr= s->sc.b_scratchpad+32*linesize;
1545         }
1546
1547         if (!s->mb_intra) {
1548             /* motion handling */
1549             /* motion compensation is done here only when decoding; the encoder has already done it */
1550             if(!s->encoding){
1551
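                /* With frame-level multithreading, wait until the reference
                 * pictures have decoded at least the rows this MB's motion
                 * vectors can reach (see lowest_referenced_row()). */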
1552                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1553                     if (s->mv_dir & MV_DIR_FORWARD) {
1554                         ff_thread_await_progress(&s->last_picture_ptr->tf,
1555                                                  lowest_referenced_row(s, 0),
1556                                                  0);
1557                     }
1558                     if (s->mv_dir & MV_DIR_BACKWARD) {
1559                         ff_thread_await_progress(&s->next_picture_ptr->tf,
1560                                                  lowest_referenced_row(s, 1),
1561                                                  0);
1562                     }
1563                 }
1564
1565                 op_qpix= s->me.qpel_put;
1566                 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1567                     op_pix = s->hdsp.put_pixels_tab;
1568                 }else{
1569                     op_pix = s->hdsp.put_no_rnd_pixels_tab;
1570                 }
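                /* The forward (or only) prediction is written with the put
                 * functions (no-rounding variant where applicable); if a
                 * backward prediction follows, it is blended in with the avg
                 * functions to form the bidirectional prediction. */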
1571                 if (s->mv_dir & MV_DIR_FORWARD) {
1572                     ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
1573                     op_pix = s->hdsp.avg_pixels_tab;
1574                     op_qpix= s->me.qpel_avg;
1575                 }
1576                 if (s->mv_dir & MV_DIR_BACKWARD) {
1577                     ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
1578                 }
1579             }
1580
1581             /* skip dequant / idct if we are really late ;) */
1582             if(s->avctx->skip_idct){
1583                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1584                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1585                    || s->avctx->skip_idct >= AVDISCARD_ALL)
1586                     goto skip_idct;
1587             }
1588
1589             /* add dct residue */
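            /* For MSMPEG4, MPEG-1/2 and MPEG-4 with H.263 quantization the
             * coefficients are already dequantized while the bitstream is
             * decoded, so only the IDCT-and-add remains; other codecs (and the
             * encoder's reconstruction path) dequantize here. */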
1590             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
1591                                 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
1592                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
1593                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
1594                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
1595                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1596
1597                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1598                     if (s->chroma_y_shift){
1599                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1600                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1601                     }else{
1602                         dct_linesize >>= 1;
1603                         dct_offset >>=1;
1604                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
1605                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
1606                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1607                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1608                     }
1609                 }
1610             } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
1611                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
1612                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
1613                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
1614                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1615
1616                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1617                     if(s->chroma_y_shift){//Chroma420
1618                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
1619                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
1620                     }else{
1621                         //chroma422
1622                         dct_linesize = uvlinesize << s->interlaced_dct;
1623                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1624
1625                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
1626                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
1627                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1628                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1629                         if(!s->chroma_x_shift){//Chroma444
1630                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1631                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1632                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
1633                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
1634                         }
1635                     }
1636                 } // end of non-gray chroma handling
1637             }
1638             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
1639                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1640             }
1641         } else {
1642             /* dct only in intra block */
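            /* For MPEG-1/2 the intra coefficients are already dequantized at
             * parse time, so the IDCT result can be stored directly; other
             * codecs dequantize here first. */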
1643             if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
1644                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
1645                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
1646                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
1647                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1648
1649                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1650                     if(s->chroma_y_shift){
1651                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1652                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1653                     }else{
1654                         dct_offset >>=1;
1655                         dct_linesize >>=1;
1656                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
1657                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
1658                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1659                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1660                     }
1661                 }
1662             }else{
1663                 s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
1664                 s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
1665                 s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
1666                 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1667
1668                 if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1669                     if(s->chroma_y_shift){
1670                         s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1671                         s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1672                     }else{
1673
1674                         dct_linesize = uvlinesize << s->interlaced_dct;
1675                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
1676
1677                         s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
1678                         s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
1679                         s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1680                         s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1681                         if(!s->chroma_x_shift){//Chroma444
1682                             s->idsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
1683                             s->idsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
1684                             s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
1685                             s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
1686                         }
1687                     }
1688                 } // end of non-gray chroma handling
1689             }
1690         }
1691 skip_idct:
1692         if(!readable){
1693             s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
1694             s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1695             s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1696         }
1697     }
1698 }
1699
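/* When not optimizing for size, a separate is_mpeg12 = 1 instantiation of
 * mpv_decode_mb_internal() lets the compiler drop the non-MPEG-1/2 code paths
 * from the hot loop. */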
1700 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
1701 {
1702 #if !CONFIG_SMALL
1703     if(s->out_format == FMT_MPEG1) {
1704         mpv_decode_mb_internal(s, block, 1);
1705     } else
1706 #endif
1707         mpv_decode_mb_internal(s, block, 0);
1708 }
1709
1710 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
1711 {
1712     ff_draw_horiz_band(s->avctx, s->current_picture.f,
1713                        s->last_picture.f, y, h, s->picture_structure,
1714                        s->first_field, s->low_delay);
1715 }
1716
1717 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
1718     const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1719     const int uvlinesize = s->current_picture.f->linesize[1];
1720     const int mb_size= 4;
1721
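    /* block_index[0..3] address the four luma 8x8 blocks (in b8_stride units)
     * and block_index[4..5] the two chroma blocks; dest[] points one macroblock
     * to the left of mb_x because both are advanced at the start of each MB
     * (see ff_update_block_index()). */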
1722     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
1723     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
1724     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
1725     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
1726     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1727     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1728     // block_index is not used by MPEG-2, so it is not affected by chroma_format
1729
1730     s->dest[0] = s->current_picture.f->data[0] + (s->mb_x - 1) * (1 << mb_size);
1731     s->dest[1] = s->current_picture.f->data[1] + (s->mb_x - 1) * (1 << (mb_size - s->chroma_x_shift));
1732     s->dest[2] = s->current_picture.f->data[2] + (s->mb_x - 1) * (1 << (mb_size - s->chroma_x_shift));
1733
1734     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
1735     {
1736         if(s->picture_structure==PICT_FRAME){
1737             s->dest[0] += s->mb_y *   linesize << mb_size;
1738             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1739             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
1740         }else{
1741             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
1742             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1743             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
1744             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
1745         }
1746     }
1747 }
1748
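/* Drop all pictures and reset the parsing state, e.g. when the decoder is
 * flushed before a seek. */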
1749 void ff_mpeg_flush(AVCodecContext *avctx){
1750     int i;
1751     MpegEncContext *s = avctx->priv_data;
1752
1753     if (!s || !s->picture)
1754         return;
1755
1756     for (i = 0; i < MAX_PICTURE_COUNT; i++)
1757         ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1758     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
1759
1760     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1761     ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1762     ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1763
1764     s->mb_x= s->mb_y= 0;
1765
1766     s->parse_context.state= -1;
1767     s->parse_context.frame_start_found= 0;
1768     s->parse_context.overread= 0;
1769     s->parse_context.overread_index= 0;
1770     s->parse_context.index= 0;
1771     s->parse_context.last_index= 0;
1772     s->bitstream_buffer_size=0;
1773     s->pp_time=0;
1774 }
1775
1776 /**
1777  * Set qscale and update the qscale-dependent variables.
1778  */
1779 void ff_set_qscale(MpegEncContext * s, int qscale)
1780 {
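    /* Clamp to the 1..31 range of the MPEG-1/2/H.263 quantizer scale. */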
1781     if (qscale < 1)
1782         qscale = 1;
1783     else if (qscale > 31)
1784         qscale = 31;
1785
1786     s->qscale = qscale;
1787     s->chroma_qscale= s->chroma_qscale_table[qscale];
1788
1789     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1790     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
1791 }
1792
1793 void ff_mpv_report_decode_progress(MpegEncContext *s)
1794 {
1795     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
1796         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
1797 }