2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the portable C reference dct_unquantize
 * implementations; pointers to these are installed into the MpegEncContext
 * by ff_dct_common_init() below (arch-specific init may override them). */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity (chroma uses
 * the same qscale as luma). Codecs with a nonlinear chroma scale replace
 * this table via s->chroma_qscale_table. */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71     0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale value
 * (also used as entry 0 of ff_mpeg2_dc_scale_table below, i.e. the
 * lowest intra_dc_precision — TODO(review): confirm index semantics). */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table: constant 4 for all qscale values
 * (selected via ff_mpeg2_dc_scale_table[1] below). */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table: constant 2 for all qscale values
 * (selected via ff_mpeg2_dc_scale_table[2] below). */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table: constant 1 for all qscale values
 * (selected via ff_mpeg2_dc_scale_table[3] below). */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Index -> DC scale table for MPEG-2; entries give divisors 8/4/2/1.
 * Presumably indexed by intra_dc_precision (0..3) — TODO(review): confirm
 * against the code that reads this table. */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107     ff_mpeg1_dc_scale_table,
108     mpeg2_dc_scale_table1,
109     mpeg2_dc_scale_table2,
110     mpeg2_dc_scale_table3,
/* Pixel-format candidate lists for 4:2:0 decoders; the second variant
 * additionally lists hardware-accelerated formats. Initializer contents
 * not visible in this chunk. */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (byte sequence 00 00 01 xx).
 * *state carries the last bytes seen across calls so a start code that
 * straddles two buffers is still found; it is both read and updated.
 * Returns a pointer just past the start code (or end if none found —
 * NOTE(review): exact return convention inferred; interior lines of this
 * function are elided in this view).
 */
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
133         uint32_t tmp= *state << 8;
134         *state= tmp + *(p++);
135         if(tmp == 0x100 || p==end)
140         if     (p[-1] > 1      ) p+= 3; // no zero byte at p[-1]: skip ahead
141         else if(p[-2]          ) p+= 2;
142         else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
/* Installs the C reference dct_unquantize function pointers (bitexact
 * MPEG-2 intra variant when CODEC_FLAG_BITEXACT is set), runs the
 * per-architecture MPV_common_init_* hooks which may override them, and
 * builds the permuted scan tables for the IDCT in use. */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158     dsputil_init(&s->dsp, s->avctx);
160     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
161     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
162     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
163     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
164     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
165     if(s->flags & CODEC_FLAG_BITEXACT)
166         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
167     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific initialization; the guarding #if/#endif lines are
 * elided in this view. */
170     MPV_common_init_mmx(s);
172     MPV_common_init_axp(s);
174     MPV_common_init_mlib(s);
176     MPV_common_init_mmi(s);
178     MPV_common_init_arm(s);
180     MPV_common_init_altivec(s);
182     MPV_common_init_bfin(s);
185     /* load & permutate scantables
186        note: only wmv uses different ones
/* Scan tables are permuted to match the idct_permutation chosen by
 * dsputil_init() above. */
188     if(s->alternate_scan){
189         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
190         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
192         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
193         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
195     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
196     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the copy as FF_BUFFER_TYPE_COPY so the buffer
 * ownership is not duplicated (interior lines elided in this view). */
201 void ff_copy_picture(Picture *dst, Picture *src){
203     dst->f.type= FF_BUFFER_TYPE_COPY;
207  * Release a frame buffer
/* Returns the frame's pixel buffer to the (possibly threaded) buffer pool
 * and frees any hwaccel private data attached to the picture. */
209 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
211     ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
212     av_freep(&pic->f.hwaccel_picture_private);
216  * Allocate a frame buffer
/* Obtains a pixel buffer for pic via ff_thread_get_buffer(), first
 * allocating hwaccel private data when a hwaccel with priv_data_size is
 * active. Validates what get_buffer() returned: data present, stride
 * unchanged from the context's recorded linesize, and matching U/V
 * strides; on any failure the buffer is released again and an error is
 * logged (return statements are elided in this view). */
218 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
222     if (s->avctx->hwaccel) {
223         assert(!pic->hwaccel_picture_private);
224         if (s->avctx->hwaccel->priv_data_size) {
225             pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
226             if (!pic->f.hwaccel_picture_private) {
227                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
233     r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
235     if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
236         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
237                r, pic->f.age, pic->f.type, pic->f.data[0]);
238         av_freep(&pic->f.hwaccel_picture_private);
/* A changed stride mid-stream cannot be handled by the rest of the code. */
242     if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
243         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
244         free_frame_buffer(s, pic);
248     if (pic->f.linesize[1] != pic->f.linesize[2]) {
249         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
250         free_frame_buffer(s, pic);
258  * allocates a Picture
259  * The pixels are allocated/set by calling get_buffer() if shared=0
/* Besides the pixel buffer this allocates all per-picture side-data
 * arrays: variance/mean tables (encoder), mbskip, qscale and mb_type
 * tables, motion vectors and reference indices (granularity depends on
 * the output format), DCT coefficient dump (debug), and pan-scan info.
 * On allocation failure everything is released via the fail label. */
261 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
262     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
263     const int mb_array_size= s->mb_stride*s->mb_height;
264     const int b8_array_size= s->b8_stride*s->mb_height*2;
265     const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared=1: caller supplied the pixels, just tag the buffer type. */
270         assert(pic->f.data[0]);
271         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
272         pic->f.type = FF_BUFFER_TYPE_SHARED;
274         assert(!pic->f.data[0]);
276         if (alloc_frame_buffer(s, pic) < 0)
/* Record the strides get_buffer() chose; later pictures must match. */
279         s->linesize   = pic->f.linesize[0];
280         s->uvlinesize = pic->f.linesize[1];
/* Side data is allocated only once per Picture slot (qscale_table==NULL). */
283     if (pic->f.qscale_table == NULL) {
285             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
286             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
287             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
290         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
291         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
292         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base      , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
/* Offset into the base arrays so index -1 / row -1 accesses stay valid. */
293         pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
294         pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
/* H.264 stores motion at 4x4 granularity, other codecs at 8x8. */
295         if(s->out_format == FMT_H264){
297                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
298                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
299                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
301             pic->f.motion_subsample_log2 = 2;
302         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
304                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
305                 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
306                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
308             pic->f.motion_subsample_log2 = 3;
310         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
311             FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
313         pic->f.qstride = s->mb_stride;
314         FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
317     /* It might be nicer if the application would keep track of these
318      * but it would require an API change. */
319     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
320     s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
321     if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
322         pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
326 fail: //for the FF_ALLOCZ_OR_GOTO macro
328         free_frame_buffer(s, pic);
333  * deallocates a picture
/* Frees the pixel buffer (unless the picture is SHARED, i.e. the pixels
 * are owned by the caller) and all per-picture side-data arrays allocated
 * by ff_alloc_picture(); derived pointers are cleared alongside their
 * base allocations. */
335 static void free_picture(MpegEncContext *s, Picture *pic){
338     if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
339         free_frame_buffer(s, pic);
342     av_freep(&pic->mb_var);
343     av_freep(&pic->mc_mb_var);
344     av_freep(&pic->mb_mean);
345     av_freep(&pic->f.mbskip_table);
346     av_freep(&pic->qscale_table_base);
347     av_freep(&pic->mb_type_base);
348     av_freep(&pic->f.dct_coeff);
349     av_freep(&pic->f.pan_scan);
350     pic->f.mb_type = NULL;
352         av_freep(&pic->motion_val_base[i]);
353         av_freep(&pic->f.ref_index[i]);
/* Shared pictures: only forget the data pointers, don't free them. */
356     if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
359             pic->f.data[i] = NULL;
/* Allocates the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, motion-estimation scratchpads (which
 * alias one shared allocation), ME maps, optional noise-reduction error
 * sums, DCT block arrays, and — for H.263-family codecs — the AC
 * prediction values. Returns -1 on allocation failure; the partially
 * allocated buffers are then released by MPV_common_end(). */
365 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
366     int y_size = s->b8_stride * (2 * s->mb_height + 1);
367     int c_size = s->mb_stride * (s->mb_height + 1);
368     int yc_size = y_size + 2 * c_size;
371     // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
372     FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
373     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
375     //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
376     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* The following scratchpads deliberately alias me.scratchpad — they are
 * never needed at the same time. */
377     s->me.temp=         s->me.scratchpad;
378     s->rd_scratchpad=   s->me.scratchpad;
379     s->b_scratchpad=    s->me.scratchpad;
380     s->obmc_scratchpad= s->me.scratchpad + 16;
382     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
383     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
384     if(s->avctx->noise_reduction){
385         FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
388     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
389     s->block= s->blocks[0];
392         s->pblocks[i] = &s->block[i];
395     if (s->out_format == FMT_H263) {
397         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0..2]: Y, then the two chroma planes, offset past the guard row. */
398         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
399         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
400         s->ac_val[2] = s->ac_val[1] + c_size;
405     return -1; //free() through MPV_common_end()
/* Frees everything init_duplicate_context() allocated. Only the owning
 * allocations are freed; the aliasing scratchpad pointers are just
 * cleared (obmc_scratchpad points into me.scratchpad). */
408 static void free_duplicate_context(MpegEncContext *s){
411     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
412     av_freep(&s->me.scratchpad);
416     s->obmc_scratchpad= NULL;
418     av_freep(&s->dct_error_sum);
419     av_freep(&s->me.map);
420     av_freep(&s->me.score_map);
421     av_freep(&s->blocks);
422     av_freep(&s->ac_val_base);
/* Copies the per-thread fields (scratch buffers, ME state, ...) from src
 * into bak; used by ff_update_duplicate_context() to preserve them across
 * a wholesale memcpy of the context. Additional COPY() lines are elided
 * in this view. */
426 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
427 #define COPY(a) bak->a= src->a
428     COPY(allocated_edge_emu_buffer);
429     COPY(edge_emu_buffer);
434     COPY(obmc_scratchpad);
441     COPY(me.map_generation);
/* Refreshes a slice-thread context from the master: the whole context is
 * memcpy'd from src, but dst's own per-thread buffers (saved into a
 * temporary via backup_duplicate_context) are restored afterwards, and
 * the pblocks pointers are re-based onto dst's own block array. */
453 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
456     //FIXME copy only needed parts
458     backup_duplicate_context(&bak, dst);
459     memcpy(dst, src, sizeof(MpegEncContext));
460     backup_duplicate_context(dst, &bak);
462         dst->pblocks[i] = &dst->block[i];
464     //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
/**
 * Frame-threading context update: copies decoding state from the source
 * thread's context (s1) into the destination (s). On first use the whole
 * context is cloned and per-thread resources reset; afterwards only the
 * fields a following frame depends on are copied (picture pointers are
 * re-based into s's own picture array, the bitstream buffer is
 * duplicated, etc.).
 */
467 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
469     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
471     if(dst == src || !s1->context_initialized) return 0;
473     //FIXME can parameters change on I-frames? in that case dst may need a reinit
474     if(!s->context_initialized){
475         memcpy(s, s1, sizeof(MpegEncContext));
/* Give this thread its own picture-slot range and its own (initially
 * empty) bitstream buffer instead of sharing s1's. */
478         s->picture_range_start += MAX_PICTURE_COUNT;
479         s->picture_range_end   += MAX_PICTURE_COUNT;
480         s->bitstream_buffer      = NULL;
481         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
486     s->avctx->coded_height  = s1->avctx->coded_height;
487     s->avctx->coded_width   = s1->avctx->coded_width;
488     s->avctx->width         = s1->avctx->width;
489     s->avctx->height        = s1->avctx->height;
491     s->coded_picture_number = s1->coded_picture_number;
492     s->picture_number       = s1->picture_number;
493     s->input_picture_number = s1->input_picture_number;
495     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* Bulk-copy the struct region between last_picture and last_picture_ptr
 * (relies on MpegEncContext field layout). */
496     memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
498     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
499     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
500     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
502     memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
504     //Error/bug resilience
505     s->next_p_frame_damaged = s1->next_p_frame_damaged;
506     s->workaround_bugs      = s1->workaround_bugs;
/* MPEG-4 timing fields, copied as one struct region (layout-dependent). */
509     memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
512     s->max_b_frames = s1->max_b_frames;
513     s->low_delay    = s1->low_delay;
514     s->dropable     = s1->dropable;
516     //DivX handling (doesn't work)
517     s->divx_packed  = s1->divx_packed;
519     if(s1->bitstream_buffer){
520         if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
521             av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
522         s->bitstream_buffer_size  = s1->bitstream_buffer_size;
523         memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
524         memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
527     //MPEG2/interlacing info
528     memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
530     if(!s1->first_field){
531         s->last_pict_type= s1->pict_type;
532         if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
534         if(s1->pict_type!=FF_B_TYPE){
535             s->last_non_b_pict_type= s1->pict_type;
543  * sets the given MpegEncContext to common defaults (same for encoding and decoding).
544  * the changed fields will not depend upon the prior state of the MpegEncContext.
546 void MPV_common_defaults(MpegEncContext *s){
/* Default scale tables; codec-specific init may replace them later. */
548     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
549     s->chroma_qscale_table= ff_default_chroma_qscale_table;
550     s->progressive_frame= 1;
551     s->progressive_sequence= 1;
552     s->picture_structure= PICT_FRAME;
554     s->coded_picture_number = 0;
555     s->picture_number = 0;
556     s->input_picture_number = 0;
558     s->picture_in_gop_number = 0;
/* Default picture-slot range; frame threads shift this per thread. */
563     s->picture_range_start = 0;
564     s->picture_range_end = MAX_PICTURE_COUNT;
568  * sets the given MpegEncContext to defaults for decoding.
569  * the changed fields will not depend upon the prior state of the MpegEncContext.
571 void MPV_decode_defaults(MpegEncContext *s){
572     MPV_common_defaults(s);
576  * init common structure for both encoder and decoder.
577  * this assumes that some variables like width/height are already set
/* Computes all derived geometry (mb_width/height, strides, edge
 * positions), then allocates every context-lifetime table: MV tables,
 * quantizer matrices, the picture array, error-resilience tables, the
 * H.263/MPEG-4 prediction tables, and finally the per-thread duplicate
 * contexts. All failures funnel into the (elided) fail label. */
579 av_cold int MPV_common_init(MpegEncContext *s)
581     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
582         threads = (s->encoding ||
584                     s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
585                   s->avctx->thread_count : 1;
/* Interlaced MPEG-2 needs mb_height rounded to a multiple of 2 MB rows. */
587     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
588         s->mb_height = (s->height + 31) / 32 * 2;
589     else if (s->codec_id != CODEC_ID_H264)
590         s->mb_height = (s->height + 15) / 16;
592     if(s->avctx->pix_fmt == PIX_FMT_NONE){
593         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Slice threading cannot use more threads than MB rows. */
597     if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
598        (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
599         int max_threads = FFMIN(MAX_THREADS, s->mb_height);
600         av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
601                s->avctx->thread_count, max_threads);
602         threads = max_threads;
605     if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
608     ff_dct_common_init(s);
610     s->flags= s->avctx->flags;
611     s->flags2= s->avctx->flags2;
/* Strides carry a +1 guard column for -1 index accesses. */
613     s->mb_width  = (s->width  + 15) / 16;
614     s->mb_stride = s->mb_width + 1;
615     s->b8_stride = s->mb_width*2 + 1;
616     s->b4_stride = s->mb_width*4 + 1;
617     mb_array_size= s->mb_height * s->mb_stride;
618     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
620     /* set chroma shifts */
621     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
622                                                     &(s->chroma_y_shift) );
624     /* set default edge pos, will be overriden in decode_header if needed */
625     s->h_edge_pos= s->mb_width*16;
626     s->v_edge_pos= s->mb_height*16;
628     s->mb_num = s->mb_width * s->mb_height;
633     s->block_wrap[3]= s->b8_stride;
635     s->block_wrap[5]= s->mb_stride;
637     y_size = s->b8_stride * (2 * s->mb_height + 1);
638     c_size = s->mb_stride * (s->mb_height + 1);
639     yc_size = y_size + 2 * c_size;
641     /* convert fourcc to upper case */
642     s->codec_tag = ff_toupper4(s->avctx->codec_tag);
644     s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
646     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
648     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
649     for(y=0; y<s->mb_height; y++){
650         for(x=0; x<s->mb_width; x++){
651             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
654     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
657         /* Allocate MV tables */
658         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
659         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
660         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
661         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
662         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
663         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
/* Offset past the guard row/column so MB (-1,-1) accesses are valid. */
664         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
665         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
666         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
667         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
668         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
669         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
671         if(s->msmpeg4_version){
672             FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
674         FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
676         /* Allocate MB type table */
677         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
679         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
681         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
682         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
683         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
684         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
685         FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
686         FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
688         if(s->avctx->noise_reduction){
689             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* Picture pool is sized per thread for frame threading. */
693     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
694     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
695     for(i = 0; i < s->picture_count; i++) {
696         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
699     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
701     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
702         /* interlaced direct mode decoding tables */
707                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
708                     s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
710                 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
711                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
712                 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
714             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
717     if (s->out_format == FMT_H263) {
719         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
720         s->coded_block= s->coded_block_base + s->b8_stride + 1;
722         /* cbp, ac_pred, pred_dir */
723         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
724         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
727     if (s->h263_pred || s->h263_plus || !s->encoding) {
729         //MN: we need these for  error resilience of intra-frames
730         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
731         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
732         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
733         s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value (128 << 3). */
734         for(i=0;i<yc_size;i++)
735             s->dc_val_base[i] = 1024;
738     /* which mb is a intra block */
739     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
740     memset(s->mbintra_table, 1, mb_array_size);
742     /* init macroblock skip table */
743     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
744     //Note the +1 is for a quicker mpeg4 slice_end detection
745     FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
747     s->parse_context.state= -1;
748     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
749        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
750        s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
751        s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
754     s->context_initialized = 1;
755     s->thread_context[0]= s;
/* Slice threading/encoding: clone the context per thread and split the
 * MB rows between them; otherwise a single duplicate context suffices. */
757     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
758         for(i=1; i<threads; i++){
759             s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
760             memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
763         for(i=0; i<threads; i++){
764             if(init_duplicate_context(s->thread_context[i], s) < 0)
766             s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
767             s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
770         if(init_duplicate_context(s, s) < 0) goto fail;
772         s->end_mb_y   = s->mb_height;
782 /* init common structure for both encoder and decoder */
/* Tears down everything MPV_common_init() set up: per-thread duplicate
 * contexts, all context-lifetime tables, the picture pool (skipped for
 * frame-thread copies, which don't own it), and finally the default
 * buffers held by the AVCodecContext. Safe on partially-initialized
 * contexts because av_freep() tolerates NULL. */
783 void MPV_common_end(MpegEncContext *s)
787     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
788         for(i=0; i<s->avctx->thread_count; i++){
789             free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself — only the clones are freed. */
791         for(i=1; i<s->avctx->thread_count; i++){
792             av_freep(&s->thread_context[i]);
794     } else free_duplicate_context(s);
796     av_freep(&s->parse_context.buffer);
797     s->parse_context.buffer_size=0;
799     av_freep(&s->mb_type);
800     av_freep(&s->p_mv_table_base);
801     av_freep(&s->b_forw_mv_table_base);
802     av_freep(&s->b_back_mv_table_base);
803     av_freep(&s->b_bidir_forw_mv_table_base);
804     av_freep(&s->b_bidir_back_mv_table_base);
805     av_freep(&s->b_direct_mv_table_base);
807     s->b_forw_mv_table= NULL;
808     s->b_back_mv_table= NULL;
809     s->b_bidir_forw_mv_table= NULL;
810     s->b_bidir_back_mv_table= NULL;
811     s->b_direct_mv_table= NULL;
815                 av_freep(&s->b_field_mv_table_base[i][j][k]);
816                 s->b_field_mv_table[i][j][k]=NULL;
818             av_freep(&s->b_field_select_table[i][j]);
819             av_freep(&s->p_field_mv_table_base[i][j]);
820             s->p_field_mv_table[i][j]=NULL;
822         av_freep(&s->p_field_select_table[i]);
825     av_freep(&s->dc_val_base);
826     av_freep(&s->coded_block_base);
827     av_freep(&s->mbintra_table);
828     av_freep(&s->cbp_table);
829     av_freep(&s->pred_dir_table);
831     av_freep(&s->mbskip_table);
832     av_freep(&s->prev_pict_types);
833     av_freep(&s->bitstream_buffer);
834     s->allocated_bitstream_buffer_size=0;
836     av_freep(&s->avctx->stats_out);
837     av_freep(&s->ac_stats);
838     av_freep(&s->error_status_table);
839     av_freep(&s->mb_index2xy);
840     av_freep(&s->lambda_table);
841     av_freep(&s->q_intra_matrix);
842     av_freep(&s->q_inter_matrix);
843     av_freep(&s->q_intra_matrix16);
844     av_freep(&s->q_inter_matrix16);
845     av_freep(&s->input_picture);
846     av_freep(&s->reordered_input_picture);
847     av_freep(&s->dct_offset);
/* Frame-thread copies (is_copy) share the master's pictures; don't free. */
849     if(s->picture && !s->avctx->is_copy){
850         for(i=0; i<s->picture_count; i++){
851             free_picture(s, &s->picture[i]);
854     av_freep(&s->picture);
855     s->context_initialized = 0;
858     s->current_picture_ptr= NULL;
859     s->linesize= s->uvlinesize= 0;
862         av_freep(&s->visualization_buffer[i]);
864     if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
865         avcodec_default_free_buffers(s->avctx);
/**
 * Initialize a run-length table: computes, for both the "not last" and
 * "last" coefficient classes, the max_level[run], max_run[level] and
 * index_run[run] lookup arrays from the table's run/level pairs, then
 * stores them either in the caller-provided static buffer (sliced into
 * three sub-arrays) or in freshly malloc'd memory. Idempotent for static
 * tables (early-outs once max_level[0] is set).
 */
868 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
870     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
871     uint8_t index_run[MAX_RUN+1];
872     int last, run, level, start, end, i;
874     /* If table is static, we can quit if rl->max_level[0] is not NULL */
875     if(static_store && rl->max_level[0])
878     /* compute max_level[], max_run[] and index_run[] */
879     for(last=0;last<2;last++) {
/* rl->n acts as the "unset" sentinel for index_run. */
888         memset(max_level, 0, MAX_RUN + 1);
889         memset(max_run, 0, MAX_LEVEL + 1);
890         memset(index_run, rl->n, MAX_RUN + 1);
891         for(i=start;i<end;i++) {
892             run = rl->table_run[i];
893             level = rl->table_level[i];
894             if (index_run[run] == rl->n)
896             if (level > max_level[run])
897                 max_level[run] = level;
898             if (run > max_run[level])
899                 max_run[level] = run;
902             rl->max_level[last] = static_store[last];
904             rl->max_level[last] = av_malloc(MAX_RUN + 1);
905         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
907             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
909             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
910         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
912             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
914             rl->index_run[last] = av_malloc(MAX_RUN + 1);
915         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Builds the combined run/level/length VLC tables (rl_vlc[q]) from the
 * table's VLC: for each VLC entry the decoded code is translated into a
 * pre-scaled run/level pair so the decoder can skip a separate lookup.
 * Special entries: len==0 is an illegal code, len<0 means more bits are
 * needed, code==rl->n is the escape code. */
919 void init_vlc_rl(RLTable *rl)
931         for(i=0; i<rl->vlc.table_size; i++){
932             int code= rl->vlc.table[i][0];
933             int len = rl->vlc.table[i][1];
936             if(len==0){ // illegal code
939             }else if(len<0){ //more bits needed
943                 if(code==rl->n){ //esc
947                     run=   rl->table_run  [code] + 1;
948                     level= rl->table_level[code] * qmul + qadd;
/* +192 flags "last coefficient in block" codes. */
949                     if(code >= rl->last) run+=192;
952             rl->rl_vlc[q][i].len= len;
953             rl->rl_vlc[q][i].level= level;
954             rl->rl_vlc[q][i].run= run;
/* Releases the frame buffers of all pictures this context owns that are
 * neither reference frames nor (unless remove_current is set) the
 * current picture. */
959 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
963     /* release non reference frames */
964     for(i=0; i<s->picture_count; i++){
965         if (s->picture[i].f.data[0] && !s->picture[i].f.reference
966             && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
967             && (remove_current || &s->picture[i] != s->current_picture_ptr)
968             /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
969             free_frame_buffer(s, &s->picture[i]);
/* Finds a free slot in this context's picture range and returns its
 * index. For shared pictures only completely untyped empty slots qualify;
 * otherwise slots whose buffer type is already set are preferred, then
 * any empty slot. If the pool is exhausted this is an internal error
 * (see comment below) and the function does not return a valid index. */
974 int ff_find_unused_picture(MpegEncContext *s, int shared){
978         for(i=s->picture_range_start; i<s->picture_range_end; i++){
979             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
983         for(i=s->picture_range_start; i<s->picture_range_end; i++){
984             if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
987         for(i=s->picture_range_start; i<s->picture_range_end; i++){
988             if (s->picture[i].f.data[0] == NULL)
993     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
994     /* We could return -1, but the codec would crash trying to draw into a
995      * non-existing frame anyway. This is safer than waiting for a random crash.
996      * Also the return of this is never useful, an encoder must only allocate
997      * as much as allowed in the specification. This has no relationship to how
998      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
999      * enough for such valid streams).
1000      * Plus, a decoder has to check stream validity and remove frames if too
1001      * many reference frames are around. Waiting for "OOM" is not correct at
1002      * all. Similarly, missing reference frames have to be replaced by
1003      * interpolated/MC frames, anything else is a bug in the codec ...
/* Refreshes the encoder's noise-reduction DCT offsets from the
 * accumulated error sums, separately for intra and inter blocks. The
 * counters are halved once dct_count exceeds 2^16 so the running average
 * tracks recent statistics without overflowing. */
1009 static void update_noise_reduction(MpegEncContext *s){
1012     for(intra=0; intra<2; intra++){
1013         if(s->dct_count[intra] > (1<<16)){
1014             for(i=0; i<64; i++){
1015                 s->dct_error_sum[intra][i] >>=1;
1017             s->dct_count[intra] >>= 1;
1020         for(i=0; i<64; i++){
1021             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
/*
 * Per-frame setup shared by encoder and decoder: recycles old pictures,
 * picks/allocates the current picture, rotates the last/next pointers,
 * allocates dummy reference frames when needed, adjusts pointers for field
 * pictures, and selects the dequantizer for this frame.
 * NOTE(review): partial extract — original line numbers are fused into each
 * line and many intervening lines (braces, declarations, error returns) are
 * missing from this view; code left byte-identical.
 */
1027 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1029 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1035 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1037 /* mark&release old frames */
1038 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1039 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
/* only the owning context may free a frame's buffer (frame threading) */
1040 if (s->last_picture_ptr->owner2 == s)
1041 free_frame_buffer(s, s->last_picture_ptr);
1043 /* release forgotten pictures */
1044 /* if(mpeg124/h263) */
1046 for(i=0; i<s->picture_count; i++){
1047 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
/* a leftover reference frame here is a bug except under frame threading */
1048 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1049 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1050 free_frame_buffer(s, &s->picture[i]);
1058 ff_release_unused_pictures(s, 1);
/* reuse the pre-set current picture if it has no data yet, otherwise grab
   a fresh slot from the pool */
1060 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1061 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1063 i= ff_find_unused_picture(s, 0);
1064 pic= &s->picture[i];
1067 pic->f.reference = 0;
/* reference flags: H.264 uses the picture structure; other codecs mark
   every non-B picture as a (both-fields) reference */
1069 if (s->codec_id == CODEC_ID_H264)
1070 pic->f.reference = s->picture_structure;
1071 else if (s->pict_type != AV_PICTURE_TYPE_B)
1072 pic->f.reference = 3;
1075 pic->f.coded_picture_number = s->coded_picture_number++;
1077 if(ff_alloc_picture(s, pic, 0) < 0)
1080 s->current_picture_ptr= pic;
1081 //FIXME use only the vars from current_pic
1082 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1083 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
/* for MPEG-1/2 field pictures, derive top_field_first from which field
   arrived first rather than trusting the header flag */
1084 if(s->picture_structure != PICT_FRAME)
1085 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1087 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1088 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1091 s->current_picture_ptr->f.pict_type = s->pict_type;
1092 // if(s->flags && CODEC_FLAG_QSCALE)
1093 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1094 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1096 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* rotate reference pointers: on non-B frames the previous "next" becomes
   "last" and the new picture becomes "next" */
1098 if (s->pict_type != AV_PICTURE_TYPE_B) {
1099 s->last_picture_ptr= s->next_picture_ptr;
1101 s->next_picture_ptr= s->current_picture_ptr;
1103 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1104 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1105 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1106 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1107 s->pict_type, s->dropable);*/
1109 if(s->codec_id != CODEC_ID_H264){
/* no usable last picture: either the stream starts on a non-keyframe, or a
   field-based keyframe needs a placeholder for its missing opposite field */
1110 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1111 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1112 if (s->pict_type != AV_PICTURE_TYPE_I)
1113 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1114 else if (s->picture_structure != PICT_FRAME)
1115 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1117 /* Allocate a dummy frame */
1118 i= ff_find_unused_picture(s, 0);
1119 s->last_picture_ptr= &s->picture[i];
1120 s->last_picture_ptr->f.key_frame = 0;
1121 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* mark both fields of the dummy as fully decoded so frame threads never
   wait on it */
1123 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1124 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1126 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1127 /* Allocate a dummy frame */
1128 i= ff_find_unused_picture(s, 0);
1129 s->next_picture_ptr= &s->picture[i];
1130 s->next_picture_ptr->f.key_frame = 0;
1131 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1133 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1134 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1138 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1139 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1141 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* field pictures (non-H.264): point data at the selected field and double
   the linesizes so the planes step over the interleaved opposite field */
1143 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1146 if(s->picture_structure == PICT_BOTTOM_FIELD){
1147 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1149 s->current_picture.f.linesize[i] *= 2;
1150 s->last_picture.f.linesize[i] *= 2;
1151 s->next_picture.f.linesize[i] *= 2;
1155 s->error_recognition= avctx->error_recognition;
1157 /* set dequantizer, we can't do it during init as it might change for mpeg4
1158 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1159 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1160 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1161 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1162 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1163 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1164 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1166 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1167 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1170 if(s->dct_error_sum){
1171 assert(s->avctx->noise_reduction && s->encoding);
1173 update_noise_reduction(s);
1176 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1177 return ff_xvmc_field_start(s, avctx);
/*
 * Per-frame teardown: draw edge padding for reference frames, record
 * last-frame statistics, copy the working current_picture back into the
 * pool, release non-reference frames, and report decode completion.
 * NOTE(review): partial extract — original line numbers are fused into each
 * line and intervening lines are missing from this view; code left
 * byte-identical.
 */
1182 /* generic function for encode/decode called after a frame has been coded/decoded */
1183 void MPV_frame_end(MpegEncContext *s)
1186 /* redraw edges for the frame if decoding didn't complete */
1187 //just to make sure that all data is rendered.
1188 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1189 ff_xvmc_field_end(s);
/* pad the picture edges only when we own the pixels (no hwaccel/VDPAU),
   the frame is a reference, and edge emulation is not requested */
1190 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1191 && !s->avctx->hwaccel
1192 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1193 && s->unrestricted_mv
1194 && s->current_picture.f.reference
1196 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1197 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1198 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* luma plane, then both chroma planes scaled by the subsampling shifts */
1199 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1200 s->h_edge_pos , s->v_edge_pos,
1201 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1202 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1203 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1204 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1205 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1206 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1207 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1212 s->last_pict_type = s->pict_type;
1213 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1214 if(s->pict_type!=AV_PICTURE_TYPE_B){
1215 s->last_non_b_pict_type= s->pict_type;
1218 /* copy back current_picture variables */
1219 for(i=0; i<MAX_PICTURE_COUNT; i++){
1220 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1221 s->picture[i]= s->current_picture;
1225 assert(i<MAX_PICTURE_COUNT);
1229 /* release non-reference frames */
1230 for(i=0; i<s->picture_count; i++){
1231 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1232 free_frame_buffer(s, &s->picture[i]);
1236 // clear copies, to avoid confusion
1238 memset(&s->last_picture, 0, sizeof(Picture));
1239 memset(&s->next_picture, 0, sizeof(Picture));
1240 memset(&s->current_picture, 0, sizeof(Picture));
1242 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* tell waiting frame threads the whole picture is done (last MB row) */
1244 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1245 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
/*
 * Additively draw an anti-aliased line into a grayscale plane using 16.16
 * fixed-point slope stepping (used by the motion-vector visualization).
 * NOTE(review): partial extract — declarations of x/y/f/fr, braces and the
 * vertical-major else branch header are elided from this view; code left
 * byte-identical.
 */
1250 * draws an line from (ex, ey) -> (sx, sy).
1251 * @param w width of the image
1252 * @param h height of the image
1253 * @param stride stride/linesize of the image
1254 * @param color color of the arrow
1256 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clamp both endpoints into the image */
1259 sx= av_clip(sx, 0, w-1);
1260 sy= av_clip(sy, 0, h-1);
1261 ex= av_clip(ex, 0, w-1);
1262 ey= av_clip(ey, 0, h-1);
1264 buf[sy*stride + sx]+= color;
/* horizontal-major case: ensure sx <= ex, then step x and interpolate y */
1266 if(FFABS(ex - sx) > FFABS(ey - sy)){
1268 FFSWAP(int, sx, ex);
1269 FFSWAP(int, sy, ey);
1271 buf+= sx + sy*stride;
1273 f= ((ey-sy)<<16)/ex;
1274 for(x= 0; x <= ex; x++){
/* split the color between the two vertically adjacent pixels by the
   fractional part fr */
1277 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1278 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* vertical-major case: ensure sy <= ey, then step y and interpolate x */
1282 FFSWAP(int, sx, ex);
1283 FFSWAP(int, sy, ey);
1285 buf+= sx + sy*stride;
1287 if(ey) f= ((ex-sx)<<16)/ey;
1289 for(y= 0; y <= ey; y++){
1292 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1293 buf[y*stride + x+1]+= (color* fr )>>16;
/*
 * Draw an arrow (shaft plus two head strokes) for motion-vector
 * visualization, delegating the actual rasterization to draw_line().
 * NOTE(review): partial extract — declarations of dx/dy/rx/ry, braces and
 * some lines are elided from this view; code left byte-identical.
 */
1299 * draws an arrow from (ex, ey) -> (sx, sy).
1300 * @param w width of the image
1301 * @param h height of the image
1302 * @param stride stride/linesize of the image
1303 * @param color color of the arrow
1305 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* loose clamp (±100 px outside the image) so draw_line's tight clamp still
   preserves the arrow direction for vectors pointing off-screen */
1308 sx= av_clip(sx, -100, w+100);
1309 sy= av_clip(sy, -100, h+100);
1310 ex= av_clip(ex, -100, w+100);
1311 ey= av_clip(ey, -100, h+100);
/* only draw a head when the vector is longer than 3 pixels */
1316 if(dx*dx + dy*dy > 3*3){
1319 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1321 //FIXME subpixel accuracy
/* normalize the head strokes to a fixed length in 4-bit fixed point */
1322 rx= ROUNDED_DIV(rx*3<<4, length);
1323 ry= ROUNDED_DIV(ry*3<<4, length);
1325 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1326 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1328 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/*
 * Print and/or visualize per-macroblock debug information for a decoded
 * frame: textual skip/QP/MB-type dumps, and in-picture overlays of motion
 * vectors, quantizer and macroblock types (controlled by avctx->debug and
 * avctx->debug_mv).
 * NOTE(review): partial extract — original line numbers are fused into each
 * line and many intervening lines (braces, declarations, case labels) are
 * missing from this view; code left byte-identical.
 */
1332 * prints debuging info for the given picture.
1334 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
/* nothing useful can be printed for hwaccel frames or without MB metadata */
1336 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1338 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1341 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1342 av_get_picture_type_char(pict->pict_type));
/* one text row per MB row; each MB contributes one cell per enabled flag */
1343 for(y=0; y<s->mb_height; y++){
1344 for(x=0; x<s->mb_width; x++){
1345 if(s->avctx->debug&FF_DEBUG_SKIP){
1346 int count= s->mbskip_table[x + y*s->mb_stride];
1347 if(count>9) count=9;
1348 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1350 if(s->avctx->debug&FF_DEBUG_QP){
1351 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1353 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1354 int mb_type= pict->mb_type[x + y*s->mb_stride];
1355 //Type & MV direction
1357 av_log(s->avctx, AV_LOG_DEBUG, "P");
1358 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1359 av_log(s->avctx, AV_LOG_DEBUG, "A");
1360 else if(IS_INTRA4x4(mb_type))
1361 av_log(s->avctx, AV_LOG_DEBUG, "i");
1362 else if(IS_INTRA16x16(mb_type))
1363 av_log(s->avctx, AV_LOG_DEBUG, "I");
1364 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1365 av_log(s->avctx, AV_LOG_DEBUG, "d");
1366 else if(IS_DIRECT(mb_type))
1367 av_log(s->avctx, AV_LOG_DEBUG, "D");
1368 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1369 av_log(s->avctx, AV_LOG_DEBUG, "g");
1370 else if(IS_GMC(mb_type))
1371 av_log(s->avctx, AV_LOG_DEBUG, "G");
1372 else if(IS_SKIP(mb_type))
1373 av_log(s->avctx, AV_LOG_DEBUG, "S");
1374 else if(!USES_LIST(mb_type, 1))
1375 av_log(s->avctx, AV_LOG_DEBUG, ">");
1376 else if(!USES_LIST(mb_type, 0))
1377 av_log(s->avctx, AV_LOG_DEBUG, "<");
1379 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1380 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second cell: partitioning (+/-/| / space), third cell: interlacing */
1385 av_log(s->avctx, AV_LOG_DEBUG, "+");
1386 else if(IS_16X8(mb_type))
1387 av_log(s->avctx, AV_LOG_DEBUG, "-");
1388 else if(IS_8X16(mb_type))
1389 av_log(s->avctx, AV_LOG_DEBUG, "|");
1390 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1391 av_log(s->avctx, AV_LOG_DEBUG, " ");
1393 av_log(s->avctx, AV_LOG_DEBUG, "?");
1396 if(IS_INTERLACED(mb_type))
1397 av_log(s->avctx, AV_LOG_DEBUG, "=");
1399 av_log(s->avctx, AV_LOG_DEBUG, " ");
1401 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1403 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlays: draw directly into a private copy of the frame ---- */
1407 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1408 const int shift= 1 + s->quarter_sample;
1412 int h_chroma_shift, v_chroma_shift, block_height;
1413 const int width = s->avctx->width;
1414 const int height= s->avctx->height;
1415 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1416 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1417 s->low_delay=0; //needed to see the vectors without trashing the buffers
1419 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* copy the planes into visualization_buffer so the overlay never corrupts
   reference data used for later prediction */
1421 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1422 pict->data[i]= s->visualization_buffer[i];
1424 pict->type= FF_BUFFER_TYPE_COPY;
1427 block_height = 16>>v_chroma_shift;
1429 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1431 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1432 const int mb_index= mb_x + mb_y*s->mb_stride;
/* motion-vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward */
1433 if((s->avctx->debug_mv) && pict->motion_val){
1435 for(type=0; type<3; type++){
1438 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1442 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1446 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1451 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per partition: 4 for 8x8, 2 for 16x8 / 8x16, 1 for 16x16 */
1454 if(IS_8X8(pict->mb_type[mb_index])){
1457 int sx= mb_x*16 + 4 + 8*(i&1);
1458 int sy= mb_y*16 + 4 + 8*(i>>1);
1459 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1460 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1461 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1462 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1464 }else if(IS_16X8(pict->mb_type[mb_index])){
1468 int sy=mb_y*16 + 4 + 8*i;
1469 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1470 int mx=(pict->motion_val[direction][xy][0]>>shift);
1471 int my=(pict->motion_val[direction][xy][1]>>shift);
1473 if(IS_INTERLACED(pict->mb_type[mb_index]))
1476 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1478 }else if(IS_8X16(pict->mb_type[mb_index])){
1481 int sx=mb_x*16 + 4 + 8*i;
1483 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1484 int mx=(pict->motion_val[direction][xy][0]>>shift);
1485 int my=(pict->motion_val[direction][xy][1]>>shift);
1487 if(IS_INTERLACED(pict->mb_type[mb_index]))
1490 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1493 int sx= mb_x*16 + 8;
1494 int sy= mb_y*16 + 8;
1495 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1496 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1497 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1498 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP overlay: paint both chroma planes of the MB with a gray level
   proportional to the quantizer */
1502 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1503 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1505 for(y=0; y<block_height; y++){
1506 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1507 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type overlay: color the MB's chroma by type (COLOR maps an angle and
   radius in the UV plane to u/v values) */
1510 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1511 int mb_type= pict->mb_type[mb_index];
1514 #define COLOR(theta, r)\
1515 u= (int)(128 + r*cos(theta*3.141592/180));\
1516 v= (int)(128 + r*sin(theta*3.141592/180));
1520 if(IS_PCM(mb_type)){
1522 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1524 }else if(IS_INTRA4x4(mb_type)){
1526 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1528 }else if(IS_DIRECT(mb_type)){
1530 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1532 }else if(IS_GMC(mb_type)){
1534 }else if(IS_SKIP(mb_type)){
1536 }else if(!USES_LIST(mb_type, 1)){
1538 }else if(!USES_LIST(mb_type, 0)){
1541 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1545 u*= 0x0101010101010101ULL;
1546 v*= 0x0101010101010101ULL;
1547 for(y=0; y<block_height; y++){
1548 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1549 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR-mark partition boundaries in luma (horizontal and vertical splits) */
1553 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1554 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1555 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1557 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1559 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* mark sub-8x8 MV differences inside an 8x8 partition */
1561 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1562 int dm= 1 << (mv_sample_log2-2);
1564 int sx= mb_x*16 + 8*(i&1);
1565 int sy= mb_y*16 + 8*(i>>1);
1566 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1568 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1569 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1571 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1572 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1573 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1577 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset the skip counter so the overlay is regenerated next frame */
1581 s->mbskip_table[mb_index]=0;
/*
 * Half-pel motion compensation for one block in low-resolution decoding
 * mode: compute the sub-pel phase and integer source position at the
 * reduced resolution, fall back to edge emulation when the source area
 * leaves the padded picture, then run the chosen chroma-MC kernel.
 * NOTE(review): partial extract — some declarations, braces and the
 * quarter-sample handling body are elided from this view; code left
 * byte-identical.
 */
1587 static inline int hpel_motion_lowres(MpegEncContext *s,
1588 uint8_t *dest, uint8_t *src,
1589 int field_based, int field_select,
1590 int src_x, int src_y,
1591 int width, int height, int stride,
1592 int h_edge_pos, int v_edge_pos,
1593 int w, int h, h264_chroma_mc_func *pix_op,
1594 int motion_x, int motion_y)
1596 const int lowres= s->avctx->lowres;
/* the MC function table only has entries for lowres 0..2 */
1597 const int op_index= FFMIN(lowres, 2);
1598 const int s_mask= (2<<lowres)-1;
1602 if(s->quarter_sample){
/* split the MV into sub-pel phase (sx/sy) and integer offset at lowres */
1607 sx= motion_x & s_mask;
1608 sy= motion_y & s_mask;
1609 src_x += motion_x >> (lowres+1);
1610 src_y += motion_y >> (lowres+1);
1612 src += src_y * stride + src_x;
/* unsigned compare catches negative positions as well as overruns */
1614 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1615 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1616 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1617 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1618 src= s->edge_emu_buffer;
/* rescale the phase to the 1/8-pel units the chroma-MC kernels expect */
1622 sx= (sx << 2) >> lowres;
1623 sy= (sy << 2) >> lowres;
1626 pix_op[op_index](dest, src, stride, h, sx, sy);
/*
 * Apply one MPEG-style motion vector to all three planes (Y, Cb, Cr) in
 * low-resolution decoding mode, handling field-based prediction, per-format
 * chroma MV derivation, and edge emulation.
 * NOTE(review): partial extract — original line numbers are fused into each
 * line and many intervening lines (braces, else branches, declarations) are
 * missing from this view; code left byte-identical.
 */
1630 /* apply one mpeg motion vector to the three components */
1631 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1632 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1633 int field_based, int bottom_field, int field_select,
1634 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1635 int motion_x, int motion_y, int h, int mb_y)
1637 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1638 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1639 const int lowres= s->avctx->lowres;
1640 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1641 const int block_s= 8>>lowres;
1642 const int s_mask= (2<<lowres)-1;
1643 const int h_edge_pos = s->h_edge_pos >> lowres;
1644 const int v_edge_pos = s->v_edge_pos >> lowres;
1645 linesize = s->current_picture.f.linesize[0] << field_based;
1646 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1648 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate the vertical MV for the field offset at reduced resolution */
1654 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1657 sx= motion_x & s_mask;
1658 sy= motion_y & s_mask;
1659 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1660 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per output format */
1662 if (s->out_format == FMT_H263) {
1663 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1664 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1667 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1670 uvsx = (2*mx) & s_mask;
1671 uvsy = (2*my) & s_mask;
1672 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1673 uvsrc_y = mb_y*block_s + (my >> lowres);
1675 if(s->chroma_y_shift){
1680 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1681 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1683 if(s->chroma_x_shift){
1687 uvsy = motion_y & s_mask;
1689 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1692 uvsx = motion_x & s_mask;
1693 uvsy = motion_y & s_mask;
1700 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1701 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1702 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture source area: route all three planes through the edge
   emulation buffer */
1704 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1705 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1706 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1707 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1708 ptr_y = s->edge_emu_buffer;
1709 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1710 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1711 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1712 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1713 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1714 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1720 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1721 dest_y += s->linesize;
1722 dest_cb+= s->uvlinesize;
1723 dest_cr+= s->uvlinesize;
1727 ptr_y += s->linesize;
1728 ptr_cb+= s->uvlinesize;
1729 ptr_cr+= s->uvlinesize;
/* rescale sub-pel phases to 1/8-pel units and run the MC kernels */
1732 sx= (sx << 2) >> lowres;
1733 sy= (sy << 2) >> lowres;
1734 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1736 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1737 uvsx= (uvsx << 2) >> lowres;
1738 uvsy= (uvsy << 2) >> lowres;
1739 if(h >> s->chroma_y_shift){
1740 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1741 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1744 //FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV macroblocks in low-resolution mode:
 * a single chroma MV is derived from the four luma MVs (with H.263-style
 * rounding) and applied to both Cb and Cr.
 * NOTE(review): partial extract — parameter list tail (mx, my), braces and
 * some declarations are elided from this view; code left byte-identical.
 */
1747 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1748 uint8_t *dest_cb, uint8_t *dest_cr,
1749 uint8_t **ref_picture,
1750 h264_chroma_mc_func *pix_op,
1752 const int lowres= s->avctx->lowres;
1753 const int op_index= FFMIN(lowres, 2);
1754 const int block_s= 8>>lowres;
1755 const int s_mask= (2<<lowres)-1;
1756 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1757 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1758 int emu=0, src_x, src_y, offset, sx, sy;
1761 if(s->quarter_sample){
1766 /* In case of 8X8, we construct a single chroma motion vector
1767 with a special rounding */
1768 mx= ff_h263_round_chroma(mx);
1769 my= ff_h263_round_chroma(my);
1773 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1774 src_y = s->mb_y*block_s + (my >> (lowres+1));
1776 offset = src_y * s->uvlinesize + src_x;
1777 ptr = ref_picture[1] + offset;
1778 if(s->flags&CODEC_FLAG_EMU_EDGE){
1779 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1780 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1781 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1782 ptr= s->edge_emu_buffer;
/* rescale sub-pel phase to 1/8-pel units, then do Cb followed by Cr with
   the same offset and phase */
1786 sx= (sx << 2) >> lowres;
1787 sy= (sy << 2) >> lowres;
1788 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1790 ptr = ref_picture[2] + offset;
1792 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1793 ptr= s->edge_emu_buffer;
1795 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/*
 * Motion compensation of a single macroblock in low-resolution mode,
 * dispatching on s->mv_type (16x16, 8x8, field, 16x8, dual-prime) and
 * delegating to mpeg_motion_lowres()/hpel_motion_lowres().
 * NOTE(review): partial extract — original line numbers are fused into each
 * line and many intervening lines (case labels, braces, declarations) are
 * missing from this view; code left byte-identical.
 */
1799 * motion compensation of a single macroblock
1801 * @param dest_y luma destination pointer
1802 * @param dest_cb chroma cb/u destination pointer
1803 * @param dest_cr chroma cr/v destination pointer
1804 * @param dir direction (0->forward, 1->backward)
1805 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1806 * @param pix_op halfpel motion compensation function (average or put normally)
1807 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1809 static inline void MPV_motion_lowres(MpegEncContext *s,
1810 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1811 int dir, uint8_t **ref_picture,
1812 h264_chroma_mc_func *pix_op)
1816 const int lowres= s->avctx->lowres;
1817 const int block_s= 8>>lowres;
1822 switch(s->mv_type) {
/* MV_TYPE_16X16: one vector for the whole macroblock */
1824 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1826 ref_picture, pix_op,
1827 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* MV_TYPE_8X8: four luma vectors, one derived chroma vector */
1833 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1834 ref_picture[0], 0, 0,
1835 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1836 s->width, s->height, s->linesize,
1837 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1838 block_s, block_s, pix_op,
1839 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs for the single chroma MV */
1841 mx += s->mv[dir][i][0];
1842 my += s->mv[dir][i][1];
1845 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1846 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* MV_TYPE_FIELD: one vector per field */
1849 if (s->picture_structure == PICT_FRAME) {
1851 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1852 1, 0, s->field_select[dir][0],
1853 ref_picture, pix_op,
1854 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1856 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1857 1, 1, s->field_select[dir][1],
1858 ref_picture, pix_op,
1859 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* opposite-parity references in the second field come from the picture
   currently being decoded */
1861 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1862 ref_picture = s->current_picture_ptr->f.data;
1865 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1866 0, 0, s->field_select[dir][0],
1867 ref_picture, pix_op,
1868 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* MV_TYPE_16X8: two vectors for the upper and lower 16x8 halves */
1873 uint8_t ** ref2picture;
1875 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1876 ref2picture= ref_picture;
1878 ref2picture = s->current_picture_ptr->f.data;
1881 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1882 0, 0, s->field_select[dir][i],
1883 ref2picture, pix_op,
1884 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1886 dest_y += 2*block_s*s->linesize;
1887 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1888 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* MV_TYPE_DMV (dual prime): put the first prediction, then average in the
   opposite-parity prediction */
1892 if(s->picture_structure == PICT_FRAME){
1896 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1898 ref_picture, pix_op,
1899 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1901 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1905 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1906 0, 0, s->picture_structure != i+1,
1907 ref_picture, pix_op,
1908 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1910 // after put we make avg of the same block
1911 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1913 //opposite parity is always in the same frame if this is second field
1914 if(!s->first_field){
1915 ref_picture = s->current_picture_ptr->f.data;
/*
 * Return the lowest (largest-index) macroblock row of the reference frame
 * that the current MB's motion vectors can touch — used by frame threading
 * to wait only for the rows actually needed.
 * NOTE(review): partial extract — the switch cases assigning 'mvs' and the
 * 'unhandled:' label are elided from this view; code left byte-identical.
 */
1925 * find the lowest MB row referenced in the MVs
1927 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1929 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1930 int my, off, i, mvs;
/* field pictures are not handled; fall back to the conservative answer */
1932 if (s->picture_structure != PICT_FRAME) goto unhandled;
1934 switch (s->mv_type) {
/* normalize all vertical MVs to quarter-pel and track their extremes */
1948 for (i = 0; i < mvs; i++) {
1949 my = s->mv[dir][i][1]<<qpel_shift;
1950 my_max = FFMAX(my_max, my);
1951 my_min = FFMIN(my_min, my);
/* +63 >> 6: round the quarter-pel extreme up to whole 16-pel MB rows */
1954 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1956 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: assume the whole frame may be referenced */
1958 return s->mb_height-1;
1961 /* put block[] to dest[] */
1962 static inline void put_dct(MpegEncContext *s,
1963 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1965 s->dct_unquantize_intra(s, block, i, qscale);
1966 s->dsp.idct_put (dest, line_size, block);
1969 /* add block[] to dest[] */
1970 static inline void add_dct(MpegEncContext *s,
1971 DCTELEM *block, int i, uint8_t *dest, int line_size)
1973 if (s->block_last_index[i] >= 0) {
1974 s->dsp.idct_add (dest, line_size, block);
1978 static inline void add_dequant_dct(MpegEncContext *s,
1979 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1981 if (s->block_last_index[i] >= 0) {
1982 s->dct_unquantize_inter(s, block, i, qscale);
1984 s->dsp.idct_add (dest, line_size, block);
/*
 * Reset the intra-prediction state (DC predictors, AC prediction values,
 * coded-block flags) for the current macroblock when it is coded as
 * non-intra, so later intra MBs do not predict from stale values.
 * NOTE(review): partial extract — braces, one chained dc_val assignment and
 * a few lines are elided from this view; code left byte-identical.
 */
1989 * cleans dc, ac, coded_block for the current non intra MB
1991 void ff_clean_intra_table_entries(MpegEncContext *s)
1993 int wrap = s->b8_stride;
1994 int xy = s->block_index[0];
/* luma DC predictors of the 4 8x8 blocks reset to the neutral value 1024 */
1997 s->dc_val[0][xy + 1 ] =
1998 s->dc_val[0][xy + wrap] =
1999 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear luma AC prediction rows */
2001 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2002 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* MSMPEG4 v3+ additionally tracks per-block coded flags */
2003 if (s->msmpeg4_version>=3) {
2004 s->coded_block[xy ] =
2005 s->coded_block[xy + 1 ] =
2006 s->coded_block[xy + wrap] =
2007 s->coded_block[xy + 1 + wrap] = 0;
/* switch to macroblock-granularity addressing for the chroma planes */
2010 wrap = s->mb_stride;
2011 xy = s->mb_x + s->mb_y * wrap;
2013 s->dc_val[2][xy] = 1024;
/* clear chroma AC prediction */
2015 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2016 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2018 s->mbintra_table[xy]= 0;
2021 /* generic function called after a macroblock has been parsed by the
2022 decoder or after it has been encoded by the encoder.
2024 Important variables used:
2025 s->mb_intra : true if intra macroblock
2026 s->mv_dir : motion vector direction
2027 s->mv_type : motion vector type
2028 s->mv : motion vector
2029 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Core per-macroblock reconstruction: dequantize + IDCT the coded blocks,
 * run motion compensation for inter MBs, and write the resulting pixels
 * into the current picture (or a scratchpad for non-displayed B frames).
 * lowres_flag / is_mpeg12 are compile-time constants at each call site
 * (the function is av_always_inline) so dead branches are stripped.
 * NOTE(review): this excerpt is missing interleaved source lines
 * (braces, else-arms); comments describe only what is visible. */
2031 static av_always_inline
2032 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2033 int lowres_flag, int is_mpeg12)
2035     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
     /* XvMC does its own reconstruction from pblocks; bail out early. */
2036     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2037        ff_xvmc_decode_mb(s);//xvmc uses pblocks
     /* Optional debug dump: copy the (un-permuted) coefficients of all six
      * 8x8 blocks into dct_coeff and log them. */
2041     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2042        /* save DCT coefficients */
2044        DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2045        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2047            for(j=0; j<64; j++){
2048                *dct++ = block[i][s->dsp.idct_permutation[j]];
2049                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2051            av_log(s->avctx, AV_LOG_DEBUG, "\n");
2055     s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2057     /* update DC predictors for P macroblocks */
2059         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2060             if(s->mbintra_table[mb_xy])
2061                 ff_clean_intra_table_entries(s);
     /* Reset luma/chroma DC predictors to the mid value for intra coding. */
2065             s->last_dc[2] = 128 << s->intra_dc_precision;
2068     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2069         s->mbintra_table[mb_xy]=1;
     /* Skip the pixel reconstruction entirely while encoding non-reference
      * content (intra-only or B frames) unless PSNR stats or RD decisions
      * need the reconstructed picture. */
2071     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2072         uint8_t *dest_y, *dest_cb, *dest_cr;
2073         int dct_linesize, dct_offset;
2074         op_pixels_func (*op_pix)[4];
2075         qpel_mc_func (*op_qpix)[16];
2076         const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2077         const int uvlinesize = s->current_picture.f.linesize[1];
2078         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2079         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2081         /* avoid copy if macroblock skipped in last frame too */
2082         /* skip only during decoding as we might trash the buffers during encoding a bit */
2084             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2085             const int age = s->current_picture.f.age;
2089             if (s->mb_skipped) {
2091                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
                 /* mbskip counter saturates at 99 so it fits in a uint8_t. */
2093                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2094                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2096                 /* if previous was skipped too, then nothing to do !  */
2097                 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2100             } else if(!s->current_picture.f.reference) {
2101                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2102                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2104                 *mbskip_ptr = 0; /* not skipped */
         /* For interlaced DCT the two fields interleave: double the stride,
          * and the second block row starts one (frame) line down. */
2108         dct_linesize = linesize << s->interlaced_dct;
2109         dct_offset  =(s->interlaced_dct)? linesize : linesize*block_size;
2113             dest_cb= s->dest[1];
2114             dest_cr= s->dest[2];
         /* Non-readable destination: reconstruct into the scratchpad. */
2116             dest_y = s->b_scratchpad;
2117             dest_cb= s->b_scratchpad+16*linesize;
2118             dest_cr= s->b_scratchpad+32*linesize;
2122         /* motion handling */
2123         /* decoding or more than one mb_type (MC was already done otherwise) */
         /* Frame-threaded decode: wait until the reference frames have
          * decoded the rows this MB's motion vectors reach into. */
2126             if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2127                 if (s->mv_dir & MV_DIR_FORWARD) {
2128                     ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2130                 if (s->mv_dir & MV_DIR_BACKWARD) {
2131                     ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
             /* Lowres MC path: put forward prediction, then average in the
              * backward one for bidirectional MBs. */
2136                 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2138                 if (s->mv_dir & MV_DIR_FORWARD) {
2139                     MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2140                     op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2142                 if (s->mv_dir & MV_DIR_BACKWARD) {
2143                     MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
             /* Full-resolution MC path; no_rounding selects the
              * no-rounding put variants (never for B frames). */
2146                 op_qpix= s->me.qpel_put;
2147                 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2148                     op_pix = s->dsp.put_pixels_tab;
2150                     op_pix = s->dsp.put_no_rnd_pixels_tab;
2152                 if (s->mv_dir & MV_DIR_FORWARD) {
2153                     MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2154                     op_pix = s->dsp.avg_pixels_tab;
2155                     op_qpix= s->me.qpel_avg;
2157                 if (s->mv_dir & MV_DIR_BACKWARD) {
2158                     MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2163         /* skip dequant / idct if we are really late ;) */
2164         if(s->avctx->skip_idct){
2165             if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2166                ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2167                || s->avctx->skip_idct >= AVDISCARD_ALL)
2171         /* add dct residue */
         /* Codecs without in-loop dequant (or encoder paths) still need the
          * coefficients dequantized here before the IDCT-add. */
2172         if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2173                             || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2174             add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2175             add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2176             add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2177             add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2179             if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2180                 if (s->chroma_y_shift){
2181                     add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2182                     add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2186                     add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2187                     add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2188                     add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2189                     add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
         /* Already-dequantized residue: IDCT-add only (everything but WMV2). */
2192         } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2193             add_dct(s, block[0], 0, dest_y                          , dct_linesize);
2194             add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
2195             add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
2196             add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2198             if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2199                 if(s->chroma_y_shift){//Chroma420
2200                     add_dct(s, block[4], 4, dest_cb, uvlinesize);
2201                     add_dct(s, block[5], 5, dest_cr, uvlinesize);
                 /* 4:2:2 / 4:4:4: chroma uses its own stride/offset. */
2204                     dct_linesize = uvlinesize << s->interlaced_dct;
2205                     dct_offset  =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2207                     add_dct(s, block[4], 4, dest_cb, dct_linesize);
2208                     add_dct(s, block[5], 5, dest_cr, dct_linesize);
2209                     add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2210                     add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2211                     if(!s->chroma_x_shift){//Chroma444
2212                         add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2213                         add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2214                         add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2215                         add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2220         else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2221             ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2224             /* dct only in intra block */
         /* Intra MB: overwrite pixels (put) instead of adding a residue. */
2225             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2226                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
2227                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
2228                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
2229                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2231                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2232                     if(s->chroma_y_shift){
2233                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2234                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2238                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
2239                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
2240                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2241                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
             /* MPEG-1/2 intra: coefficients are already dequantized. */
2245                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
2246                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
2247                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
2248                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2250                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2251                     if(s->chroma_y_shift){
2252                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2253                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2256                         dct_linesize = uvlinesize << s->interlaced_dct;
2257                         dct_offset  =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2259                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
2260                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
2261                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2262                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2263                         if(!s->chroma_x_shift){//Chroma444
2264                             s->dsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
2265                             s->dsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
2266                             s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2267                             s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
         /* Scratchpad path: copy the reconstructed MB into the real frame. */
2275             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
2276             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2277             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public MB decode entry point: dispatch to the always-inlined
 * MPV_decode_mb_internal() with compile-time constant flags, so each of
 * the four (lowres x is_mpeg12) combinations gets its own optimized body. */
2282 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2284     if(s->out_format == FMT_MPEG1) {
2285         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2286         else                 MPV_decode_mb_internal(s, block, 0, 1);
2289         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2290         else                 MPV_decode_mb_internal(s, block, 0, 0);
/**
 * Hand a finished band of rows to the application and pad picture edges.
 * @param y first row of the band
2295  * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
2297 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2298     const int field_pic= s->picture_structure != PICT_FRAME;
     /* Pad the frame border (for unrestricted MVs) as soon as the rows are
      * final; skipped for hwaccel/VDPAU output and when edges are emulated. */
2304     if (!s->avctx->hwaccel
2305        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2306        && s->unrestricted_mv
2307        && s->current_picture.f.reference
2309        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2310         int sides = 0, edge_h;
2311         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2312         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2313         if (y==0) sides |= EDGE_TOP;
2314         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2316         edge_h= FFMIN(h, s->v_edge_pos - y);
2318         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize  , s->linesize,
2319                           s->h_edge_pos        , edge_h        , EDGE_WIDTH        , EDGE_WIDTH        , sides);
2320         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2321                           s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2322         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2323                           s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
     /* Clamp the band to the visible picture height. */
2326     h= FFMIN(h, s->avctx->height - y);
2328     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2330     if (s->avctx->draw_horiz_band) {
         /* Pick the frame the app should see: the current one for B frames,
          * low-delay, or coded-order output, otherwise the last reference. */
2334         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2335             src= (AVFrame*)s->current_picture_ptr;
2336         else if(s->last_picture_ptr)
2337             src= (AVFrame*)s->last_picture_ptr;
2341         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
         /* Plane byte offsets of the band's first row within the frame. */
2347             offset[0]= y * s->linesize;
2349             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2355         s->avctx->draw_horiz_band(s->avctx, src, offset,
2356                                   y, s->picture_structure, h);
/* Set up block_index[] (8x8-block coordinates of the current MB's luma and
 * chroma blocks) and the dest[] pixel pointers for the current MB position.
 * Called per MB row/column advance. */
2360 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2361     const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2362     const int uvlinesize = s->current_picture.f.linesize[1];
     /* mb_size is the log2 of the MB width in pixels; lowres shrinks it. */
2363     const int mb_size= 4 - s->avctx->lowres;
     /* Four luma 8x8 blocks laid out on the b8 grid, then two chroma blocks
      * stored past the luma area. */
2365     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
2366     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
2367     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2368     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2369     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2370     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2371     //block_index is not used by mpeg2, so it is not affected by chroma_format
     /* dest[] starts one MB to the left; the caller advances it per MB. */
2373     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
2374     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2375     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2377     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2379         if(s->picture_structure==PICT_FRAME){
2380             s->dest[0] += s->mb_y *   linesize << mb_size;
2381             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2382             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
         /* Field picture: rows of one field are interleaved, hence mb_y/2. */
2384             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
2385             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2386             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2387             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush decoder state on seek: release every internally- or user-owned
 * picture buffer, drop the reference-picture pointers, and reset the
 * bitstream parser so decoding can restart cleanly at a new position. */
2392 void ff_mpeg_flush(AVCodecContext *avctx){
2394     MpegEncContext *s = avctx->priv_data;
2396     if(s==NULL || s->picture==NULL)
2399     for(i=0; i<s->picture_count; i++){
2400        if (s->picture[i].f.data[0] &&
2401            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2402             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2403         free_frame_buffer(s, &s->picture[i]);
2405     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2407     s->mb_x= s->mb_y= 0;
     /* Reset the parser: state -1 means "no start code found yet". */
2410     s->parse_context.state= -1;
2411     s->parse_context.frame_start_found= 0;
2412     s->parse_context.overread= 0;
2413     s->parse_context.overread_index= 0;
2414     s->parse_context.index= 0;
2415     s->parse_context.last_index= 0;
2416     s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the DC scaler, AC
 * coefficients by qscale * intra matrix, with odd-ification
 * ((level-1)|1) as required by MPEG-1 mismatch control.
 * NOTE(review): excerpt is missing the sign-handling/clipping lines
 * between the two scaling branches. */
2420 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2421                                    DCTELEM *block, int n, int qscale)
2423     int i, level, nCoeffs;
2424     const uint16_t *quant_matrix;
2426     nCoeffs= s->block_last_index[n];
     /* DC: luma vs chroma DC scaler selected by block index n. */
2429         block[0] = block[0] * s->y_dc_scale;
2431         block[0] = block[0] * s->c_dc_scale;
2432     /* XXX: only mpeg1 */
2433     quant_matrix = s->intra_matrix;
2434     for(i=1;i<=nCoeffs;i++) {
2435         int j= s->intra_scantable.permutated[i];
         /* Negative branch (presumably): scale then force the result odd. */
2440                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2441                 level = (level - 1) | 1;
2444                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2445                 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: level' = ((2*level+1) * qscale * matrix)/16,
 * made odd per MPEG-1 mismatch control; applied from coefficient 0 (no
 * separate DC handling for inter blocks).
 * NOTE(review): excerpt is missing the sign branches around the two
 * scaling arms. */
2452 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2453                                    DCTELEM *block, int n, int qscale)
2455     int i, level, nCoeffs;
2456     const uint16_t *quant_matrix;
2458     nCoeffs= s->block_last_index[n];
2460     quant_matrix = s->inter_matrix;
2461     for(i=0; i<=nCoeffs; i++) {
2462         int j= s->intra_scantable.permutated[i];
2467                 level = (((level << 1) + 1) * qscale *
2468                          ((int) (quant_matrix[j]))) >> 4;
2469                 level = (level - 1) | 1;
2472                 level = (((level << 1) + 1) * qscale *
2473                          ((int) (quant_matrix[j]))) >> 4;
2474                 level = (level - 1) | 1;
/* MPEG-2 intra dequantization. Unlike MPEG-1 there is no per-coefficient
 * odd-ification; with alternate_scan all 64 coefficients are processed
 * because the scan order no longer guarantees a compact last index. */
2481 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2482                                    DCTELEM *block, int n, int qscale)
2484     int i, level, nCoeffs;
2485     const uint16_t *quant_matrix;
2487     if(s->alternate_scan) nCoeffs= 63;
2488     else nCoeffs= s->block_last_index[n];
     /* DC scaled separately via the luma/chroma DC scaler. */
2491         block[0] = block[0] * s->y_dc_scale;
2493         block[0] = block[0] * s->c_dc_scale;
2494     quant_matrix = s->intra_matrix;
2495     for(i=1;i<=nCoeffs;i++) {
2496         int j= s->intra_scantable.permutated[i];
2501                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2504                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization.
 * NOTE(review): the lines that distinguish this from
 * dct_unquantize_mpeg2_intra_c (presumably the spec's mismatch-control /
 * sum-parity accumulation) are missing from this excerpt — confirm
 * against the full source. */
2511 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2512                                    DCTELEM *block, int n, int qscale)
2514     int i, level, nCoeffs;
2515     const uint16_t *quant_matrix;
2518     if(s->alternate_scan) nCoeffs= 63;
2519     else nCoeffs= s->block_last_index[n];
2522         block[0] = block[0] * s->y_dc_scale;
2524         block[0] = block[0] * s->c_dc_scale;
2525     quant_matrix = s->intra_matrix;
2526     for(i=1;i<=nCoeffs;i++) {
2527         int j= s->intra_scantable.permutated[i];
2532                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2535                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: level' = ((2*level+1) * qscale * matrix)/16.
 * No odd-ification (MPEG-2 handles mismatch control differently from
 * MPEG-1); full 64 coefficients when alternate_scan is active. */
2544 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2545                                    DCTELEM *block, int n, int qscale)
2547     int i, level, nCoeffs;
2548     const uint16_t *quant_matrix;
2551     if(s->alternate_scan) nCoeffs= 63;
2552     else nCoeffs= s->block_last_index[n];
2554     quant_matrix = s->inter_matrix;
2555     for(i=0; i<=nCoeffs; i++) {
2556         int j= s->intra_scantable.permutated[i];
2561                 level = (((level << 1) + 1) * qscale *
2562                          ((int) (quant_matrix[j]))) >> 4;
2565                 level = (((level << 1) + 1) * qscale *
2566                          ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: level' = level*qmul +/- qadd with
 * qmul = 2*qscale (presumably — the assignment line is missing from this
 * excerpt) and qadd = (qscale-1)|1; DC uses the separate DC scaler. */
2575 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2576                                   DCTELEM *block, int n, int qscale)
2578     int i, level, qmul, qadd;
2581     assert(s->block_last_index[n]>=0);
2587             block[0] = block[0] * s->y_dc_scale;
2589             block[0] = block[0] * s->c_dc_scale;
     /* qadd is forced odd, as the H.263 reconstruction formula requires. */
2590             qadd = (qscale - 1) | 1;
     /* raster_end maps the scan-order last index to a raster-order bound. */
2597         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2599     for(i=1; i<=nCoeffs; i++) {
         /* Sign-dependent reconstruction: subtract qadd for negative levels. */
2603                 level = level * qmul - qadd;
2605                 level = level * qmul + qadd;
/* H.263-style inter dequantization: same qmul/qadd reconstruction as the
 * intra variant but starting from coefficient 0 (no DC special-casing).
 * NOTE(review): the qmul assignment and sign-test lines are missing from
 * this excerpt. */
2612 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2613                                   DCTELEM *block, int n, int qscale)
2615     int i, level, qmul, qadd;
2618     assert(s->block_last_index[n]>=0);
2620     qadd = (qscale - 1) | 1;
2623     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2625     for(i=0; i<=nCoeffs; i++) {
2629             level = level * qmul - qadd;
2631             level = level * qmul + qadd;
/**
2639  * set qscale and update qscale dependent variables.
 * Clamps qscale to the legal range (the lower clamp line is missing from
 * this excerpt; the visible upper clamp caps at 31) and refreshes the
 * chroma qscale and the luma/chroma DC scalers derived from it.
 */
2641 void ff_set_qscale(MpegEncContext * s, int qscale)
2645     else if (qscale > 31)
2649     s->chroma_qscale= s->chroma_qscale_table[qscale];
2651     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2652     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report to frame-threading consumers that rows up to mb_y of the current
 * picture are decoded.  Suppressed for B frames, partitioned frames, and
 * after an error, where row completion cannot be guaranteed in order. */
2655 void MPV_report_decode_progress(MpegEncContext *s)
2657     if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2658         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);