2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations for the codec-specific inverse-quantization C
 * implementations. ff_dct_common_init() installs these as the default
 * function pointers in MpegEncContext; architecture-specific init code
 * may later override them. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale mapping: identity (chroma qscale == luma qscale).
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this excerpt. */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC scale: constant 8 for every qscale (128 entries).
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table below.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* DC scale table, constant 4 — selected via ff_mpeg2_dc_scale_table[1]
 * (presumably keyed on intra_dc_precision; confirm against the MPEG-2
 * spec). NOTE(review): the closing "};" is not visible in this excerpt. */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* DC scale table, constant 2 — selected via ff_mpeg2_dc_scale_table[2].
 * NOTE(review): the closing "};" is not visible in this excerpt. */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* DC scale table, constant 1 — selected via ff_mpeg2_dc_scale_table[3].
 * NOTE(review): the closing "};" is not visible in this excerpt. */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Dispatch table mapping an index 0..3 to a DC scale table (values
 * 8/4/2/1 respectively); index 0 reuses the MPEG-1 table.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
/* Supported pixel-format lists (4:2:0 software path and the
 * hwaccel-capable variant). NOTE(review): the initializer entries and
 * closing "};" of both arrays are missing from this excerpt. */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start-code prefix, carrying the last four
 * bytes across calls in *state so a start code split between buffers is
 * still found.
 * NOTE(review): this excerpt is incomplete — the loop headers, the
 * early-exit checks and the final return path are missing, so the
 * byte-skip logic below (3/2/1-byte steps keyed on p[-1]..p[-3]) cannot
 * be fully verified here.
 */
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
133 uint32_t tmp= *state << 8;
134 *state= tmp + *(p++);
135 if(tmp == 0x100 || p==end)
140 if (p[-1] > 1 ) p+= 3;
141 else if(p[-2] ) p+= 2;
142 else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
/* Installs the default C dequantizers, lets each architecture override
 * them, then builds the permuted scan tables for the chosen IDCT.
 * NOTE(review): the #if/#elif guards around the per-arch init calls and
 * the else branch of the alternate_scan test are missing from this
 * excerpt. */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
159 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
160 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
161 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
163 if(s->flags & CODEC_FLAG_BITEXACT)
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
165 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (x86, Alpha, mlib, MMI, ARM, AltiVec,
 * Blackfin) — presumably each call is conditionally compiled. */
168 MPV_common_init_mmx(s);
170 MPV_common_init_axp(s);
172 MPV_common_init_mlib(s);
174 MPV_common_init_mmi(s);
176 MPV_common_init_arm(s);
178 MPV_common_init_altivec(s);
180 MPV_common_init_bfin(s);
183 /* load & permutate scantables
184 note: only wmv uses different ones
186 if(s->alternate_scan){
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy *src into *dst and tag dst as a copy so buffer-ownership logic
 * elsewhere does not release it twice. NOTE(review): the struct-copy
 * statement and closing brace are missing from this excerpt. */
199 void ff_copy_picture(Picture *dst, Picture *src){
201 dst->type= FF_BUFFER_TYPE_COPY;
205 * Release a frame buffer
/* Returns the frame's pixel buffer to the (possibly threaded) buffer
 * pool and frees any hwaccel private data attached to the Picture. */
207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
209 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
210 av_freep(&pic->hwaccel_picture_private);
214 * Allocate a frame buffer
/* Acquires pixel data for pic via ff_thread_get_buffer(), allocating
 * hwaccel private data first when a hardware accelerator is active,
 * then validates the result (age/type/data set, strides consistent).
 * NOTE(review): the return statements, closing braces and error-path
 * returns are missing from this excerpt. */
216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
220 if (s->avctx->hwaccel) {
221 assert(!pic->hwaccel_picture_private);
222 if (s->avctx->hwaccel->priv_data_size) {
223 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
224 if (!pic->hwaccel_picture_private) {
225 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
231 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
233 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
234 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
235 av_freep(&pic->hwaccel_picture_private);
/* A stride change after the first allocation would invalidate cached
 * linesize-derived pointers, so it is rejected here. */
239 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
240 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
241 free_frame_buffer(s, pic);
245 if (pic->linesize[1] != pic->linesize[2]) {
246 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
247 free_frame_buffer(s, pic);
255 * allocates a Picture
256 * The pixels are allocated/set by calling get_buffer() if shared=0
/* Allocates a Picture and its per-macroblock side tables (variance,
 * skip, qscale, mb_type, motion vectors, reference indices). Motion
 * subsample granularity differs by codec: 4x4 blocks for H.264,
 * 8x8 otherwise. NOTE(review): several structural lines (if/else
 * branches, loop headers over i, returns and closing braces) are
 * missing from this excerpt. */
258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
259 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
260 const int mb_array_size= s->mb_stride*s->mb_height;
261 const int b8_array_size= s->b8_stride*s->mb_height*2;
262 const int b4_array_size= s->b4_stride*s->mb_height*4;
267 assert(pic->data[0]);
268 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
269 pic->type= FF_BUFFER_TYPE_SHARED;
271 assert(!pic->data[0]);
273 if (alloc_frame_buffer(s, pic) < 0)
276 s->linesize = pic->linesize[0];
277 s->uvlinesize= pic->linesize[1];
/* Side tables are allocated once per Picture and reused across frames. */
280 if(pic->qscale_table==NULL){
282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
289 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
290 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
291 if(s->out_format == FMT_H264){
293 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
294 pic->motion_val[i]= pic->motion_val_base[i]+4;
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
297 pic->motion_subsample_log2= 2;
298 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
300 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
301 pic->motion_val[i]= pic->motion_val_base[i]+4;
302 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
304 pic->motion_subsample_log2= 3;
306 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
307 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
309 pic->qstride= s->mb_stride;
310 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
313 /* It might be nicer if the application would keep track of these
314 * but it would require an API change. */
315 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
316 s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
317 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
318 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
321 fail: //for the FF_ALLOCZ_OR_GOTO macro
323 free_frame_buffer(s, pic);
328 * deallocates a picture
/* Frees the frame buffer (unless the pixels are application-shared)
 * and every side table allocated by ff_alloc_picture().
 * NOTE(review): loop headers over i and the handling of the SHARED
 * case at the end are missing from this excerpt. */
330 static void free_picture(MpegEncContext *s, Picture *pic){
333 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
334 free_frame_buffer(s, pic);
337 av_freep(&pic->mb_var);
338 av_freep(&pic->mc_mb_var);
339 av_freep(&pic->mb_mean);
340 av_freep(&pic->mbskip_table);
341 av_freep(&pic->qscale_table);
342 av_freep(&pic->mb_type_base);
343 av_freep(&pic->dct_coeff);
344 av_freep(&pic->pan_scan);
347 av_freep(&pic->motion_val_base[i]);
348 av_freep(&pic->ref_index[i]);
351 if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocates the per-thread scratch buffers (edge emulation, ME
 * scratchpad/maps, DCT blocks, AC prediction values) for one slice
 * context. Returns -1 on allocation failure; cleanup is deferred to
 * MPV_common_end(). NOTE(review): loop headers, encoder-only guards
 * and the success return are missing from this excerpt. */
360 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
361 int y_size = s->b8_stride * (2 * s->mb_height + 1);
362 int c_size = s->mb_stride * (s->mb_height + 1);
363 int yc_size = y_size + 2 * c_size;
366 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
367 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
368 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
370 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
371 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* The scratchpads below deliberately alias one buffer; they are never
 * live at the same time. */
372 s->me.temp= s->me.scratchpad;
373 s->rd_scratchpad= s->me.scratchpad;
374 s->b_scratchpad= s->me.scratchpad;
375 s->obmc_scratchpad= s->me.scratchpad + 16;
377 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
378 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
379 if(s->avctx->noise_reduction){
380 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
383 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
384 s->block= s->blocks[0];
387 s->pblocks[i] = &s->block[i];
390 if (s->out_format == FMT_H263) {
392 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
393 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
394 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
395 s->ac_val[2] = s->ac_val[1] + c_size;
400 return -1; //free() through MPV_common_end()
/* Frees everything allocated by init_duplicate_context() and NULLs the
 * aliasing scratch pointers so they cannot dangle. */
403 static void free_duplicate_context(MpegEncContext *s){
406 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
407 av_freep(&s->me.scratchpad);
411 s->obmc_scratchpad= NULL;
413 av_freep(&s->dct_error_sum);
414 av_freep(&s->me.map);
415 av_freep(&s->me.score_map);
416 av_freep(&s->blocks);
417 av_freep(&s->ac_val_base);
/* Saves the per-thread fields of src into bak so that
 * ff_update_duplicate_context() can memcpy the whole struct and then
 * restore them. NOTE(review): most COPY(...) lines are missing from
 * this excerpt. */
421 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
422 #define COPY(a) bak->a= src->a
423 COPY(allocated_edge_emu_buffer);
424 COPY(edge_emu_buffer);
429 COPY(obmc_scratchpad);
436 COPY(me.map_generation);
/* Copies the shared state of src into dst while preserving dst's
 * per-thread buffers (backed up, memcpy'd over, restored), then
 * re-derives the pblocks pointers which would otherwise point into
 * src. NOTE(review): the declaration of bak and the loop header over i
 * are missing from this excerpt. */
448 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
451 //FIXME copy only needed parts
453 backup_duplicate_context(&bak, dst);
454 memcpy(dst, src, sizeof(MpegEncContext));
455 backup_duplicate_context(dst, &bak);
457 dst->pblocks[i] = &dst->block[i];
459 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading support: synchronizes decoder state from the source
 * thread context (s1) into the destination (s) — picture lists and
 * rebased picture pointers, error-resilience state, codec parameters
 * and a copy of the pending bitstream buffer.
 * NOTE(review): several lines (first-use initialization branch, the
 * final return, some closing braces) are missing from this excerpt. */
462 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
464 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
466 if(dst == src || !s1->context_initialized) return 0;
468 //FIXME can parameters change on I-frames? in that case dst may need a reinit
469 if(!s->context_initialized){
470 memcpy(s, s1, sizeof(MpegEncContext));
473 s->picture_range_start += MAX_PICTURE_COUNT;
474 s->picture_range_end += MAX_PICTURE_COUNT;
475 s->bitstream_buffer = NULL;
476 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
481 s->avctx->coded_height = s1->avctx->coded_height;
482 s->avctx->coded_width = s1->avctx->coded_width;
483 s->avctx->width = s1->avctx->width;
484 s->avctx->height = s1->avctx->height;
486 s->coded_picture_number = s1->coded_picture_number;
487 s->picture_number = s1->picture_number;
488 s->input_picture_number = s1->input_picture_number;
490 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* Range-copy trick: copies every field laid out between last_picture
 * and last_picture_ptr in MpegEncContext; depends on struct layout. */
491 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
493 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
494 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
495 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
497 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
499 //Error/bug resilience
500 s->next_p_frame_damaged = s1->next_p_frame_damaged;
501 s->workaround_bugs = s1->workaround_bugs;
/* Same layout-dependent range-copy trick as above, for the MPEG-4
 * fields between time_increment_bits and shape. */
504 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
507 s->max_b_frames = s1->max_b_frames;
508 s->low_delay = s1->low_delay;
509 s->dropable = s1->dropable;
511 //DivX handling (doesn't work)
512 s->divx_packed = s1->divx_packed;
514 if(s1->bitstream_buffer){
515 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
516 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
517 s->bitstream_buffer_size = s1->bitstream_buffer_size;
518 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
519 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
522 //MPEG2/interlacing info
523 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
525 if(!s1->first_field){
526 s->last_pict_type= s1->pict_type;
527 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
529 if(s1->pict_type!=FF_B_TYPE){
530 s->last_non_b_pict_type= s1->pict_type;
538 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
539 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* NOTE(review): the y_dc_scale_table assignment that presumably pairs
 * with c_dc_scale_table below is missing from this excerpt. */
541 void MPV_common_defaults(MpegEncContext *s){
543 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
544 s->chroma_qscale_table= ff_default_chroma_qscale_table;
545 s->progressive_frame= 1;
546 s->progressive_sequence= 1;
547 s->picture_structure= PICT_FRAME;
549 s->coded_picture_number = 0;
550 s->picture_number = 0;
551 s->input_picture_number = 0;
553 s->picture_in_gop_number = 0;
558 s->picture_range_start = 0;
559 s->picture_range_end = MAX_PICTURE_COUNT;
563 * sets the given MpegEncContext to defaults for decoding.
564 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* Decoder-side defaults; currently just delegates to the common setup. */
566 void MPV_decode_defaults(MpegEncContext *s){
567 MPV_common_defaults(s);
571 * init common structure for both encoder and decoder.
572 * this assumes that some variables like width/height are already set
/* Allocates and initializes everything shared by the encoder and the
 * decoder: macroblock geometry, MV tables, per-MB side tables, the
 * picture pool and per-thread (slice) contexts.
 * NOTE(review): many structural lines (loop headers, #if guards,
 * closing braces, the fail: label and returns) are missing from this
 * excerpt; comments below are limited to what the visible lines show. */
574 av_cold int MPV_common_init(MpegEncContext *s)
576 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
/* Interlaced MPEG-2 needs mb_height rounded to 32-pixel units (two
 * 16-line fields); H.264 manages its own mb_height elsewhere. */
578 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
579 s->mb_height = (s->height + 31) / 32 * 2;
580 else if (s->codec_id != CODEC_ID_H264)
581 s->mb_height = (s->height + 15) / 16;
583 if(s->avctx->pix_fmt == PIX_FMT_NONE){
584 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
588 if(s->avctx->active_thread_type&FF_THREAD_SLICE &&
589 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
590 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
594 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
597 dsputil_init(&s->dsp, s->avctx);
598 ff_dct_common_init(s);
600 s->flags= s->avctx->flags;
601 s->flags2= s->avctx->flags2;
/* Strides carry a +1 guard column; see big_mb_num in ff_alloc_picture. */
603 s->mb_width = (s->width + 15) / 16;
604 s->mb_stride = s->mb_width + 1;
605 s->b8_stride = s->mb_width*2 + 1;
606 s->b4_stride = s->mb_width*4 + 1;
607 mb_array_size= s->mb_height * s->mb_stride;
608 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
610 /* set chroma shifts */
611 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
612 &(s->chroma_y_shift) );
614 /* set default edge pos, will be overriden in decode_header if needed */
615 s->h_edge_pos= s->mb_width*16;
616 s->v_edge_pos= s->mb_height*16;
618 s->mb_num = s->mb_width * s->mb_height;
623 s->block_wrap[3]= s->b8_stride;
625 s->block_wrap[5]= s->mb_stride;
627 y_size = s->b8_stride * (2 * s->mb_height + 1);
628 c_size = s->mb_stride * (s->mb_height + 1);
629 yc_size = y_size + 2 * c_size;
631 /* convert fourcc to upper case */
632 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
634 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
636 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
638 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
639 for(y=0; y<s->mb_height; y++){
640 for(x=0; x<s->mb_width; x++){
641 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
644 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
647 /* Allocate MV tables */
648 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
649 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
650 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
651 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
652 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
653 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip one guard row + column of each base table. */
654 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
655 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
656 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
657 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
658 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
659 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
661 if(s->msmpeg4_version){
662 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
664 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
666 /* Allocate MB type table */
667 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
669 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
671 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
672 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
673 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
674 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
675 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
676 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
678 if(s->avctx->noise_reduction){
679 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* Frame threading multiplies the picture pool by the thread count. */
682 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
683 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
684 for(i = 0; i < s->picture_count; i++) {
685 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
688 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
690 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
691 /* interlaced direct mode decoding tables */
696 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
697 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
699 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
700 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
701 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
703 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
706 if (s->out_format == FMT_H263) {
708 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
709 s->coded_block= s->coded_block_base + s->b8_stride + 1;
711 /* cbp, ac_pred, pred_dir */
712 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
713 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
716 if (s->h263_pred || s->h263_plus || !s->encoding) {
718 //MN: we need these for error resilience of intra-frames
719 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
720 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
721 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
722 s->dc_val[2] = s->dc_val[1] + c_size;
723 for(i=0;i<yc_size;i++)
724 s->dc_val_base[i] = 1024;
727 /* which mb is a intra block */
728 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
729 memset(s->mbintra_table, 1, mb_array_size);
731 /* init macroblock skip table */
732 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
733 //Note the +1 is for a quicker mpeg4 slice_end detection
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
736 s->parse_context.state= -1;
737 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
738 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
739 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
740 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
743 s->context_initialized = 1;
744 s->thread_context[0]= s;
746 if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE) {
747 threads = s->avctx->thread_count;
749 for(i=1; i<threads; i++){
750 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
751 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
754 for(i=0; i<threads; i++){
755 if(init_duplicate_context(s->thread_context[i], s) < 0)
/* Partition macroblock rows evenly across slice threads. */
757 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
758 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
761 if(init_duplicate_context(s, s) < 0) goto fail;
763 s->end_mb_y = s->mb_height;
772 /* init common structure for both encoder and decoder */
/* Tears down everything MPV_common_init() allocated: slice thread
 * contexts, MV/side tables, the picture pool and visualization
 * buffers. Safe against partially-initialized state since av_freep()
 * NULLs pointers and ignores NULL. NOTE(review): loop headers over
 * i/j/k and several closing braces are missing from this excerpt. */
773 void MPV_common_end(MpegEncContext *s)
777 if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE) {
778 for(i=0; i<s->avctx->thread_count; i++){
779 free_duplicate_context(s->thread_context[i]);
781 for(i=1; i<s->avctx->thread_count; i++){
782 av_freep(&s->thread_context[i]);
784 } else free_duplicate_context(s);
786 av_freep(&s->parse_context.buffer);
787 s->parse_context.buffer_size=0;
789 av_freep(&s->mb_type);
790 av_freep(&s->p_mv_table_base);
791 av_freep(&s->b_forw_mv_table_base);
792 av_freep(&s->b_back_mv_table_base);
793 av_freep(&s->b_bidir_forw_mv_table_base);
794 av_freep(&s->b_bidir_back_mv_table_base);
795 av_freep(&s->b_direct_mv_table_base);
/* The offset working pointers must be cleared by hand — they alias the
 * *_base allocations just freed. */
797 s->b_forw_mv_table= NULL;
798 s->b_back_mv_table= NULL;
799 s->b_bidir_forw_mv_table= NULL;
800 s->b_bidir_back_mv_table= NULL;
801 s->b_direct_mv_table= NULL;
805 av_freep(&s->b_field_mv_table_base[i][j][k]);
806 s->b_field_mv_table[i][j][k]=NULL;
808 av_freep(&s->b_field_select_table[i][j]);
809 av_freep(&s->p_field_mv_table_base[i][j]);
810 s->p_field_mv_table[i][j]=NULL;
812 av_freep(&s->p_field_select_table[i]);
815 av_freep(&s->dc_val_base);
816 av_freep(&s->coded_block_base);
817 av_freep(&s->mbintra_table);
818 av_freep(&s->cbp_table);
819 av_freep(&s->pred_dir_table);
821 av_freep(&s->mbskip_table);
822 av_freep(&s->prev_pict_types);
823 av_freep(&s->bitstream_buffer);
824 s->allocated_bitstream_buffer_size=0;
826 av_freep(&s->avctx->stats_out);
827 av_freep(&s->ac_stats);
828 av_freep(&s->error_status_table);
829 av_freep(&s->mb_index2xy);
830 av_freep(&s->lambda_table);
831 av_freep(&s->q_intra_matrix);
832 av_freep(&s->q_inter_matrix);
833 av_freep(&s->q_intra_matrix16);
834 av_freep(&s->q_inter_matrix16);
835 av_freep(&s->input_picture);
836 av_freep(&s->reordered_input_picture);
837 av_freep(&s->dct_offset);
839 if(s->picture && !s->avctx->is_copy){
840 for(i=0; i<s->picture_count; i++){
841 free_picture(s, &s->picture[i]);
844 av_freep(&s->picture);
845 s->context_initialized = 0;
848 s->current_picture_ptr= NULL;
849 s->linesize= s->uvlinesize= 0;
852 av_freep(&s->visualization_buffer[i]);
854 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
855 avcodec_default_free_buffers(s->avctx);
/* Builds the max_level[], max_run[] and index_run[] lookup tables for a
 * run-length table, storing them either in caller-provided static
 * storage or in freshly malloc'd buffers.
 * NOTE(review): the assignments of start/end per `last` iteration, the
 * if/else lines selecting static vs. heap storage and the closing
 * braces are missing from this excerpt. */
858 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
860 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
861 uint8_t index_run[MAX_RUN+1];
862 int last, run, level, start, end, i;
864 /* If table is static, we can quit if rl->max_level[0] is not NULL */
865 if(static_store && rl->max_level[0])
868 /* compute max_level[], max_run[] and index_run[] */
869 for(last=0;last<2;last++) {
878 memset(max_level, 0, MAX_RUN + 1);
879 memset(max_run, 0, MAX_LEVEL + 1);
880 memset(index_run, rl->n, MAX_RUN + 1);
881 for(i=start;i<end;i++) {
882 run = rl->table_run[i];
883 level = rl->table_level[i];
884 if (index_run[run] == rl->n)
886 if (level > max_level[run])
887 max_level[run] = level;
888 if (run > max_run[level])
889 max_run[level] = run;
892 rl->max_level[last] = static_store[last];
894 rl->max_level[last] = av_malloc(MAX_RUN + 1);
895 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
897 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
899 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
900 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
902 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
904 rl->index_run[last] = av_malloc(MAX_RUN + 1);
905 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expands the RL table's VLC into rl_vlc[] entries per qscale q,
 * folding qmul/qadd into the stored level so the decoder can skip the
 * dequantize multiply. NOTE(review): the outer loop over q, the
 * declarations of qmul/qadd/run/level and the escape/illegal-code
 * branches' bodies are missing from this excerpt. */
909 void init_vlc_rl(RLTable *rl)
921 for(i=0; i<rl->vlc.table_size; i++){
922 int code= rl->vlc.table[i][0];
923 int len = rl->vlc.table[i][1];
926 if(len==0){ // illegal code
929 }else if(len<0){ //more bits needed
933 if(code==rl->n){ //esc
937 run= rl->table_run [code] + 1;
938 level= rl->table_level[code] * qmul + qadd;
939 if(code >= rl->last) run+=192;
942 rl->rl_vlc[q][i].len= len;
943 rl->rl_vlc[q][i].level= level;
944 rl->rl_vlc[q][i].run= run;
/* Finds a free slot in the picture pool within
 * [picture_range_start, picture_range_end). The shared case prefers
 * completely untyped slots; otherwise typed-but-empty slots are
 * acceptable. Overflow is treated as an internal error (aborts rather
 * than returning -1 — see comment below).
 * NOTE(review): the if(shared) branch structure and the abort/assert
 * at the end are missing from this excerpt. */
949 int ff_find_unused_picture(MpegEncContext *s, int shared){
953 for(i=s->picture_range_start; i<s->picture_range_end; i++){
954 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
957 for(i=s->picture_range_start; i<s->picture_range_end; i++){
958 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
960 for(i=s->picture_range_start; i<s->picture_range_end; i++){
961 if(s->picture[i].data[0]==NULL) return i;
965 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
966 /* We could return -1, but the codec would crash trying to draw into a
967 * non-existing frame anyway. This is safer than waiting for a random crash.
968 * Also the return of this is never useful, an encoder must only allocate
969 * as much as allowed in the specification. This has no relationship to how
970 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
971 * enough for such valid streams).
972 * Plus, a decoder has to check stream validity and remove frames if too
973 * many reference frames are around. Waiting for "OOM" is not correct at
974 * all. Similarly, missing reference frames have to be replaced by
975 * interpolated/MC frames, anything else is a bug in the codec ...
/* Encoder noise reduction: halves the accumulated DCT error statistics
 * once the sample count exceeds 2^16 (exponential decay), then derives
 * the per-coefficient dct_offset used during quantization.
 * NOTE(review): the inner loop headers over the 64 coefficients and the
 * closing braces are missing from this excerpt. */
981 static void update_noise_reduction(MpegEncContext *s){
984 for(intra=0; intra<2; intra++){
985 if(s->dct_count[intra] > (1<<16)){
987 s->dct_error_sum[intra][i] >>=1;
989 s->dct_count[intra] >>= 1;
993 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
999 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
/* Per-frame setup for both encoder and decoder, called after the header has
 * been coded/decoded and before any macroblock of the frame is processed.
 * Responsibilities visible here: recycle old/zombie/non-reference picture
 * buffers, pick or allocate the current Picture, shift the
 * last/next/current picture pointers, allocate dummy reference frames for
 * broken streams, fix up line sizes for field pictures, and (re)select the
 * dequantizer functions.
 * Returns 0 on success (error-return lines are elided in this view).
 * NOTE(review): many interior lines (braces, declarations of i/pic,
 * error returns) are elided; visible tokens are byte-identical. */
1001 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1007 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1009 /* mark&release old frames */
/* h264 (except SVQ3) manages its own DPB, so skip the release there */
1010 if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
1011 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1012 free_frame_buffer(s, s->last_picture_ptr);
1014 /* release forgotten pictures */
1015 /* if(mpeg124/h263) */
/* any referenced picture other than next_picture_ptr is a leak ("zombie") */
1017 for(i=0; i<s->picture_count; i++){
1018 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1019 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1020 free_frame_buffer(s, &s->picture[i]);
1028 /* release non reference frames */
1029 for(i=0; i<s->picture_count; i++){
1030 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1031 free_frame_buffer(s, &s->picture[i]);
/* reuse a pre-set current picture if it has no data yet, else grab a slot */
1035 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1036 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1038 i= ff_find_unused_picture(s, 0);
1039 pic= &s->picture[i];
/* h264 reference semantics follow picture_structure; elsewhere any
 * non-B picture is a reference */
1044 if (s->codec_id == CODEC_ID_H264)
1045 pic->reference = s->picture_structure;
1046 else if (s->pict_type != FF_B_TYPE)
1050 pic->coded_picture_number= s->coded_picture_number++;
1052 if(ff_alloc_picture(s, pic, 0) < 0)
1055 s->current_picture_ptr= pic;
1056 //FIXME use only the vars from current_pic
1057 s->current_picture_ptr->top_field_first= s->top_field_first;
/* for mpeg1/2 field pictures, derive top_field_first from which field
 * is coded first */
1058 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1059 if(s->picture_structure != PICT_FRAME)
1060 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1062 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
1063 s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
1066 s->current_picture_ptr->pict_type= s->pict_type;
1067 // if(s->flags && CODEC_FLAG_QSCALE)
1068 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1069 s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
1071 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* non-B frames shift the reference chain: next -> last, current -> next */
1073 if (s->pict_type != FF_B_TYPE) {
1074 s->last_picture_ptr= s->next_picture_ptr;
1076 s->next_picture_ptr= s->current_picture_ptr;
1078 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1079 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
1080 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
1081 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
1082 s->pict_type, s->dropable);*/
/* broken streams: synthesize dummy reference frames so prediction has
 * something to read, and mark them fully decoded for frame threading */
1084 if(s->codec_id != CODEC_ID_H264){
1085 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
1086 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1087 /* Allocate a dummy frame */
1088 i= ff_find_unused_picture(s, 0);
1089 s->last_picture_ptr= &s->picture[i];
1090 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1092 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1093 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1095 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
1096 /* Allocate a dummy frame */
1097 i= ff_find_unused_picture(s, 0);
1098 s->next_picture_ptr= &s->picture[i];
1099 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1101 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1102 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1106 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1107 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1109 assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* field pictures: point data[] at the right field and double the strides
 * so one "line" step skips the other field */
1111 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1114 if(s->picture_structure == PICT_BOTTOM_FIELD){
1115 s->current_picture.data[i] += s->current_picture.linesize[i];
1117 s->current_picture.linesize[i] *= 2;
1118 s->last_picture.linesize[i] *=2;
1119 s->next_picture.linesize[i] *=2;
1124 s->hurry_up= s->avctx->hurry_up;
1126 s->error_recognition= avctx->error_recognition;
1128 /* set dequantizer, we can't do it during init as it might change for mpeg4
1129 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1130 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1131 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1132 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1133 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1134 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1135 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1137 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1138 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* dct_error_sum only exists when the encoder runs noise reduction */
1141 if(s->dct_error_sum){
1142 assert(s->avctx->noise_reduction && s->encoding);
1144 update_noise_reduction(s);
1147 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1148 return ff_xvmc_field_start(s, avctx);
1153 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Per-frame teardown for both encoder and decoder, called after the whole
 * frame has been coded/decoded: draw edge padding around reference frames
 * (for unrestricted MV prediction), record last-frame statistics, sync the
 * current_picture copy back into the picture array, release non-reference
 * buffers, and report decode completion for frame threading.
 * NOTE(review): interior lines (braces, loop bodies) are elided in this
 * view; visible tokens are byte-identical. */
1154 void MPV_frame_end(MpegEncContext *s)
1157 /* redraw edges for the frame if decoding didn't complete */
1158 //just to make sure that all data is rendered.
1159 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1160 ff_xvmc_field_end(s);
/* pad edges only for software-decoded reference frames that may be read
 * beyond the picture border by unrestricted motion vectors */
1161 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1162 && !s->avctx->hwaccel
1163 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1164 && s->unrestricted_mv
1165 && s->current_picture.reference
1167 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1168 int edges = EDGE_BOTTOM | EDGE_TOP, h = s->v_edge_pos;
/* chroma planes use half the width/height and half the edge width */
1170 s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , h , EDGE_WIDTH , edges);
1171 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, h>>1, EDGE_WIDTH/2, edges);
1172 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, h>>1, EDGE_WIDTH/2, edges);
1178 s->last_pict_type = s->pict_type;
1179 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1180 if(s->pict_type!=FF_B_TYPE){
1181 s->last_non_b_pict_type= s->pict_type;
1184 /* copy back current_picture variables */
1185 for(i=0; i<MAX_PICTURE_COUNT; i++){
1186 if(s->picture[i].data[0] == s->current_picture.data[0]){
1187 s->picture[i]= s->current_picture;
1191 assert(i<MAX_PICTURE_COUNT);
1195 /* release non-reference frames */
1196 for(i=0; i<s->picture_count; i++){
1197 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1198 free_frame_buffer(s, &s->picture[i]);
1202 // clear copies, to avoid confusion
1204 memset(&s->last_picture, 0, sizeof(Picture));
1205 memset(&s->next_picture, 0, sizeof(Picture));
1206 memset(&s->current_picture, 0, sizeof(Picture));
1208 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* frame threading: mark all MB rows of this reference frame as decoded */
1210 if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
1211 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1216 * draws a line from (ex, ey) -> (sx, sy).
1217 * @param w width of the image
1218 * @param h height of the image
1219 * @param stride stride/linesize of the image
1220 * @param color color of the arrow
/* Anti-aliased line drawing for the motion-vector debug visualization.
 * Uses 16.16 fixed-point interpolation along the major axis and splits the
 * additive "color" between the two pixels straddling the exact position.
 * Endpoints are clipped to the image; out-of-bounds writes are avoided by
 * the clipping only at the endpoints (the caller pre-clips via draw_arrow).
 * NOTE(review): several lines (declarations of x/y/f/fr, else branch,
 * braces) are elided in this view; visible tokens are byte-identical. */
1222 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1225 sx= av_clip(sx, 0, w-1);
1226 sy= av_clip(sy, 0, h-1);
1227 ex= av_clip(ex, 0, w-1);
1228 ey= av_clip(ey, 0, h-1);
1230 buf[sy*stride + sx]+= color;
/* x-major: iterate over x, interpolate y in 16.16 fixed point */
1232 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* ensure left-to-right iteration */
1234 FFSWAP(int, sx, ex);
1235 FFSWAP(int, sy, ey);
1237 buf+= sx + sy*stride;
1239 f= ((ey-sy)<<16)/ex;
1240 for(x= 0; x <= ex; x++){
/* distribute color between the two vertically adjacent pixels */
1243 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1244 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* y-major: same scheme with the roles of x and y exchanged */
1248 FFSWAP(int, sx, ex);
1249 FFSWAP(int, sy, ey);
1251 buf+= sx + sy*stride;
1253 if(ey) f= ((ex-sx)<<16)/ey;
1255 for(y= 0; y <= ey; y++){
1258 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1259 buf[y*stride + x+1]+= (color* fr )>>16;
1265 * draws an arrow from (ex, ey) -> (sx, sy).
1266 * @param w width of the image
1267 * @param h height of the image
1268 * @param stride stride/linesize of the image
1269 * @param color color of the arrow
/* Draws an arrow from (ex, ey) -> (sx, sy) for motion-vector
 * visualization: the shaft via draw_line plus, for long enough vectors,
 * two short head strokes at the (sx, sy) end. Coordinates are pre-clipped
 * to a 100-pixel margin around the image so draw_line's own clipping stays
 * cheap and overflow-safe.
 * NOTE(review): declarations of dx/dy/rx/ry and some braces are elided in
 * this view; visible tokens are byte-identical. */
1271 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1274 sx= av_clip(sx, -100, w+100);
1275 sy= av_clip(sy, -100, h+100);
1276 ex= av_clip(ex, -100, w+100);
1277 ey= av_clip(ey, -100, h+100);
/* only draw a head when the vector is longer than 3 pixels */
1282 if(dx*dx + dy*dy > 3*3){
1285 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1287 //FIXME subpixel accuracy
/* normalize head strokes to a fixed on-screen length */
1288 rx= ROUNDED_DIV(rx*3<<4, length);
1289 ry= ROUNDED_DIV(ry*3<<4, length);
1291 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1292 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1294 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1298 * prints debugging info for the given picture.
/* Debug output for a decoded picture, driven by avctx->debug and
 * avctx->debug_mv flags. Two independent modes are visible here:
 *  1. textual dump to the log: picture type, then a per-MB grid of skip
 *     counts, qscale values and mb_type classification characters;
 *  2. visual overlay: the picture is copied into visualization_buffer and
 *     motion vectors / QP / mb_type are drawn directly into the planes.
 * No-op for hwaccel pictures or when mb_type is absent.
 * NOTE(review): many interior lines (declarations, braces, else branches)
 * are elided in this view; visible tokens are byte-identical. */
1300 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1302 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* ---- mode 1: textual per-MB dump ---- */
1304 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1307 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1308 switch (pict->pict_type) {
1309 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1310 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1311 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1312 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1313 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1314 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1316 for(y=0; y<s->mb_height; y++){
1317 for(x=0; x<s->mb_width; x++){
1318 if(s->avctx->debug&FF_DEBUG_SKIP){
/* skip counts are clamped to a single digit */
1319 int count= s->mbskip_table[x + y*s->mb_stride];
1320 if(count>9) count=9;
1321 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1323 if(s->avctx->debug&FF_DEBUG_QP){
1324 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1326 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1327 int mb_type= pict->mb_type[x + y*s->mb_stride];
1328 //Type & MV direction
/* one char per MB: prediction class / MV direction */
1330 av_log(s->avctx, AV_LOG_DEBUG, "P");
1331 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1332 av_log(s->avctx, AV_LOG_DEBUG, "A");
1333 else if(IS_INTRA4x4(mb_type))
1334 av_log(s->avctx, AV_LOG_DEBUG, "i");
1335 else if(IS_INTRA16x16(mb_type))
1336 av_log(s->avctx, AV_LOG_DEBUG, "I");
1337 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1338 av_log(s->avctx, AV_LOG_DEBUG, "d");
1339 else if(IS_DIRECT(mb_type))
1340 av_log(s->avctx, AV_LOG_DEBUG, "D");
1341 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1342 av_log(s->avctx, AV_LOG_DEBUG, "g");
1343 else if(IS_GMC(mb_type))
1344 av_log(s->avctx, AV_LOG_DEBUG, "G");
1345 else if(IS_SKIP(mb_type))
1346 av_log(s->avctx, AV_LOG_DEBUG, "S");
1347 else if(!USES_LIST(mb_type, 1))
1348 av_log(s->avctx, AV_LOG_DEBUG, ">");
1349 else if(!USES_LIST(mb_type, 0))
1350 av_log(s->avctx, AV_LOG_DEBUG, "<");
1352 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1353 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second char: partitioning (+/-/| = 8x8/16x8/8x16) */
1358 av_log(s->avctx, AV_LOG_DEBUG, "+");
1359 else if(IS_16X8(mb_type))
1360 av_log(s->avctx, AV_LOG_DEBUG, "-");
1361 else if(IS_8X16(mb_type))
1362 av_log(s->avctx, AV_LOG_DEBUG, "|");
1363 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1364 av_log(s->avctx, AV_LOG_DEBUG, " ");
1366 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third char: '=' marks interlaced MBs */
1369 if(IS_INTERLACED(mb_type))
1370 av_log(s->avctx, AV_LOG_DEBUG, "=");
1372 av_log(s->avctx, AV_LOG_DEBUG, " ");
1374 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1376 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- mode 2: visual overlay into a copy of the picture ---- */
1380 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1381 const int shift= 1 + s->quarter_sample;
1385 int h_chroma_shift, v_chroma_shift, block_height;
1386 const int width = s->avctx->width;
1387 const int height= s->avctx->height;
1388 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1389 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1390 s->low_delay=0; //needed to see the vectors without trashing the buffers
1392 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* draw into a private copy so reference data is not modified */
1394 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1395 pict->data[i]= s->visualization_buffer[i];
1397 pict->type= FF_BUFFER_TYPE_COPY;
1399 block_height = 16>>v_chroma_shift;
1401 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1403 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1404 const int mb_index= mb_x + mb_y*s->mb_stride;
/* motion vectors: type 0 = P forward, 1 = B forward, 2 = B backward */
1405 if((s->avctx->debug_mv) && pict->motion_val){
1407 for(type=0; type<3; type++){
1410 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1414 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1418 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1423 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* 8x8: one arrow per quadrant, anchored at each block center */
1426 if(IS_8X8(pict->mb_type[mb_index])){
1429 int sx= mb_x*16 + 4 + 8*(i&1);
1430 int sy= mb_y*16 + 4 + 8*(i>>1);
1431 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1432 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1433 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1434 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1436 }else if(IS_16X8(pict->mb_type[mb_index])){
1440 int sy=mb_y*16 + 4 + 8*i;
1441 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1442 int mx=(pict->motion_val[direction][xy][0]>>shift);
1443 int my=(pict->motion_val[direction][xy][1]>>shift);
1445 if(IS_INTERLACED(pict->mb_type[mb_index]))
1448 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1450 }else if(IS_8X16(pict->mb_type[mb_index])){
1453 int sx=mb_x*16 + 4 + 8*i;
1455 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1456 int mx=(pict->motion_val[direction][xy][0]>>shift);
1457 int my=(pict->motion_val[direction][xy][1]>>shift);
1459 if(IS_INTERLACED(pict->mb_type[mb_index]))
1462 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* default: single 16x16 vector anchored at the MB center */
1465 int sx= mb_x*16 + 8;
1466 int sy= mb_y*16 + 8;
1467 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1468 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1469 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1470 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: paint both chroma planes with a gray level
 * proportional to qscale */
1474 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1475 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1477 for(y=0; y<block_height; y++){
1478 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1479 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualization: color-code the chroma planes by type and
 * XOR partition boundaries into luma */
1482 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1483 int mb_type= pict->mb_type[mb_index];
1486 #define COLOR(theta, r)\
1487 u= (int)(128 + r*cos(theta*3.141592/180));\
1488 v= (int)(128 + r*sin(theta*3.141592/180));
1492 if(IS_PCM(mb_type)){
1494 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1496 }else if(IS_INTRA4x4(mb_type)){
1498 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1500 }else if(IS_DIRECT(mb_type)){
1502 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1504 }else if(IS_GMC(mb_type)){
1506 }else if(IS_SKIP(mb_type)){
1508 }else if(!USES_LIST(mb_type, 1)){
1510 }else if(!USES_LIST(mb_type, 0)){
1513 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the u/v byte across a full 8-byte chroma row */
1517 u*= 0x0101010101010101ULL;
1518 v*= 0x0101010101010101ULL;
1519 for(y=0; y<block_height; y++){
1520 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1521 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* mark horizontal/vertical partition splits in the luma plane */
1525 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1526 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1527 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1529 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1531 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* for sub-8x8 MV storage, also mark 4x4 sub-splits where the MVs differ */
1533 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1534 int dm= 1 << (mv_sample_log2-2);
1536 int sx= mb_x*16 + 8*(i&1);
1537 int sy= mb_y*16 + 8*(i>>1);
1538 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1540 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1541 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1543 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1544 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1545 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1549 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset skip count so the overlay is redrawn on every frame */
1553 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer source offset and a sub-pel
 * fraction (s_mask bits), emulates edges when the source block would read
 * outside [0,h_edge_pos]x[0,v_edge_pos], and applies the h264 chroma MC
 * function selected by the lowres factor to interpolate.
 * NOTE(review): variable declarations, the quarter_sample body and braces
 * are elided in this view; visible tokens are byte-identical. */
1559 static inline int hpel_motion_lowres(MpegEncContext *s,
1560 uint8_t *dest, uint8_t *src,
1561 int field_based, int field_select,
1562 int src_x, int src_y,
1563 int width, int height, int stride,
1564 int h_edge_pos, int v_edge_pos,
1565 int w, int h, h264_chroma_mc_func *pix_op,
1566 int motion_x, int motion_y)
1568 const int lowres= s->avctx->lowres;
1569 const int op_index= FFMIN(lowres, 2);
/* sub-pel fraction mask: lowres scales the fractional precision */
1570 const int s_mask= (2<<lowres)-1;
1574 if(s->quarter_sample){
1579 sx= motion_x & s_mask;
1580 sy= motion_y & s_mask;
1581 src_x += motion_x >> (lowres+1);
1582 src_y += motion_y >> (lowres+1);
1584 src += src_y * stride + src_x;
/* edge emulation when the (w+1)x(h+1) read area crosses the border */
1586 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1587 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1588 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1589 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1590 src= s->edge_emu_buffer;
/* rescale sub-pel fraction to the 1/8-pel domain expected by pix_op */
1594 sx= (sx << 2) >> lowres;
1595 sy= (sy << 2) >> lowres;
1598 pix_op[op_index](dest, src, stride, h, sx, sy);
1602 /* apply one mpeg motion vector to the three components */
/* Apply one mpeg motion vector to all three planes (Y, Cb, Cr) in lowres
 * mode. Computes integer/sub-pel source positions for luma and chroma
 * (chroma rounding depends on out_format: H263 special rounding, H261
 * full-pel), emulates edges where needed, handles field-based addressing,
 * and interpolates with the h264 chroma MC functions.
 * NOTE(review): declarations, some else branches and braces are elided in
 * this view; visible tokens are byte-identical. */
1603 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1604 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1605 int field_based, int bottom_field, int field_select,
1606 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1607 int motion_x, int motion_y, int h, int mb_y)
1609 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1610 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1611 const int lowres= s->avctx->lowres;
1612 const int op_index= FFMIN(lowres, 2);
1613 const int block_s= 8>>lowres;
1614 const int s_mask= (2<<lowres)-1;
1615 const int h_edge_pos = s->h_edge_pos >> lowres;
1616 const int v_edge_pos = s->v_edge_pos >> lowres;
1617 linesize = s->current_picture.linesize[0] << field_based;
1618 uvlinesize = s->current_picture.linesize[1] << field_based;
1620 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* field MC: compensate vertical offset between coded and target field */
1626 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1629 sx= motion_x & s_mask;
1630 sy= motion_y & s_mask;
1631 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1632 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per container format */
1634 if (s->out_format == FMT_H263) {
1635 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1636 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1639 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1642 uvsx = (2*mx) & s_mask;
1643 uvsy = (2*my) & s_mask;
1644 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1645 uvsrc_y = mb_y*block_s + (my >> lowres);
1651 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1652 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1655 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1656 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1657 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads: route all three planes through edge_emu_buffer */
1659 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1660 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1661 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1662 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1663 ptr_y = s->edge_emu_buffer;
1664 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1665 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1666 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1667 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1668 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1669 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1675 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1676 dest_y += s->linesize;
1677 dest_cb+= s->uvlinesize;
1678 dest_cr+= s->uvlinesize;
1682 ptr_y += s->linesize;
1683 ptr_cb+= s->uvlinesize;
1684 ptr_cr+= s->uvlinesize;
/* rescale sub-pel fractions to the domain expected by pix_op */
1687 sx= (sx << 2) >> lowres;
1688 sy= (sy << 2) >> lowres;
1689 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1691 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1692 uvsx= (uvsx << 2) >> lowres;
1693 uvsy= (uvsy << 2) >> lowres;
1694 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1695 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1697 //FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (8x8) macroblocks in lowres mode:
 * the four luma vectors have already been combined by the caller; here the
 * single averaged chroma vector is rounded (H263 rounding), both chroma
 * planes are fetched from the same offset, edge-emulated if needed, and
 * interpolated.
 * NOTE(review): declarations (mx/my/ptr), the emu reuse branch and braces
 * are elided in this view; visible tokens are byte-identical. */
1700 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1701 uint8_t *dest_cb, uint8_t *dest_cr,
1702 uint8_t **ref_picture,
1703 h264_chroma_mc_func *pix_op,
1705 const int lowres= s->avctx->lowres;
1706 const int op_index= FFMIN(lowres, 2);
1707 const int block_s= 8>>lowres;
1708 const int s_mask= (2<<lowres)-1;
1709 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1710 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1711 int emu=0, src_x, src_y, offset, sx, sy;
1714 if(s->quarter_sample){
1719 /* In case of 8X8, we construct a single chroma motion vector
1720 with a special rounding */
1721 mx= ff_h263_round_chroma(mx);
1722 my= ff_h263_round_chroma(my);
1726 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1727 src_y = s->mb_y*block_s + (my >> (lowres+1));
/* both chroma planes share the same source offset */
1729 offset = src_y * s->uvlinesize + src_x;
1730 ptr = ref_picture[1] + offset;
1731 if(s->flags&CODEC_FLAG_EMU_EDGE){
1732 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1733 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1734 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1735 ptr= s->edge_emu_buffer;
1739 sx= (sx << 2) >> lowres;
1740 sy= (sy << 2) >> lowres;
1741 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr: same offset and (when emu was triggered) same emulated edge path */
1743 ptr = ref_picture[2] + offset;
1745 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1746 ptr= s->edge_emu_buffer;
1748 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1752 * motion compensation of a single macroblock
1754 * @param dest_y luma destination pointer
1755 * @param dest_cb chroma cb/u destination pointer
1756 * @param dest_cr chroma cr/v destination pointer
1757 * @param dir direction (0->forward, 1->backward)
1758 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1759 * @param pix_op halfpel motion compensation function (average or put normally)
1760 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Motion compensation of one whole macroblock in lowres mode. Dispatches
 * on s->mv_type (16x16 / 4MV 8x8 / field / 16x8 / dual-prime) using the
 * vectors in s->mv[dir][...] and the MV type from s->mv_type, delegating
 * to mpeg_motion_lowres / hpel_motion_lowres / chroma_4mv_motion_lowres.
 * dir selects forward (0) or backward (1) prediction; ref_picture is the
 * array of the 3 plane pointers of the reference picture.
 * NOTE(review): declarations (mx/my/i/mb_x/mb_y), case labels and braces
 * are elided in this view; visible tokens are byte-identical. */
1762 static inline void MPV_motion_lowres(MpegEncContext *s,
1763 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1764 int dir, uint8_t **ref_picture,
1765 h264_chroma_mc_func *pix_op)
1769 const int lowres= s->avctx->lowres;
1770 const int block_s= 8>>lowres;
1775 switch(s->mv_type) {
/* single 16x16 vector */
1777 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1779 ref_picture, pix_op,
1780 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one luma vector per 8x8 quadrant, chroma from the averaged MV */
1786 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1787 ref_picture[0], 0, 0,
1788 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1789 s->width, s->height, s->linesize,
1790 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1791 block_s, block_s, pix_op,
1792 s->mv[dir][i][0], s->mv[dir][i][1]);
1794 mx += s->mv[dir][i][0];
1795 my += s->mv[dir][i][1];
1798 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1799 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field MV: frame pictures predict top and bottom fields separately */
1802 if (s->picture_structure == PICT_FRAME) {
1804 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1805 1, 0, s->field_select[dir][0],
1806 ref_picture, pix_op,
1807 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1809 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1810 1, 1, s->field_select[dir][1],
1811 ref_picture, pix_op,
1812 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* field pictures may reference the opposite field of the current frame */
1814 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
1815 ref_picture= s->current_picture_ptr->data;
1818 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1819 0, 0, s->field_select[dir][0],
1820 ref_picture, pix_op,
1821 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* 16x8 MV in field pictures: two vectors, each covering half the MB */
1826 uint8_t ** ref2picture;
1828 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
1829 ref2picture= ref_picture;
1831 ref2picture= s->current_picture_ptr->data;
1834 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1835 0, 0, s->field_select[dir][i],
1836 ref2picture, pix_op,
1837 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1839 dest_y += 2*block_s*s->linesize;
1840 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1841 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* dual prime: put first prediction, then average in the second */
1845 if(s->picture_structure == PICT_FRAME){
1849 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1851 ref_picture, pix_op,
1852 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1854 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1858 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1859 0, 0, s->picture_structure != i+1,
1860 ref_picture, pix_op,
1861 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1863 // after put we make avg of the same block
1864 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1866 //opposite parity is always in the same frame if this is second field
1867 if(!s->first_field){
1868 ref_picture = s->current_picture_ptr->data;
1878 * find the lowest MB row referenced in the MVs
/* Returns the lowest (largest-index) macroblock row of the reference frame
 * that the current MB's motion vectors in direction dir can touch — used
 * by frame threading to know how far the reference must be decoded.
 * Scans the vertical MV components (scaled to quarter-pel via qpel_shift),
 * converts the worst-case displacement to MB rows (>>6 with rounding) and
 * clamps to the picture. Falls back to the last row ("unhandled") for
 * field pictures and unrecognized mv_types.
 * NOTE(review): switch case labels and braces are elided in this view;
 * visible tokens are byte-identical. */
1880 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1882 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1883 int my, off, i, mvs;
1885 if (s->picture_structure != PICT_FRAME) goto unhandled;
1887 switch (s->mv_type) {
1901 for (i = 0; i < mvs; i++) {
1902 my = s->mv[dir][i][1]<<qpel_shift;
1903 my_max = FFMAX(my_max, my);
1904 my_min = FFMIN(my_min, my);
/* worst-case vertical reach in MB rows: qpel units / 64, rounded up */
1907 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1909 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1911 return s->mb_height-1;
1914 /* put block[] to dest[] */
/* Dequantize an intra block and write (not add) the IDCT result to dest.
 * i is the block index within the MB; qscale the quantizer to undo. */
1915 static inline void put_dct(MpegEncContext *s,
1916 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1918 s->dct_unquantize_intra(s, block, i, qscale);
1919 s->dsp.idct_put (dest, line_size, block);
1922 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; skipped entirely
 * when the block has no coded coefficients (block_last_index < 0). */
1923 static inline void add_dct(MpegEncContext *s,
1924 DCTELEM *block, int i, uint8_t *dest, int line_size)
1926 if (s->block_last_index[i] >= 0) {
1927 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest; no-op for blocks
 * with no coded coefficients (block_last_index < 0). */
1931 static inline void add_dequant_dct(MpegEncContext *s,
1932 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1934 if (s->block_last_index[i] >= 0) {
1935 s->dct_unquantize_inter(s, block, i, qscale);
1937 s->dsp.idct_add (dest, line_size, block);
1942 * cleans dc, ac, coded_block for the current non intra MB
/* Resets the intra-prediction state for the current non-intra MB: luma and
 * chroma DC predictors back to 1024, AC prediction values zeroed, coded
 * block pattern cleared (msmpeg4 v3+), and the mbintra flag cleared so
 * neighbors don't predict from this MB's stale intra data.
 * NOTE(review): a few lines (e.g. the dc_val[1] reset and braces) are
 * elided in this view; visible tokens are byte-identical. */
1944 void ff_clean_intra_table_entries(MpegEncContext *s)
1946 int wrap = s->b8_stride;
1947 int xy = s->block_index[0];
/* luma DC predictors of the four 8x8 blocks -> neutral value 1024 */
1950 s->dc_val[0][xy + 1 ] =
1951 s->dc_val[0][xy + wrap] =
1952 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC prediction rows/columns */
1954 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1955 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1956 if (s->msmpeg4_version>=3) {
1957 s->coded_block[xy ] =
1958 s->coded_block[xy + 1 ] =
1959 s->coded_block[xy + wrap] =
1960 s->coded_block[xy + 1 + wrap] = 0;
/* switch to MB-granularity indexing for the chroma tables */
1963 wrap = s->mb_stride;
1964 xy = s->mb_x + s->mb_y * wrap;
1966 s->dc_val[2][xy] = 1024;
/* chroma AC prediction */
1968 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1969 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1971 s->mbintra_table[xy]= 0;
1974 /* generic function called after a macroblock has been parsed by the
1975 decoder or after it has been encoded by the encoder.
1977 Important variables used:
1978 s->mb_intra : true if intra macroblock
1979 s->mv_dir : motion vector direction
1980 s->mv_type : motion vector type
1981 s->mv : motion vector
1982 s->interlaced_dct : true if interlaced dct used (mpeg2)
1984 static av_always_inline
1985 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1986 int lowres_flag, int is_mpeg12)
1988 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1989 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1990 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1994 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1995 /* save DCT coefficients */
1997 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1998 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2000 for(j=0; j<64; j++){
2001 *dct++ = block[i][s->dsp.idct_permutation[j]];
2002 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2004 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2008 s->current_picture.qscale_table[mb_xy]= s->qscale;
2010 /* update DC predictors for P macroblocks */
2012 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2013 if(s->mbintra_table[mb_xy])
2014 ff_clean_intra_table_entries(s);
2018 s->last_dc[2] = 128 << s->intra_dc_precision;
2021 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2022 s->mbintra_table[mb_xy]=1;
2024 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2025 uint8_t *dest_y, *dest_cb, *dest_cr;
2026 int dct_linesize, dct_offset;
2027 op_pixels_func (*op_pix)[4];
2028 qpel_mc_func (*op_qpix)[16];
2029 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2030 const int uvlinesize= s->current_picture.linesize[1];
2031 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2032 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2034 /* avoid copy if macroblock skipped in last frame too */
2035 /* skip only during decoding as we might trash the buffers during encoding a bit */
2037 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2038 const int age= s->current_picture.age;
2042 if (s->mb_skipped) {
2044 assert(s->pict_type!=FF_I_TYPE);
2046 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2047 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2049 /* if previous was skipped too, then nothing to do ! */
2050 if (*mbskip_ptr >= age && s->current_picture.reference){
2053 } else if(!s->current_picture.reference){
2054 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2055 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2057 *mbskip_ptr = 0; /* not skipped */
2061 dct_linesize = linesize << s->interlaced_dct;
2062 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2066 dest_cb= s->dest[1];
2067 dest_cr= s->dest[2];
2069 dest_y = s->b_scratchpad;
2070 dest_cb= s->b_scratchpad+16*linesize;
2071 dest_cr= s->b_scratchpad+32*linesize;
2075 /* motion handling */
2076 /* decoding or more than one mb_type (MC was already done otherwise) */
2079 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2080 if (s->mv_dir & MV_DIR_FORWARD) {
2081 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2083 if (s->mv_dir & MV_DIR_BACKWARD) {
2084 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2089 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2091 if (s->mv_dir & MV_DIR_FORWARD) {
2092 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
2093 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2095 if (s->mv_dir & MV_DIR_BACKWARD) {
2096 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
2099 op_qpix= s->me.qpel_put;
2100 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
2101 op_pix = s->dsp.put_pixels_tab;
2103 op_pix = s->dsp.put_no_rnd_pixels_tab;
2105 if (s->mv_dir & MV_DIR_FORWARD) {
2106 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2107 op_pix = s->dsp.avg_pixels_tab;
2108 op_qpix= s->me.qpel_avg;
2110 if (s->mv_dir & MV_DIR_BACKWARD) {
2111 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2116 /* skip dequant / idct if we are really late ;) */
2118 if(s->hurry_up>1) goto skip_idct;
2120 if(s->avctx->skip_idct){
2121 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
2122 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
2123 || s->avctx->skip_idct >= AVDISCARD_ALL)
2127 /* add dct residue */
2128 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2129 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2130 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2131 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2132 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2133 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2135 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2136 if (s->chroma_y_shift){
2137 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2138 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2142 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2143 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2144 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2145 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2148 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2149 add_dct(s, block[0], 0, dest_y , dct_linesize);
2150 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2151 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2152 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2154 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2155 if(s->chroma_y_shift){//Chroma420
2156 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2157 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2160 dct_linesize = uvlinesize << s->interlaced_dct;
2161 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2163 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2164 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2165 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2166 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2167 if(!s->chroma_x_shift){//Chroma444
2168 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2169 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2170 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2171 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2176 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2177 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2180 /* dct only in intra block */
2181 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2182 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2183 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2184 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2185 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2187 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2188 if(s->chroma_y_shift){
2189 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2190 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2194 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2195 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2196 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2197 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2201 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2202 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2203 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2204 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2206 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2207 if(s->chroma_y_shift){
2208 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2209 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2212 dct_linesize = uvlinesize << s->interlaced_dct;
2213 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2215 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2216 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2217 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2218 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2219 if(!s->chroma_x_shift){//Chroma444
2220 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2221 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2222 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2223 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2231 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2232 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2233 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one decoded macroblock by dispatching to the
 * MPV_decode_mb_internal() template.  The last argument selects the
 * MPEG-1/2-specific code path (1 for FMT_MPEG1 output), the third
 * argument selects the lowres (reduced-resolution) decoding path.
 * NOTE(review): this view of the file is decimated; the "} else {"
 * for non-FMT_MPEG1 formats and the closing braces sit on elided lines.
 */
2238 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2240 if(s->out_format == FMT_MPEG1) {
2241 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2242 else MPV_decode_mb_internal(s, block, 0, 1);
/* non-MPEG-1/2 formats: same dispatch with is_mpeg12 == 0 */
2245 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2246 else MPV_decode_mb_internal(s, block, 0, 0);
2251 * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Finish a horizontal band of the current picture: pad the frame edges
 * (needed for unrestricted motion vectors of later frames) and, if the
 * user installed a draw_horiz_band callback, hand the band to it.
 * @param y first row of the band
 * @param h nominal band height; clipped against the picture bottom below
 */
2253 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2254 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge padding is only done by the software path, for reference frames,
 * and only when edge emulation is not requested. */
2260 if (!s->avctx->hwaccel
2261 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2262 && s->unrestricted_mv
2263 && s->current_picture.reference
2265 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2266 int sides = 0, edge_h;
2267 if (y==0) sides |= EDGE_TOP;
2268 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clip band height so edge drawing never reads past the coded area */
2270 edge_h= FFMIN(h, s->v_edge_pos - y);
/* luma gets full EDGE_WIDTH padding; 4:2:0 chroma gets half in each direction */
2272 s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize , s->h_edge_pos , edge_h , EDGE_WIDTH , sides);
2273 s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>1)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>1, edge_h>>1, EDGE_WIDTH/2, sides);
2274 s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>1)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>1, edge_h>>1, EDGE_WIDTH/2, sides);
2277 h= FFMIN(h, s->avctx->height - y);
/* field pictures: wait for the second field unless the user accepts fields */
2279 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2281 if (s->avctx->draw_horiz_band) {
/* Pick the frame to display: the current one for B-frames / low-delay /
 * coded-order output, otherwise the previous reference frame. */
2285 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2286 src= (AVFrame*)s->current_picture_ptr;
2287 else if(s->last_picture_ptr)
2288 src= (AVFrame*)s->last_picture_ptr;
2292 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band start within each plane
 * (offset[1], elided here, presumably mirrors offset[2] — elided lines) */
2298 offset[0]= y * s->linesize;
2300 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2306 s->avctx->draw_horiz_band(s->avctx, src, offset,
2307 y, s->picture_structure, h);
/**
 * Initialize s->block_index[] (indices of the six 8x8 blocks of the
 * current macroblock inside the per-frame block arrays) and s->dest[]
 * (pointers to the macroblock's position in the three picture planes),
 * based on s->mb_x / s->mb_y.
 */
2311 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2312 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2313 const int uvlinesize= s->current_picture.linesize[1];
/* macroblock size in log2 pixels: 4 (=16px) normally, reduced by lowres */
2314 const int mb_size= 4 - s->avctx->lowres;
/* indices 0-3: the four luma 8x8 blocks (two per row of b8_stride) */
2316 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2317 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2318 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2319 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* indices 4-5: chroma blocks, stored after the luma block area */
2320 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2321 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2322 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* horizontal component of the destination pointers */
2324 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2325 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2326 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* vertical component; skipped for the B-frame + draw_horiz_band + frame
 * picture combination (that path handles rows differently) */
2328 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2330 if(s->picture_structure==PICT_FRAME){
2331 s->dest[0] += s->mb_y * linesize << mb_size;
2332 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2333 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows interleave, so only half the mb_y offset applies */
2335 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2336 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2337 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2338 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush the decoder state: release all internally/user allocated picture
 * buffers, drop the reference-picture pointers and reset the parse
 * context.  Called on seek/flush so no stale frames survive.
 */
2343 void ff_mpeg_flush(AVCodecContext *avctx){
2345 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never (fully) initialized */
2347 if(s==NULL || s->picture==NULL)
2350 for(i=0; i<s->picture_count; i++){
2351 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2352 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2353 free_frame_buffer(s, &s->picture[i]);
2355 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2357 s->mb_x= s->mb_y= 0;
/* reset bitstream parser state so the next packet starts clean */
2360 s->parse_context.state= -1;
2361 s->parse_context.frame_start_found= 0;
2362 s->parse_context.overread= 0;
2363 s->parse_context.overread_index= 0;
2364 s->parse_context.index= 0;
2365 s->parse_context.last_index= 0;
2366 s->bitstream_buffer_size=0;
/**
 * Dequantize an intra block, MPEG-1 style: the DC coefficient is scaled
 * by the y/c DC scale, AC coefficients by (level * qscale * matrix) >> 3,
 * then forced odd via (level-1)|1 (MPEG-1 oddification/mismatch control).
 * @param n     block index (selects luma vs chroma DC scale)
 * NOTE(review): sign-handling branches and the store back into block[j]
 * lie on elided lines in this view.
 */
2370 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2371 DCTELEM *block, int n, int qscale)
2373 int i, level, nCoeffs;
2374 const uint16_t *quant_matrix;
2376 nCoeffs= s->block_last_index[n];
/* DC: luma uses y_dc_scale, chroma (the elided else) uses c_dc_scale */
2379 block[0] = block[0] * s->y_dc_scale;
2381 block[0] = block[0] * s->c_dc_scale;
2382 /* XXX: only mpeg1 */
2383 quant_matrix = s->intra_matrix;
2384 for(i=1;i<=nCoeffs;i++) {
2385 int j= s->intra_scantable.permutated[i];
/* negative-level branch (surrounding sign test elided) */
2390 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2391 level = (level - 1) | 1;
/* positive-level branch */
2394 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2395 level = (level - 1) | 1;
/**
 * Dequantize an inter block, MPEG-1 style: every coefficient (including
 * index 0) is reconstructed as ((2*level + 1) * qscale * matrix) >> 4
 * and forced odd via (level-1)|1 (MPEG-1 mismatch control).
 * NOTE(review): sign handling and the store into block[j] are on
 * elided lines in this view.
 */
2402 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2403 DCTELEM *block, int n, int qscale)
2405 int i, level, nCoeffs;
2406 const uint16_t *quant_matrix;
2408 nCoeffs= s->block_last_index[n];
2410 quant_matrix = s->inter_matrix;
2411 for(i=0; i<=nCoeffs; i++) {
2412 int j= s->intra_scantable.permutated[i];
/* negative-level branch (surrounding sign test elided) */
2417 level = (((level << 1) + 1) * qscale *
2418 ((int) (quant_matrix[j]))) >> 4;
2419 level = (level - 1) | 1;
/* positive-level branch */
2422 level = (((level << 1) + 1) * qscale *
2423 ((int) (quant_matrix[j]))) >> 4;
2424 level = (level - 1) | 1;
/**
 * Dequantize an intra block, MPEG-2 style: like the MPEG-1 variant but
 * WITHOUT the (level-1)|1 oddification (MPEG-2 uses sum-parity mismatch
 * control instead), and with alternate_scan forcing all 63 AC coeffs.
 * NOTE(review): sign handling and the block[j] store are on elided lines.
 */
2431 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2432 DCTELEM *block, int n, int qscale)
2434 int i, level, nCoeffs;
2435 const uint16_t *quant_matrix;
/* alternate scan order cannot rely on block_last_index, process all */
2437 if(s->alternate_scan) nCoeffs= 63;
2438 else nCoeffs= s->block_last_index[n];
2441 block[0] = block[0] * s->y_dc_scale;
2443 block[0] = block[0] * s->c_dc_scale;
2444 quant_matrix = s->intra_matrix;
2445 for(i=1;i<=nCoeffs;i++) {
2446 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign test elided) */
2451 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2454 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact variant of the MPEG-2 intra dequantizer; the arithmetic per
 * coefficient matches dct_unquantize_mpeg2_intra_c.  Presumably the
 * elided lines implement the spec's sum-parity mismatch control on the
 * last coefficient — TODO confirm against the full source.
 */
2461 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2462 DCTELEM *block, int n, int qscale)
2464 int i, level, nCoeffs;
2465 const uint16_t *quant_matrix;
2468 if(s->alternate_scan) nCoeffs= 63;
2469 else nCoeffs= s->block_last_index[n];
2472 block[0] = block[0] * s->y_dc_scale;
2474 block[0] = block[0] * s->c_dc_scale;
2475 quant_matrix = s->intra_matrix;
2476 for(i=1;i<=nCoeffs;i++) {
2477 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign test elided) */
2482 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2485 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Dequantize an inter block, MPEG-2 style: ((2*level + 1) * qscale *
 * matrix) >> 4 per coefficient, no oddification (unlike MPEG-1).
 * NOTE(review): sign handling, the block[j] store, and (presumably)
 * the sum-parity mismatch control are on elided lines in this view.
 */
2494 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2495 DCTELEM *block, int n, int qscale)
2497 int i, level, nCoeffs;
2498 const uint16_t *quant_matrix;
2501 if(s->alternate_scan) nCoeffs= 63;
2502 else nCoeffs= s->block_last_index[n];
2504 quant_matrix = s->inter_matrix;
2505 for(i=0; i<=nCoeffs; i++) {
2506 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign test elided) */
2511 level = (((level << 1) + 1) * qscale *
2512 ((int) (quant_matrix[j]))) >> 4;
/* positive-level branch */
2515 level = (((level << 1) + 1) * qscale *
2516 ((int) (quant_matrix[j]))) >> 4;
/**
 * Dequantize an intra block, H.263 style: DC scaled by y/c DC scale,
 * AC coefficients reconstructed as level*qmul +/- qadd depending on
 * sign, with qadd = (qscale-1)|1 (qmul assignment is on an elided line;
 * per H.263 it is presumably 2*qscale — TODO confirm).
 */
2525 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2526 DCTELEM *block, int n, int qscale)
2528 int i, level, qmul, qadd;
2531 assert(s->block_last_index[n]>=0);
2537 block[0] = block[0] * s->y_dc_scale;
2539 block[0] = block[0] * s->c_dc_scale;
2540 qadd = (qscale - 1) | 1;
/* raster_end maps the scan-order last index to a raster-order bound */
2547 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2549 for(i=1; i<=nCoeffs; i++) {
/* negative-level branch (sign test elided) */
2553 level = level * qmul - qadd;
/* positive-level branch */
2555 level = level * qmul + qadd;
/**
 * Dequantize an inter block, H.263 style: same level*qmul +/- qadd
 * reconstruction as the intra variant but starting at coefficient 0
 * (no special DC handling).
 * NOTE(review): qmul assignment, sign tests and the store into block[]
 * are on elided lines in this view.
 */
2562 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2563 DCTELEM *block, int n, int qscale)
2565 int i, level, qmul, qadd;
2568 assert(s->block_last_index[n]>=0);
2570 qadd = (qscale - 1) | 1;
2573 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2575 for(i=0; i<=nCoeffs; i++) {
/* negative-level branch (sign test elided) */
2579 level = level * qmul - qadd;
/* positive-level branch */
2581 level = level * qmul + qadd;
2589 * set qscale and update qscale dependent variables.
/**
 * Set the quantizer and refresh every qscale-derived field:
 * chroma_qscale (via the codec's chroma mapping table) and the
 * luma/chroma DC scale factors.  qscale is clamped to the valid
 * range (the lower clamp and the assignments are on elided lines;
 * the visible check caps it at 31).
 */
2591 void ff_set_qscale(MpegEncContext * s, int qscale)
2595 else if (qscale > 31)
2599 s->chroma_qscale= s->chroma_qscale_table[qscale];
2601 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2602 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2605 void MPV_report_decode_progress(MpegEncContext *s)
2607 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame)
2608 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);