2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale table: identity mapping, i.e. chroma uses the same
 * qscale as luma. Indexed by luma qscale (0..31). */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Indexed by qscale; 128 entries so any 7-bit qscale indexes safely.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table below. */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, constant 4 for every qscale
 * (selected via ff_mpeg2_dc_scale_table[1] below). */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, constant 2 for every qscale
 * (selected via ff_mpeg2_dc_scale_table[2] below). */
static const uint8_t mpeg2_dc_scale_table2[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, constant 1 for every qscale
 * (selected via ff_mpeg2_dc_scale_table[3] below). */
static const uint8_t mpeg2_dc_scale_table3[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC scale tables above; presumably indexed by the
 * MPEG-2 intra_dc_precision field (0..3) — entry 0 reuses the MPEG-1
 * table (8), then 4, 2, 1. TODO(review): confirm index semantics against
 * the callers, which are outside this view. */
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG-style start code (00 00 01 xx), keeping the
 * last bytes seen across calls in *state so a start code split between
 * buffers is still found.
 * NOTE(review): several lines of this function (loop structure and return
 * paths) are elided in this view; comments describe only what is visible.
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    uint32_t tmp= *state << 8;           // make room for the next input byte
    *state= tmp + *(p++);                // rolling 32-bit window of the last 4 bytes
    if(tmp == 0x100 || p==end)           // 00 00 01 just completed, or input exhausted
    // Skip ahead as far as the bytes just read allow (none of them can
    // be part of a 00 00 01 prefix when they are nonzero/too large):
    if (p[-1] > 1 ) p+= 3;
    else if(p[-2] ) p+= 2;
    else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
    /* Install the portable C unquantizers as defaults. */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; // reproducible output for conformance tests
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* Architecture-specific overrides of the function pointers above.
     * NOTE(review): the surrounding preprocessor guards (HAVE_MMX etc.)
     * are elided in this view; only one of these runs per build. */
        MPV_common_init_mmx(s);
        MPV_common_init_axp(s);
        MPV_common_init_mlib(s);
        MPV_common_init_mmi(s);
        MPV_common_init_arm(s);
        MPV_common_init_altivec(s);
        MPV_common_init_bfin(s);

    /* load & permutate scantables
       note: only wmv uses different ones
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
        // NOTE(review): the "}else{" between the two pairs is elided in this view.
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy src into dst and mark dst as a copy so buffer ownership stays with
 * src. NOTE(review): the actual copy statement is elided in this view. */
void ff_copy_picture(Picture *dst, Picture *src){
    dst->type= FF_BUFFER_TYPE_COPY; // dst must never be released via get/release_buffer
205 * Release a frame buffer
/**
 * Release a frame buffer
 * Returns the buffer to the (possibly threaded) buffer pool and frees any
 * hwaccel private data that alloc_frame_buffer() attached to the picture.
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
    ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
    av_freep(&pic->hwaccel_picture_private); // no-op if never allocated
214 * Allocate a frame buffer
/**
 * Allocate a frame buffer
 * Gets a buffer from the codec's buffer pool, optionally allocating hwaccel
 * private data first, and validates the returned strides.
 * NOTE(review): return statements and some closing braces are elided in
 * this view.
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private); // must not leak a previous allocation
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");

    r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);

    // Reject buffers the user callback filled in incorrectly.
    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
        av_freep(&pic->hwaccel_picture_private);

    // Strides must stay constant for the whole stream once known.
    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);

    // The code assumes U and V share one stride.
    if (pic->linesize[1] != pic->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
255 * allocates a Picture
256 * The pixels are allocated/set by calling get_buffer() if shared=0
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 * Also allocates the per-picture side tables (qscale, mb_type, motion
 * vectors, ...) on first use.
 * NOTE(review): several lines (if(shared){...}else{, loop headers, returns)
 * are elided in this view.
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
    const int mb_array_size= s->mb_stride*s->mb_height;
    const int b8_array_size= s->b8_stride*s->mb_height*2;  // 8x8 block granularity
    const int b4_array_size= s->b4_stride*s->mb_height*4;  // 4x4 block granularity (H.264)

    // shared path: caller supplied the pixel buffers
    assert(pic->data[0]);
    assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
    pic->type= FF_BUFFER_TYPE_SHARED;
    // non-shared path: must not already have pixels
    assert(!pic->data[0]);

    if (alloc_frame_buffer(s, pic) < 0)

    // Adopt the strides of the first successfully allocated buffer.
    s->linesize  = pic->linesize[0];
    s->uvlinesize= pic->linesize[1];

    if(pic->qscale_table==NULL){ // first use of this Picture: allocate side tables
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t)  , fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1; // skip guard rows/column
        if(s->out_format == FMT_H264){
            // H.264: motion at 4x4 granularity, for each of the two lists
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
            pic->motion_val[i]= pic->motion_val_base[i]+4;      // +4 guard elements at the front
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
            pic->motion_subsample_log2= 2;
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
            // other codecs / debug: motion at 8x8 granularity
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
            pic->motion_val[i]= pic->motion_val_base[i]+4;
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
            pic->motion_subsample_log2= 3;
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
        pic->qstride= s->mb_stride;
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)

    /* It might be nicer if the application would keep track of these
     * but it would require an API change. */
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.

fail: //for the FF_ALLOCZ_OR_GOTO macro
    free_frame_buffer(s, pic);
329 * deallocates a picture
/**
 * deallocates a picture
 * Frees the pixel buffer (unless the picture shares caller-owned pixels)
 * and all per-picture side tables allocated by ff_alloc_picture().
 * NOTE(review): loop headers and the body of the SHARED branch are elided
 * in this view.
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        free_frame_buffer(s, pic);

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);
    // per-list motion data (loop over lists elided in this view)
    av_freep(&pic->motion_val_base[i]);
    av_freep(&pic->ref_index[i]);

    if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, ME scratchpads, ME maps, DCT blocks and
 * (for H.263-family) AC prediction values.
 * NOTE(review): loop headers and the encoder-only guard around the
 * scratchpad allocation are elided in this view. */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;   // luma plane + two chroma planes, in 8x8 units

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

    //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    // The scratchpads alias one allocation; they are never live at the same time.
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
    if(s->avctx->noise_reduction){
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)

    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];
    // pblocks gives per-block pointers into s->block (loop header elided)
    s->pblocks[i] = &s->block[i];

    if (s->out_format == FMT_H263) {
        /* AC coefficient prediction values; +stride+1 skips the guard row/column. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;

    return -1; //free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(). The aliasing
 * scratchpad pointers are only cleared, not freed (they point into
 * s->me.scratchpad). */
static void free_duplicate_context(MpegEncContext *s){
    av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
    av_freep(&s->me.scratchpad);
    s->obmc_scratchpad= NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
/* Save the per-thread fields of src into bak so they survive a wholesale
 * memcpy of the context (see ff_update_duplicate_context()).
 * NOTE(review): most COPY(...) lines and the trailing #undef are elided in
 * this view. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(obmc_scratchpad);
    COPY(me.map_generation);
/* Make dst a copy of src while preserving dst's own per-thread buffers:
 * back them up, memcpy the whole context, then restore them and re-point
 * pblocks into dst's own block storage.
 * NOTE(review): the declaration of `bak` and the pblocks loop header are
 * elided in this view. */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    //FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);   // restore dst's private pointers
    dst->pblocks[i] = &dst->block[i];      // must not point into src
    //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
/* Frame-threading: copy the decoding state of the previous thread's
 * context (src/s1) into this thread's context (dst/s).
 * NOTE(review): returns, some braces and a few statements are elided in
 * this view. */
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if(dst == src || !s1->context_initialized) return 0;

    //FIXME can parameters change on I-frames? in that case dst may need a reinit
    if(!s->context_initialized){
        // First use of this thread context: clone everything, then fix up
        // the fields that must stay per-thread.
        memcpy(s, s1, sizeof(MpegEncContext));

        s->picture_range_start  += MAX_PICTURE_COUNT; // each thread owns its own picture slots
        s->picture_range_end    += MAX_PICTURE_COUNT;

        s->bitstream_buffer      = NULL;              // must not share s1's buffer
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    // Copy picture array and the last/next/current Picture structs; the
    // struct-offset arithmetic copies the field range between the markers.
    memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);

    // Pointers into s1->picture[] are rebased into s->picture[].
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    //Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;

    // MPEG4 timing info (field range copy, see comment on the memcpy above)
    memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);

    s->max_b_frames         = s1->max_b_frames;
    s->low_delay            = s1->low_delay;
    s->dropable             = s1->dropable;

    //DivX handling (doesn't work)
    s->divx_packed          = s1->divx_packed;

    if(s1->bitstream_buffer){
        if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size  = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
        memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); // zero padding for the bitstream reader

    //MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);

    if(!s1->first_field){
        s->last_pict_type= s1->pict_type;
        if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;

        if(s1->pict_type!=FF_B_TYPE){
            s->last_non_b_pict_type= s1->pict_type;
539 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
540 * the changed fields will not depend upon the prior state of the MpegEncContext.
/**
 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_common_defaults(MpegEncContext *s){
    // default DC/qscale tables; codec-specific init may replace them later
    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table= ff_default_chroma_qscale_table;
    s->progressive_frame= 1;
    s->progressive_sequence= 1;
    s->picture_structure= PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number = 0;
    s->input_picture_number = 0;

    s->picture_in_gop_number = 0;

    // default: this context may use the full picture array
    s->picture_range_start = 0;
    s->picture_range_end = MAX_PICTURE_COUNT;
564 * sets the given MpegEncContext to defaults for decoding.
565 * the changed fields will not depend upon the prior state of the MpegEncContext.
/**
 * sets the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s); // decoding currently needs nothing beyond the common defaults
572 * init common structure for both encoder and decoder.
573 * this assumes that some variables like width/height are already set
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 * Allocates all size-dependent tables and the per-thread contexts.
 * NOTE(review): several lines (returns, #if guards, some loop headers and
 * closing braces) are elided in this view.
 */
av_cold int MPV_common_init(MpegEncContext *s)
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    // Derive mb_height; field-coded MPEG-2 rounds to 32-line macroblock pairs.
    if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if(s->avctx->pix_fmt == PIX_FMT_NONE){
        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");

    // Slice threading needs at least one MB row per thread.
    if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
       (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");

    if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    // Strides carry a +1 guard column so neighbor accesses at the edges stay in bounds.
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                  &(s->chroma_y_shift) );

    /* set default edge pos, will be overriden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    // block_wrap: per-block-index stride table (entries 0-2,4 elided in this view)
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->codec_tag = ff_toupper4(s->avctx->codec_tag);

    s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    // Map linear MB index -> position in the strided MB arrays.
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;

    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    /* Allocate MV tables (encoder-only guard elided in this view). The
     * +mb_stride+1 offset skips the guard row/column. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
    s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
    s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
    s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
    s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
    s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
    s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

    if(s->msmpeg4_version){
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

    /* Allocate MB type table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding

    FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

    // Quantization matrices: 64 coefficients x 32 qscales, int and 16-bit forms.
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)

    if(s->avctx->noise_reduction){
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)

    // One picture pool per thread for frame threading.
    s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
    for(i = 0; i < s->picture_count; i++) {
        avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
        // (nested i/j/k loop headers elided in this view)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)

    if (s->out_format == FMT_H263) {
        /* coded block pattern prediction */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for  error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024; // neutral DC predictor (128 << 3)

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);

    s->context_initialized = 1;
    s->thread_context[0]= s;

    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        // Slice threading: clone the context per thread and split MB rows evenly.
        threads = s->avctx->thread_count;

        for(i=1; i<threads; i++){
            s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));

        for(i=0; i<threads; i++){
            if(init_duplicate_context(s->thread_context[i], s) < 0)
            s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
            s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
        // Frame threading / single thread: one duplicate context covering all rows.
        if(init_duplicate_context(s, s) < 0) goto fail;
        s->end_mb_y   = s->mb_height;
775 /* init common structure for both encoder and decoder */
/* init common structure for both encoder and decoder */
/* (Frees everything MPV_common_init() allocated; the context can be
 * re-initialized afterwards. NOTE(review): some loop headers and closing
 * braces are elided in this view.) */
void MPV_common_end(MpegEncContext *s)
    // Free per-thread contexts first: they hold their own scratch buffers.
    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        for(i=0; i<s->avctx->thread_count; i++){
            free_duplicate_context(s->thread_context[i]);
        for(i=1; i<s->avctx->thread_count; i++){
            av_freep(&s->thread_context[i]); // [0] is s itself, never freed here
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    // The offset pointers into the freed bases must be cleared by hand.
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    // Interlaced direct-mode tables (nested loop headers elided in this view).
    av_freep(&s->b_field_mv_table_base[i][j][k]);
    s->b_field_mv_table[i][j][k]=NULL;
    av_freep(&s->b_field_select_table[i][j]);
    av_freep(&s->p_field_mv_table_base[i][j]);
    s->p_field_mv_table[i][j]=NULL;
    av_freep(&s->p_field_select_table[i]);

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture && !s->avctx->is_copy){ // a threaded copy does not own the pictures
        for(i=0; i<s->picture_count; i++){
            free_picture(s, &s->picture[i]);
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    av_freep(&s->visualization_buffer[i]); // loop header elided in this view

    if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level tables (max_level[], max_run[], index_run[])
 * of an RLTable for both the not-last and last coefficient cases.
 * If static_store is non-NULL the tables live there (and the function is
 * idempotent); otherwise they are av_malloc()ed.
 * NOTE(review): a few lines (start/end setup, some braces, if/else around
 * the static/dynamic choice) are elided in this view. */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1); // rl->n marks "no entry for this run"
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n) // remember first table index for each run
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        // Commit the scratch arrays: three sub-arrays packed into static_store,
        // or individually allocated (the if/else lines are elided in this view).
            rl->max_level[last] = static_store[last];
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute rl_vlc[]: for every VLC table entry and every qscale q, store
 * the fully unquantized (run, level, len) triple so the decoder can skip
 * the run/level lookup per coefficient.
 * NOTE(review): the outer q loop, qmul/qadd setup and several branch
 * bodies are elided in this view. */
void init_vlc_rl(RLTable *rl)
    for(i=0; i<rl->vlc.table_size; i++){
        int code= rl->vlc.table[i][0];
        int len = rl->vlc.table[i][1];

        if(len==0){ // illegal code
        }else if(len<0){ //more bits needed
            if(code==rl->n){ //esc
                run= rl->table_run  [code] + 1;
                level= rl->table_level[code] * qmul + qadd; // bake the quantizer into the level
                if(code >= rl->last) run+=192;              // flag "last coefficient" entries
        rl->rl_vlc[q][i].len= len;
        rl->rl_vlc[q][i].level= level;
        rl->rl_vlc[q][i].run= run;
/* Release the pixel buffers of all pictures that are neither referenced
 * nor owned by another thread; the current picture is kept unless
 * remove_current is set. */
void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
    /* release non reference frames */
    for(i=0; i<s->picture_count; i++){
        if(s->picture[i].data[0] && !s->picture[i].reference
           && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
           && (remove_current || &s->picture[i] != s->current_picture_ptr)
           /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
            free_frame_buffer(s, &s->picture[i]);
/* Return the index of a free slot in this context's picture range.
 * shared=1 wants a completely untouched slot; otherwise prefer slots that
 * were used before (type!=0) so their side tables can be reused.
 * NOTE(review): the if(shared){...}else{ split and the abort() at the end
 * are elided in this view. */
int ff_find_unused_picture(MpegEncContext *s, int shared){
    // shared: need an entirely virgin slot
    for(i=s->picture_range_start; i<s->picture_range_end; i++){
        if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
    // non-shared: prefer previously used slots first ...
    for(i=s->picture_range_start; i<s->picture_range_end; i++){
        if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
    // ... then any free slot
    for(i=s->picture_range_start; i<s->picture_range_end; i++){
        if(s->picture[i].data[0]==NULL) return i;

    av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
    /* We could return -1, but the codec would crash trying to draw into a
     * non-existing frame anyway. This is safer than waiting for a random crash.
     * Also the return of this is never useful, an encoder must only allocate
     * as much as allowed in the specification. This has no relationship to how
     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
     * enough for such valid streams).
     * Plus, a decoder has to check stream validity and remove frames if too
     * many reference frames are around. Waiting for "OOM" is not correct at
     * all. Similarly, missing reference frames have to be replaced by
     * interpolated/MC frames, anything else is a bug in the codec ...
     */
/* Encoder noise reduction: age the accumulated DCT error statistics and
 * recompute the per-coefficient offsets from them.
 * NOTE(review): variable declarations and closing braces are elided in
 * this view. */
static void update_noise_reduction(MpegEncContext *s){
    for(intra=0; intra<2; intra++){ // 0: inter blocks, 1: intra blocks
        if(s->dct_count[intra] > (1<<16)){ // halve stats to avoid overflow / stay adaptive
            for(i=0; i<64; i++){
                s->dct_error_sum[intra][i] >>=1;
            s->dct_count[intra] >>= 1;

        for(i=0; i<64; i++){
            // offset ~ noise_reduction * count / error_sum, rounded; +1 avoids div by zero
            s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1017 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
/* Per-frame setup called after the header is parsed and before coding/decoding
 * the frame data: releases stale reference pictures, allocates the current
 * picture (and dummy last/next pictures for broken or field-first streams),
 * rotates the last/next/current pointers, adjusts linesizes for field
 * pictures, and selects the dequantizer for the stream type.
 * Returns a negative value on allocation failure (the error paths between the
 * visible lines are missing from this dump). */
1019 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1025 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1027 /* mark&release old frames */
1028 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
1029 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1030 free_frame_buffer(s, s->last_picture_ptr);
1032 /* release forgotten pictures */
1033 /* if(mpeg124/h263) */
/* A referenced picture that still holds data but is not the next picture
 * should have been released already — reclaim it and warn. */
1035 for(i=0; i<s->picture_count; i++){
1036 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1037 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1038 free_frame_buffer(s, &s->picture[i]);
1046 ff_release_unused_pictures(s, 1);
1048 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1049 pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1051 i= ff_find_unused_picture(s, 0);
1052 pic= &s->picture[i];
/* For H.264 the reference value encodes the picture structure; for other
 * codecs any non-B picture is a reference. */
1057 if (s->codec_id == CODEC_ID_H264)
1058 pic->reference = s->picture_structure;
1059 else if (s->pict_type != AV_PICTURE_TYPE_B)
1063 pic->coded_picture_number= s->coded_picture_number++;
1065 if(ff_alloc_picture(s, pic, 0) < 0)
1068 s->current_picture_ptr= pic;
1069 //FIXME use only the vars from current_pic
1070 s->current_picture_ptr->top_field_first= s->top_field_first;
1071 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
/* For field pictures, top_field_first is derived from which field parity
 * arrives first rather than from the sequence header flag. */
1072 if(s->picture_structure != PICT_FRAME)
1073 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1075 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
1076 s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
1079 s->current_picture_ptr->pict_type= s->pict_type;
1080 // if(s->flags && CODEC_FLAG_QSCALE)
1081 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1082 s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
1084 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Rotate reference pointers: non-B pictures become the new "next" picture,
 * pushing the previous "next" into "last". */
1086 if (s->pict_type != AV_PICTURE_TYPE_B) {
1087 s->last_picture_ptr= s->next_picture_ptr;
1089 s->next_picture_ptr= s->current_picture_ptr;
1091 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1092 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
1093 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
1094 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
1095 s->pict_type, s->dropable);*/
1097 if(s->codec_id != CODEC_ID_H264){
/* Missing reference handling: if there is no usable last picture and this is
 * not an I frame (or it is a field-based first keyframe), fabricate a dummy
 * last picture so motion compensation has something to read from. */
1098 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
1099 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1100 if (s->pict_type != AV_PICTURE_TYPE_I)
1101 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1102 else if (s->picture_structure != PICT_FRAME)
1103 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1105 /* Allocate a dummy frame */
1106 i= ff_find_unused_picture(s, 0);
1107 s->last_picture_ptr= &s->picture[i];
1108 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* Mark both fields of the dummy frame fully decoded so frame-threaded
 * consumers never wait on it. */
1110 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1111 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1113 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
1114 /* Allocate a dummy frame */
1115 i= ff_find_unused_picture(s, 0);
1116 s->next_picture_ptr= &s->picture[i];
1117 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1119 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1120 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1124 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1125 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1127 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* Field pictures: address the selected field by offsetting the data pointer
 * by one line (bottom field) and doubling all linesizes so each "row" step
 * skips the other field's line. */
1129 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1132 if(s->picture_structure == PICT_BOTTOM_FIELD){
1133 s->current_picture.data[i] += s->current_picture.linesize[i];
1135 s->current_picture.linesize[i] *= 2;
1136 s->last_picture.linesize[i] *=2;
1137 s->next_picture.linesize[i] *=2;
1141 s->error_recognition= avctx->error_recognition;
1143 /* set dequantizer, we can't do it during init as it might change for mpeg4
1144 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1145 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1146 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1147 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1148 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1149 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1150 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1152 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1153 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1156 if(s->dct_error_sum){
1157 assert(s->avctx->noise_reduction && s->encoding);
1159 update_noise_reduction(s);
1162 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1163 return ff_xvmc_field_start(s, avctx);
1168 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Per-frame teardown called after a frame has been coded/decoded: finishes
 * XvMC if active or pads the picture edges for unrestricted MV prediction,
 * records last-frame statistics, writes the current picture back into the
 * picture array, frees non-reference frames, and reports final decode
 * progress for frame threading. */
1169 void MPV_frame_end(MpegEncContext *s)
1172 /* redraw edges for the frame if decoding didn't complete */
1173 //just to make sure that all data is rendered.
1174 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1175 ff_xvmc_field_end(s);
/* Edge padding is only needed for reference pictures, and only when no
 * hwaccel/VDPAU path owns the surface and EMU_EDGE is not requested. */
1176 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1177 && !s->avctx->hwaccel
1178 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1179 && s->unrestricted_mv
1180 && s->current_picture.reference
1182 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1183 int edges = EDGE_BOTTOM | EDGE_TOP, h = s->v_edge_pos;
/* Chroma planes use half the width/height and half the edge width (4:2:0). */
1185 s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , h , EDGE_WIDTH , edges);
1186 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, h>>1, EDGE_WIDTH/2, edges);
1187 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, h>>1, EDGE_WIDTH/2, edges);
1193 s->last_pict_type = s->pict_type;
1194 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1195 if(s->pict_type!=AV_PICTURE_TYPE_B){
1196 s->last_non_b_pict_type= s->pict_type;
1199 /* copy back current_picture variables */
1200 for(i=0; i<MAX_PICTURE_COUNT; i++){
1201 if(s->picture[i].data[0] == s->current_picture.data[0]){
1202 s->picture[i]= s->current_picture;
/* The current picture must exist in the picture array. */
1206 assert(i<MAX_PICTURE_COUNT);
1210 /* release non-reference frames */
1211 for(i=0; i<s->picture_count; i++){
1212 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1213 free_frame_buffer(s, &s->picture[i]);
1217 // clear copies, to avoid confusion
1219 memset(&s->last_picture, 0, sizeof(Picture));
1220 memset(&s->next_picture, 0, sizeof(Picture));
1221 memset(&s->current_picture, 0, sizeof(Picture));
1223 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* Tell waiting frame threads the whole picture is done (H.264 reports its
 * own progress elsewhere). */
1225 if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
1226 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1231 * draws a line from (ex, ey) -> (sx, sy).
1232 * @param w width of the image
1233 * @param h height of the image
1234 * @param stride stride/linesize of the image
1235 * @param color color of the line
/* Draw an anti-aliased line into a single 8-bit plane using 16.16 fixed-point
 * interpolation: the slope f distributes `color` between the two pixels
 * adjacent to the ideal line position. Endpoints are clipped to the image.
 * NOTE(review): declarations, else-branches and closing braces between the
 * visible lines are missing from this dump. */
1237 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1240 sx= av_clip(sx, 0, w-1);
1241 sy= av_clip(sy, 0, h-1);
1242 ex= av_clip(ex, 0, w-1);
1243 ey= av_clip(ey, 0, h-1);
1245 buf[sy*stride + sx]+= color;
/* Mostly-horizontal case: step along x, interpolate y. */
1247 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* Swap endpoints so we always iterate in increasing x. */
1249 FFSWAP(int, sx, ex);
1250 FFSWAP(int, sy, ey);
1252 buf+= sx + sy*stride;
1254 f= ((ey-sy)<<16)/ex;
1255 for(x= 0; x <= ex; x++){
/* fr is the 16-bit fraction of the current y position; split the color
 * between the pixel above and below accordingly. */
1258 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1259 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* Mostly-vertical case: step along y, interpolate x. */
1263 FFSWAP(int, sx, ex);
1264 FFSWAP(int, sy, ey);
1266 buf+= sx + sy*stride;
1268 if(ey) f= ((ex-sx)<<16)/ey;
1270 for(y= 0; y <= ey; y++){
1273 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1274 buf[y*stride + x+1]+= (color* fr )>>16;
1280 * draws an arrow from (ex, ey) -> (sx, sy).
1281 * @param w width of the image
1282 * @param h height of the image
1283 * @param stride stride/linesize of the image
1284 * @param color color of the arrow
/* Draw a motion-vector arrow: the shaft from (sx,sy) to (ex,ey) plus, for
 * vectors longer than 3 pixels, two short head strokes at the start point.
 * Coordinates are clipped loosely (±100 beyond the image) so partially
 * off-screen vectors still render; draw_line() does the exact clipping. */
1286 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1289 sx= av_clip(sx, -100, w+100);
1290 sy= av_clip(sy, -100, h+100);
1291 ex= av_clip(ex, -100, w+100);
1292 ey= av_clip(ey, -100, h+100);
1297 if(dx*dx + dy*dy > 3*3){
/* length in 12.4 fixed point (<<8 before the sqrt gives <<4 after). */
1300 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1302 //FIXME subpixel accuracy
/* Normalize (rx,ry) to a fixed-length head stroke. */
1303 rx= ROUNDED_DIV(rx*3<<4, length);
1304 ry= ROUNDED_DIV(ry*3<<4, length);
/* Two head strokes: one along the vector, one perpendicular to it. */
1306 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1307 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1309 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1313 * prints debugging info for the given picture.
/* Print and/or visualize per-macroblock debug information for a decoded
 * picture, controlled by avctx->debug and avctx->debug_mv:
 *  - textual dump of skip counts, qscale and MB-type characters, and
 *  - in-picture visualization of motion vectors, QP and MB types (the
 *    picture is first copied into s->visualization_buffer so the real
 *    reference buffers are not trashed).
 * Skipped for hwaccel pictures and pictures without mb_type metadata.
 * NOTE(review): interior lines (declarations, braces, some else-branches)
 * are missing from this dump. */
1315 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1317 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1319 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1322 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1323 av_get_picture_type_char(pict->pict_type));
1324 for(y=0; y<s->mb_height; y++){
1325 for(x=0; x<s->mb_width; x++){
/* Skip-count column: single digit, saturated at 9. */
1326 if(s->avctx->debug&FF_DEBUG_SKIP){
1327 int count= s->mbskip_table[x + y*s->mb_stride];
1328 if(count>9) count=9;
1329 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1331 if(s->avctx->debug&FF_DEBUG_QP){
1332 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
/* One character per MB encoding type and MV direction, a second for the
 * partitioning, a third for interlacing. */
1334 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1335 int mb_type= pict->mb_type[x + y*s->mb_stride];
1336 //Type & MV direction
1338 av_log(s->avctx, AV_LOG_DEBUG, "P");
1339 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1340 av_log(s->avctx, AV_LOG_DEBUG, "A");
1341 else if(IS_INTRA4x4(mb_type))
1342 av_log(s->avctx, AV_LOG_DEBUG, "i");
1343 else if(IS_INTRA16x16(mb_type))
1344 av_log(s->avctx, AV_LOG_DEBUG, "I");
1345 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1346 av_log(s->avctx, AV_LOG_DEBUG, "d");
1347 else if(IS_DIRECT(mb_type))
1348 av_log(s->avctx, AV_LOG_DEBUG, "D");
1349 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1350 av_log(s->avctx, AV_LOG_DEBUG, "g");
1351 else if(IS_GMC(mb_type))
1352 av_log(s->avctx, AV_LOG_DEBUG, "G");
1353 else if(IS_SKIP(mb_type))
1354 av_log(s->avctx, AV_LOG_DEBUG, "S");
1355 else if(!USES_LIST(mb_type, 1))
1356 av_log(s->avctx, AV_LOG_DEBUG, ">");
1357 else if(!USES_LIST(mb_type, 0))
1358 av_log(s->avctx, AV_LOG_DEBUG, "<");
1360 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1361 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Partitioning character: + 8x8, - 16x8, | 8x16, space 16x16/intra. */
1366 av_log(s->avctx, AV_LOG_DEBUG, "+");
1367 else if(IS_16X8(mb_type))
1368 av_log(s->avctx, AV_LOG_DEBUG, "-");
1369 else if(IS_8X16(mb_type))
1370 av_log(s->avctx, AV_LOG_DEBUG, "|");
1371 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1372 av_log(s->avctx, AV_LOG_DEBUG, " ");
1374 av_log(s->avctx, AV_LOG_DEBUG, "?");
1377 if(IS_INTERLACED(mb_type))
1378 av_log(s->avctx, AV_LOG_DEBUG, "=");
1380 av_log(s->avctx, AV_LOG_DEBUG, " ");
1382 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1384 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- in-picture visualization of MVs / QP / MB types --- */
1388 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1389 const int shift= 1 + s->quarter_sample;
1393 int h_chroma_shift, v_chroma_shift, block_height;
1394 const int width = s->avctx->width;
1395 const int height= s->avctx->height;
1396 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1397 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1398 s->low_delay=0; //needed to see the vectors without trashing the buffers
1400 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* Work on a private copy of the picture planes so drawing does not corrupt
 * the decoder's reference frames. */
1402 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1403 pict->data[i]= s->visualization_buffer[i];
1405 pict->type= FF_BUFFER_TYPE_COPY;
1408 block_height = 16>>v_chroma_shift;
1410 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1412 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1413 const int mb_index= mb_x + mb_y*s->mb_stride;
/* Motion-vector arrows, per requested direction: 0 = P forward,
 * 1 = B forward, 2 = B backward. */
1414 if((s->avctx->debug_mv) && pict->motion_val){
1416 for(type=0; type<3; type++){
1419 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1423 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1427 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1432 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* 8x8 partitioning: one arrow per 8x8 sub-block. */
1435 if(IS_8X8(pict->mb_type[mb_index])){
1438 int sx= mb_x*16 + 4 + 8*(i&1);
1439 int sy= mb_y*16 + 4 + 8*(i>>1);
1440 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1441 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1442 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1443 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1445 }else if(IS_16X8(pict->mb_type[mb_index])){
1449 int sy=mb_y*16 + 4 + 8*i;
1450 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1451 int mx=(pict->motion_val[direction][xy][0]>>shift);
1452 int my=(pict->motion_val[direction][xy][1]>>shift);
/* Field MVs cover half the vertical range, so double my. */
1454 if(IS_INTERLACED(pict->mb_type[mb_index]))
1457 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1459 }else if(IS_8X16(pict->mb_type[mb_index])){
1462 int sx=mb_x*16 + 4 + 8*i;
1464 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1465 int mx=(pict->motion_val[direction][xy][0]>>shift);
1466 int my=(pict->motion_val[direction][xy][1]>>shift);
1468 if(IS_INTERLACED(pict->mb_type[mb_index]))
1471 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* Default: one arrow from the MB center. */
1474 int sx= mb_x*16 + 8;
1475 int sy= mb_y*16 + 8;
1476 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1477 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1478 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1479 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: paint both chroma planes with a gray level
 * proportional to the MB's qscale (max qscale 31 maps near 128). */
1483 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1484 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1486 for(y=0; y<block_height; y++){
1487 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1488 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualization: color each MB's chroma by type via a hue angle,
 * then overlay partition boundaries and interlacing markers on luma. */
1491 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1492 int mb_type= pict->mb_type[mb_index];
1495 #define COLOR(theta, r)\
1496 u= (int)(128 + r*cos(theta*3.141592/180));\
1497 v= (int)(128 + r*sin(theta*3.141592/180));
1501 if(IS_PCM(mb_type)){
1503 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1505 }else if(IS_INTRA4x4(mb_type)){
1507 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1509 }else if(IS_DIRECT(mb_type)){
1511 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1513 }else if(IS_GMC(mb_type)){
1515 }else if(IS_SKIP(mb_type)){
1517 }else if(!USES_LIST(mb_type, 1)){
1519 }else if(!USES_LIST(mb_type, 0)){
1522 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the single-byte u/v values across a 8-byte word for fast fill. */
1526 u*= 0x0101010101010101ULL;
1527 v*= 0x0101010101010101ULL;
1528 for(y=0; y<block_height; y++){
1529 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1530 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* Segmentation lines: XOR 0x80 so they remain visible on any background. */
1534 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1535 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1536 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1538 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1540 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* For finer MV grids, split 8x8 sub-blocks further where their quarter
 * MVs differ. */
1542 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1543 int dm= 1 << (mv_sample_log2-2);
1545 int sx= mb_x*16 + 8*(i&1);
1546 int sy= mb_y*16 + 8*(i>>1);
1547 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1549 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1550 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1552 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1553 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1554 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1558 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* Reset the skip counter so the visualization is not re-skipped. */
1562 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer part (source offset at lowres
 * scale) and a sub-pel fraction (sx/sy), emulates edges when the source
 * block would read outside the padded picture, and dispatches to the
 * h264_chroma_mc function sized for the lowres factor.
 * NOTE(review): some interior lines are missing from this dump. */
1568 static inline int hpel_motion_lowres(MpegEncContext *s,
1569 uint8_t *dest, uint8_t *src,
1570 int field_based, int field_select,
1571 int src_x, int src_y,
1572 int width, int height, int stride,
1573 int h_edge_pos, int v_edge_pos,
1574 int w, int h, h264_chroma_mc_func *pix_op,
1575 int motion_x, int motion_y)
1577 const int lowres= s->avctx->lowres;
1578 const int op_index= FFMIN(lowres, 2);
/* s_mask selects the sub-pel fraction bits for this lowres factor. */
1579 const int s_mask= (2<<lowres)-1;
1583 if(s->quarter_sample){
1588 sx= motion_x & s_mask;
1589 sy= motion_y & s_mask;
1590 src_x += motion_x >> (lowres+1);
1591 src_y += motion_y >> (lowres+1);
1593 src += src_y * stride + src_x;
/* Out-of-picture reads (including the extra pel needed when sx/sy != 0)
 * go through the edge-emulation buffer. */
1595 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1596 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1597 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1598 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1599 src= s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 2-bit precision of the MC function. */
1603 sx= (sx << 2) >> lowres;
1604 sy= (sy << 2) >> lowres;
1607 pix_op[op_index](dest, src, stride, h, sx, sy);
1611 /* apply one mpeg motion vector to the three components */
/* apply one mpeg motion vector to the three components */
/* Lowres counterpart of mpeg_motion(): computes luma and chroma source
 * positions and sub-pel fractions for one MV (handling field-based pictures
 * and the H.263/H.261 chroma MV rules), performs edge emulation when needed,
 * and runs the lowres h264_chroma_mc functions on Y, Cb and Cr.
 * NOTE(review): interior lines are missing from this dump. */
1612 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1613 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1614 int field_based, int bottom_field, int field_select,
1615 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1616 int motion_x, int motion_y, int h, int mb_y)
1618 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1619 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1620 const int lowres= s->avctx->lowres;
1621 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1622 const int block_s= 8>>lowres;
1623 const int s_mask= (2<<lowres)-1;
1624 const int h_edge_pos = s->h_edge_pos >> lowres;
1625 const int v_edge_pos = s->v_edge_pos >> lowres;
1626 linesize = s->current_picture.linesize[0] << field_based;
1627 uvlinesize = s->current_picture.linesize[1] << field_based;
1629 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* Compensate the vertical position when predicting one field from the
 * other parity. */
1635 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1638 sx= motion_x & s_mask;
1639 sy= motion_y & s_mask;
1640 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1641 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* Chroma MV derivation differs per format: H.263 keeps the luma fraction
 * parity, H.261 uses full-pel chroma MVs, others divide per subsampling. */
1643 if (s->out_format == FMT_H263) {
1644 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1645 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1648 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1651 uvsx = (2*mx) & s_mask;
1652 uvsy = (2*my) & s_mask;
1653 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1654 uvsrc_y = mb_y*block_s + (my >> lowres);
1656 if(s->chroma_y_shift){
1661 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1662 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1664 if(s->chroma_x_shift){
1668 uvsy = motion_y & s_mask;
1670 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1673 uvsx = motion_x & s_mask;
1674 uvsy = motion_y & s_mask;
1681 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1682 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1683 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Edge emulation for reads beyond the padded picture; chroma shares the
 * tail of the same emu buffer. */
1685 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1686 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1687 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1688 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1689 ptr_y = s->edge_emu_buffer;
1690 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1691 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1692 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1693 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1694 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1695 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1701 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1702 dest_y += s->linesize;
1703 dest_cb+= s->uvlinesize;
1704 dest_cr+= s->uvlinesize;
1708 ptr_y += s->linesize;
1709 ptr_cb+= s->uvlinesize;
1710 ptr_cr+= s->uvlinesize;
/* Rescale sub-pel fractions to the MC functions' 2-bit precision. */
1713 sx= (sx << 2) >> lowres;
1714 sy= (sy << 2) >> lowres;
1715 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1717 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1718 uvsx= (uvsx << 2) >> lowres;
1719 uvsy= (uvsy << 2) >> lowres;
1720 if(h >> s->chroma_y_shift){
1721 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1722 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1725 //FIXME h261 lowres loop filter
/* Chroma motion compensation for a 4MV (8x8-partitioned) macroblock in
 * lowres mode: builds a single chroma MV from the four luma MVs using the
 * H.263 rounding rule, then runs the lowres MC on both Cb and Cr with edge
 * emulation when CODEC_FLAG_EMU_EDGE is set.
 * NOTE(review): some interior lines are missing from this dump. */
1728 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1729 uint8_t *dest_cb, uint8_t *dest_cr,
1730 uint8_t **ref_picture,
1731 h264_chroma_mc_func *pix_op,
1733 const int lowres= s->avctx->lowres;
1734 const int op_index= FFMIN(lowres, 2);
1735 const int block_s= 8>>lowres;
1736 const int s_mask= (2<<lowres)-1;
1737 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1738 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1739 int emu=0, src_x, src_y, offset, sx, sy;
1742 if(s->quarter_sample){
1747 /* In case of 8X8, we construct a single chroma motion vector
1748 with a special rounding */
1749 mx= ff_h263_round_chroma(mx);
1750 my= ff_h263_round_chroma(my);
1754 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1755 src_y = s->mb_y*block_s + (my >> (lowres+1));
1757 offset = src_y * s->uvlinesize + src_x;
1758 ptr = ref_picture[1] + offset;
1759 if(s->flags&CODEC_FLAG_EMU_EDGE){
1760 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1761 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1762 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1763 ptr= s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the MC function's 2-bit precision. */
1767 sx= (sx << 2) >> lowres;
1768 sy= (sy << 2) >> lowres;
1769 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and (when emu was triggered) the same emulated
 * edge handling as Cb. */
1771 ptr = ref_picture[2] + offset;
1773 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1774 ptr= s->edge_emu_buffer;
1776 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1780 * motion compensation of a single macroblock
1782 * @param dest_y luma destination pointer
1783 * @param dest_cb chroma cb/u destination pointer
1784 * @param dest_cr chroma cr/v destination pointer
1785 * @param dir direction (0->forward, 1->backward)
1786 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1787 * @param pix_op halfpel motion compensation function (average or put normally)
1788 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres counterpart of MPV_motion(): motion-compensates one macroblock,
 * dispatching on s->mv_type (16x16, 8x8/4MV, field, 16x8, dual-prime) and
 * using s->mv / s->field_select for the vectors.
 * NOTE(review): switch-case labels, declarations and braces between the
 * visible lines are missing from this dump. */
1790 static inline void MPV_motion_lowres(MpegEncContext *s,
1791 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1792 int dir, uint8_t **ref_picture,
1793 h264_chroma_mc_func *pix_op)
1797 const int lowres= s->avctx->lowres;
1798 const int block_s= 8>>lowres;
1803 switch(s->mv_type) {
/* 16x16: a single MV covers the whole macroblock. */
1805 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1807 ref_picture, pix_op,
1808 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 8x8 / 4MV: one luma MV per 8x8 sub-block, chroma via the combined MV. */
1814 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1815 ref_picture[0], 0, 0,
1816 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1817 s->width, s->height, s->linesize,
1818 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1819 block_s, block_s, pix_op,
1820 s->mv[dir][i][0], s->mv[dir][i][1]);
/* Accumulate the four luma MVs to derive the chroma MV. */
1822 mx += s->mv[dir][i][0];
1823 my += s->mv[dir][i][1];
1826 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1827 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* Field MC: in frame pictures both fields are compensated separately;
 * in field pictures a single field MV is used, possibly referencing the
 * current picture's other field. */
1830 if (s->picture_structure == PICT_FRAME) {
1832 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1833 1, 0, s->field_select[dir][0],
1834 ref_picture, pix_op,
1835 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1837 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1838 1, 1, s->field_select[dir][1],
1839 ref_picture, pix_op,
1840 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
1842 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1843 ref_picture= s->current_picture_ptr->data;
1846 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1847 0, 0, s->field_select[dir][0],
1848 ref_picture, pix_op,
1849 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* 16x8: two MVs per field picture MB, each covering a 16x8 half. */
1854 uint8_t ** ref2picture;
1856 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1857 ref2picture= ref_picture;
1859 ref2picture= s->current_picture_ptr->data;
1862 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1863 0, 0, s->field_select[dir][i],
1864 ref2picture, pix_op,
1865 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1867 dest_y += 2*block_s*s->linesize;
1868 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1869 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* Dual prime: predictions from both field parities are averaged. */
1873 if(s->picture_structure == PICT_FRAME){
1877 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1879 ref_picture, pix_op,
1880 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1882 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1886 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1887 0, 0, s->picture_structure != i+1,
1888 ref_picture, pix_op,
1889 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1891 // after put we make avg of the same block
1892 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1894 //opposite parity is always in the same frame if this is second field
1895 if(!s->first_field){
1896 ref_picture = s->current_picture_ptr->data;
1906 * find the lowest MB row referenced in the MVs
/* Return the lowest (bottom-most) macroblock row referenced by the current
 * MB's motion vectors in direction `dir`, clamped to the picture; used by
 * frame threading to know how far the reference must be decoded.
 * Falls back to the last row for unhandled cases (field pictures and
 * MV types not covered by the visible switch).
 * NOTE(review): the switch-case bodies setting `mvs`/`off` are partially
 * missing from this dump. */
1908 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift normalizes half-pel MVs to quarter-pel units. */
1910 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1911 int my, off, i, mvs;
1913 if (s->picture_structure != PICT_FRAME) goto unhandled;
1915 switch (s->mv_type) {
1929 for (i = 0; i < mvs; i++) {
1930 my = s->mv[dir][i][1]<<qpel_shift;
1931 my_max = FFMAX(my_max, my);
1932 my_min = FFMIN(my_min, my);
/* Convert the largest vertical displacement (quarter-pel) to a row count:
 * 64 quarter-pels per 16-pixel MB row, rounded up. */
1935 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1937 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1939 return s->mb_height-1;
1942 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT result to dest. */
1943 static inline void put_dct(MpegEncContext *s,
1944 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1946 s->dct_unquantize_intra(s, block, i, qscale);
1947 s->dsp.idct_put (dest, line_size, block);
1950 /* add block[] to dest[] */
/* Add an already-dequantized block's IDCT result to dest; blocks without
 * coded coefficients (block_last_index < 0) are skipped. */
1951 static inline void add_dct(MpegEncContext *s,
1952 DCTELEM *block, int i, uint8_t *dest, int line_size)
1954 if (s->block_last_index[i] >= 0) {
1955 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT result to dest; blocks without
 * coded coefficients (block_last_index < 0) are skipped. */
1959 static inline void add_dequant_dct(MpegEncContext *s,
1960 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1962 if (s->block_last_index[i] >= 0) {
1963 s->dct_unquantize_inter(s, block, i, qscale);
1965 s->dsp.idct_add (dest, line_size, block);
1970 * cleans dc, ac, coded_block for the current non-intra MB
/* Reset the intra prediction state (DC predictors, AC prediction values and,
 * for MSMPEG4 v3+, coded_block flags) for the current macroblock after a
 * non-intra MB, so stale intra predictors are not reused.
 * NOTE(review): some interior lines are missing from this dump. */
1972 void ff_clean_intra_table_entries(MpegEncContext *s)
1974 int wrap = s->b8_stride;
1975 int xy = s->block_index[0];
/* Luma: reset the four 8x8 DC predictors to the neutral value 1024. */
1978 s->dc_val[0][xy + 1 ] =
1979 s->dc_val[0][xy + wrap] =
1980 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac_val covers two rows of two 8x8 blocks each (16 int16 per row pair). */
1982 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1983 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1984 if (s->msmpeg4_version>=3) {
1985 s->coded_block[xy ] =
1986 s->coded_block[xy + 1 ] =
1987 s->coded_block[xy + wrap] =
1988 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma: one DC/AC entry per MB, indexed on the MB grid. */
1991 wrap = s->mb_stride;
1992 xy = s->mb_x + s->mb_y * wrap;
1994 s->dc_val[2][xy] = 1024;
1996 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1997 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1999 s->mbintra_table[xy]= 0;
2002 /* generic function called after a macroblock has been parsed by the
2003 decoder or after it has been encoded by the encoder.
2005 Important variables used:
2006 s->mb_intra : true if intra macroblock
2007 s->mv_dir : motion vector direction
2008 s->mv_type : motion vector type
2009 s->mv : motion vector
2010 s->interlaced_dct : true if interlaced dct used (mpeg2)
2012 static av_always_inline
2013 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2014 int lowres_flag, int is_mpeg12)
2016 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2017 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2018 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2022 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2023 /* save DCT coefficients */
2025 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
2026 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2028 for(j=0; j<64; j++){
2029 *dct++ = block[i][s->dsp.idct_permutation[j]];
2030 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2032 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2036 s->current_picture.qscale_table[mb_xy]= s->qscale;
2038 /* update DC predictors for P macroblocks */
2040 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2041 if(s->mbintra_table[mb_xy])
2042 ff_clean_intra_table_entries(s);
2046 s->last_dc[2] = 128 << s->intra_dc_precision;
2049 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2050 s->mbintra_table[mb_xy]=1;
/* NOTE(review): this is the interior of MPV_decode_mb_internal(); the function
 * header is above this excerpt and several original lines are missing here
 * (the leaked line numbers at the left show gaps).  Comments below annotate
 * only the visible stages. */
2052 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2053 uint8_t *dest_y, *dest_cb, *dest_cr;
2054 int dct_linesize, dct_offset;
2055 op_pixels_func (*op_pix)[4];
2056 qpel_mc_func (*op_qpix)[16];
2057 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2058 const int uvlinesize= s->current_picture.linesize[1];
2059 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2060 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2062 /* avoid copy if macroblock skipped in last frame too */
2063 /* skip only during decoding as we might trash the buffers during encoding a bit */
2065 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2066 const int age= s->current_picture.age;
/* mbskip bookkeeping: the per-MB counter is bumped (saturating at 99) so that
 * a MB skipped in consecutive frames needs no copy at all. */
2070 if (s->mb_skipped) {
2072 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2074 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2075 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2077 /* if previous was skipped too, then nothing to do ! */
2078 if (*mbskip_ptr >= age && s->current_picture.reference){
2081 } else if(!s->current_picture.reference){
2082 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2083 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2085 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT doubles the row stride and puts the second field one line
 * down instead of block_size lines down */
2089 dct_linesize = linesize << s->interlaced_dct;
2090 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2094 dest_cb= s->dest[1];
2095 dest_cr= s->dest[2];
/* NOTE(review): presumably the !readable branch — reconstruct into the
 * scratchpad and copy to the visible frame at the end; the selecting
 * if/else lines are missing from this excerpt. */
2097 dest_y = s->b_scratchpad;
2098 dest_cb= s->b_scratchpad+16*linesize;
2099 dest_cr= s->b_scratchpad+32*linesize;
2103 /* motion handling */
2104 /* decoding or more than one mb_type (MC was already done otherwise) */
/* with frame threading, wait until the reference rows this MB needs have
 * been decoded by the other thread */
2107 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2108 if (s->mv_dir & MV_DIR_FORWARD) {
2109 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2111 if (s->mv_dir & MV_DIR_BACKWARD) {
2112 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* lowres path: motion compensation via the H.264 chroma MC functions
 * (put for the first direction, avg for the second in B-frames) */
2117 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2119 if (s->mv_dir & MV_DIR_FORWARD) {
2120 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
2121 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2123 if (s->mv_dir & MV_DIR_BACKWARD) {
2124 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
/* full-resolution path: pick rounded vs no-rounding put, switch to avg
 * for the second prediction direction */
2127 op_qpix= s->me.qpel_put;
2128 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2129 op_pix = s->dsp.put_pixels_tab;
2131 op_pix = s->dsp.put_no_rnd_pixels_tab;
2133 if (s->mv_dir & MV_DIR_FORWARD) {
2134 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2135 op_pix = s->dsp.avg_pixels_tab;
2136 op_qpix= s->me.qpel_avg;
2138 if (s->mv_dir & MV_DIR_BACKWARD) {
2139 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2144 /* skip dequant / idct if we are really late ;) */
2145 if(s->avctx->skip_idct){
2146 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2147 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2148 || s->avctx->skip_idct >= AVDISCARD_ALL)
2152 /* add dct residue */
/* codecs whose dequant was not already folded into the bitstream decode
 * (or any encoder) need an explicit dequant+idct+add here */
2153 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2154 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2155 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2156 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2157 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2158 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2160 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2161 if (s->chroma_y_shift){
2162 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2163 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2167 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2168 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2169 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2170 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* already-dequantized residue: idct+add only */
2173 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2174 add_dct(s, block[0], 0, dest_y , dct_linesize);
2175 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2176 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2177 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2179 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2180 if(s->chroma_y_shift){//Chroma420
2181 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2182 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2185 dct_linesize = uvlinesize << s->interlaced_dct;
2186 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2188 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2189 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2190 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2191 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2192 if(!s->chroma_x_shift){//Chroma444
2193 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2194 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2195 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2196 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2201 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2202 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2205 /* dct only in intra block */
2206 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2207 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2208 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2209 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2210 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2212 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2213 if(s->chroma_y_shift){
2214 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2215 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2219 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2220 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2221 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2222 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks that are already dequantized: plain idct_put */
2226 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2227 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2228 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2229 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2231 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2232 if(s->chroma_y_shift){
2233 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2234 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2237 dct_linesize = uvlinesize << s->interlaced_dct;
2238 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2240 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2241 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2242 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2243 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2244 if(!s->chroma_x_shift){//Chroma444
2245 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2246 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2247 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2248 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* presumably the !readable case: copy the scratchpad reconstruction to the
 * visible picture — TODO confirm, the guarding if() is missing here */
2256 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2257 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2258 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one decoded macroblock: dispatches to MPV_decode_mb_internal()
 * with compile-time (lowres_flag, is_mpeg12) arguments so each of the four
 * combinations gets its own specialized code path.
 * NOTE(review): the else-branch lines between the two pairs are missing from
 * this excerpt.
 */
2263 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2265 if(s->out_format == FMT_MPEG1) {
2266 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2267 else MPV_decode_mb_internal(s, block, 0, 1);
2270 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2271 else MPV_decode_mb_internal(s, block, 0, 0);
/* Draw-edges + draw_horiz_band callback for a just-decoded band of rows.
 * NOTE(review): several original lines (opening doc-comment, offset[] setup
 * for B-frames, braces) are missing from this excerpt. */
2276 * @param h is the normal height, this will be reduced automatically if needed for the last row
2278 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2279 const int field_pic= s->picture_structure != PICT_FRAME;
/* pad the picture borders for unrestricted MV codecs, unless a hwaccel or
 * EMU_EDGE makes the edges unnecessary/unavailable */
2285 if (!s->avctx->hwaccel
2286 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2287 && s->unrestricted_mv
2288 && s->current_picture.reference
2290 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2291 int sides = 0, edge_h;
2292 if (y==0) sides |= EDGE_TOP;
2293 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2295 edge_h= FFMIN(h, s->v_edge_pos - y);
/* chroma planes use half the width/height/edge of luma (4:2:0 layout here) */
2297 s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize , s->h_edge_pos , edge_h , EDGE_WIDTH , sides);
2298 s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>1)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>1, edge_h>>1, EDGE_WIDTH/2, sides);
2299 s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>1)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>1, edge_h>>1, EDGE_WIDTH/2, sides);
2302 h= FFMIN(h, s->avctx->height - y);
2304 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2306 if (s->avctx->draw_horiz_band) {
/* B-frames / low_delay / coded-order slices are shown directly; otherwise
 * the previous (display-order) picture is handed to the callback */
2310 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2311 src= (AVFrame*)s->current_picture_ptr;
2312 else if(s->last_picture_ptr)
2313 src= (AVFrame*)s->last_picture_ptr;
2317 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2323 offset[0]= y * s->linesize;
2325 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2331 s->avctx->draw_horiz_band(s->avctx, src, offset,
2332 y, s->picture_structure, h);
/**
 * Set up per-macroblock block_index[] (motion-vector/coded-block indexing)
 * and the dest[] pointers for the current mb_x/mb_y position.
 * dest[] starts one MB to the left (mb_x - 1); callers advance it per MB.
 */
2336 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2337 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2338 const int uvlinesize= s->current_picture.linesize[1];
2339 const int mb_size= 4 - s->avctx->lowres; // log2 of MB size (16 >> lowres)
/* indices 0-3: the four luma 8x8 blocks; 4-5: chroma planes, stored after
 * the luma plane area in the per-frame index space */
2341 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2342 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2343 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2344 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2345 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2346 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2347 //block_index is not used by mpeg2, so it is not affected by chroma_format
2349 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2350 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2351 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2353 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2355 if(s->picture_structure==PICT_FRAME){
2356 s->dest[0] += s->mb_y * linesize << mb_size;
2357 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2358 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows interleave two fields, so only half the mb_y rows
 * belong to this field */
2360 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2361 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2362 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2363 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush decoder state (e.g. after a seek): release all internally/user
 * allocated picture buffers, reset reference-picture pointers, MB position,
 * parser state and the pending bitstream buffer.
 */
2368 void ff_mpeg_flush(AVCodecContext *avctx){
2370 MpegEncContext *s = avctx->priv_data;
2372 if(s==NULL || s->picture==NULL)
2375 for(i=0; i<s->picture_count; i++){
2376 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2377 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2378 free_frame_buffer(s, &s->picture[i]);
2380 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2382 s->mb_x= s->mb_y= 0;
/* reset the frame-boundary parser so stale partial frames are discarded */
2385 s->parse_context.state= -1;
2386 s->parse_context.frame_start_found= 0;
2387 s->parse_context.overread= 0;
2388 s->parse_context.overread_index= 0;
2389 s->parse_context.index= 0;
2390 s->parse_context.last_index= 0;
2391 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra-block inverse quantization (C reference).
 * DC (block[0]) is scaled by the y/c DC scaler; AC coefficients
 * 1..block_last_index[n] go through the intra matrix with the
 * "(level-1)|1" mismatch/oddification step of MPEG-1.
 * NOTE(review): the luma/chroma DC branch, sign handling and loop tail are
 * missing from this excerpt (see the gaps in the leaked line numbers).
 */
2395 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2396 DCTELEM *block, int n, int qscale)
2398 int i, level, nCoeffs;
2399 const uint16_t *quant_matrix;
2401 nCoeffs= s->block_last_index[n];
2404 block[0] = block[0] * s->y_dc_scale;
2406 block[0] = block[0] * s->c_dc_scale;
2407 /* XXX: only mpeg1 */
2408 quant_matrix = s->intra_matrix;
2409 for(i=1;i<=nCoeffs;i++) {
2410 int j= s->intra_scantable.permutated[i];
2415 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2416 level = (level - 1) | 1;
2419 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2420 level = (level - 1) | 1;
/**
 * MPEG-1 inter-block inverse quantization (C reference).
 * All coefficients 0..block_last_index[n] use the inter matrix with the
 * MPEG-1 "(2*level+1)*q*m >> 4" formula and "(level-1)|1" oddification.
 * NOTE(review): sign handling and loop tail lines are missing from this
 * excerpt.
 */
2427 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2428 DCTELEM *block, int n, int qscale)
2430 int i, level, nCoeffs;
2431 const uint16_t *quant_matrix;
2433 nCoeffs= s->block_last_index[n];
2435 quant_matrix = s->inter_matrix;
2436 for(i=0; i<=nCoeffs; i++) {
2437 int j= s->intra_scantable.permutated[i];
2442 level = (((level << 1) + 1) * qscale *
2443 ((int) (quant_matrix[j]))) >> 4;
2444 level = (level - 1) | 1;
2447 level = (((level << 1) + 1) * qscale *
2448 ((int) (quant_matrix[j]))) >> 4;
2449 level = (level - 1) | 1;
/**
 * MPEG-2 intra-block inverse quantization (C reference).
 * Like the MPEG-1 intra version but without the "(level-1)|1" oddification,
 * and with alternate_scan forcing all 63 AC coefficients to be processed.
 * NOTE(review): DC branch, sign handling and loop tail lines are missing
 * from this excerpt.
 */
2456 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2457 DCTELEM *block, int n, int qscale)
2459 int i, level, nCoeffs;
2460 const uint16_t *quant_matrix;
2462 if(s->alternate_scan) nCoeffs= 63;
2463 else nCoeffs= s->block_last_index[n];
2466 block[0] = block[0] * s->y_dc_scale;
2468 block[0] = block[0] * s->c_dc_scale;
2469 quant_matrix = s->intra_matrix;
2470 for(i=1;i<=nCoeffs;i++) {
2471 int j= s->intra_scantable.permutated[i];
2476 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2479 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bitexact variant of the MPEG-2 intra inverse quantization — presumably it
 * additionally tracks the coefficient sum for the spec's mismatch control
 * (the accumulating/parity lines are missing from this excerpt — TODO
 * confirm against the full source).
 */
2486 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2487 DCTELEM *block, int n, int qscale)
2489 int i, level, nCoeffs;
2490 const uint16_t *quant_matrix;
2493 if(s->alternate_scan) nCoeffs= 63;
2494 else nCoeffs= s->block_last_index[n];
2497 block[0] = block[0] * s->y_dc_scale;
2499 block[0] = block[0] * s->c_dc_scale;
2500 quant_matrix = s->intra_matrix;
2501 for(i=1;i<=nCoeffs;i++) {
2502 int j= s->intra_scantable.permutated[i];
2507 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2510 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter-block inverse quantization (C reference).
 * Uses the inter matrix and the "(2*level+1)*q*m >> 4" formula; unlike
 * MPEG-1 there is no per-coefficient oddification here.
 * NOTE(review): sign handling and the mismatch-control tail are missing
 * from this excerpt.
 */
2519 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2520 DCTELEM *block, int n, int qscale)
2522 int i, level, nCoeffs;
2523 const uint16_t *quant_matrix;
2526 if(s->alternate_scan) nCoeffs= 63;
2527 else nCoeffs= s->block_last_index[n];
2529 quant_matrix = s->inter_matrix;
2530 for(i=0; i<=nCoeffs; i++) {
2531 int j= s->intra_scantable.permutated[i];
2536 level = (((level << 1) + 1) * qscale *
2537 ((int) (quant_matrix[j]))) >> 4;
2540 level = (((level << 1) + 1) * qscale *
2541 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263/MPEG-4 style intra inverse quantization (C reference):
 * level' = level*qmul +/- qadd with qadd = (qscale-1)|1.
 * DC is scaled by the y/c DC scaler; the loop runs to the raster-order end
 * of the coded coefficients.
 * NOTE(review): the qmul setup, DC/AC branch lines and loop tail are
 * missing from this excerpt.
 */
2550 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2551 DCTELEM *block, int n, int qscale)
2553 int i, level, qmul, qadd;
2556 assert(s->block_last_index[n]>=0);
2562 block[0] = block[0] * s->y_dc_scale;
2564 block[0] = block[0] * s->c_dc_scale;
2565 qadd = (qscale - 1) | 1;
2572 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2574 for(i=1; i<=nCoeffs; i++) {
2578 level = level * qmul - qadd; // negative coefficients: subtract qadd
2580 level = level * qmul + qadd; // positive coefficients: add qadd
/**
 * H.263/MPEG-4 style inter inverse quantization (C reference); same
 * level*qmul +/- qadd formula as the intra version but applied to all
 * coefficients starting at 0 and without DC scaling.
 * NOTE(review): the qmul setup and loop tail lines are missing from this
 * excerpt.
 */
2587 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2588 DCTELEM *block, int n, int qscale)
2590 int i, level, qmul, qadd;
2593 assert(s->block_last_index[n]>=0);
2595 qadd = (qscale - 1) | 1;
2598 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2600 for(i=0; i<=nCoeffs; i++) {
2604 level = level * qmul - qadd; // negative coefficients
2606 level = level * qmul + qadd; // positive coefficients
/* Clamp qscale to the legal [1..31] range and refresh the derived
 * chroma_qscale and DC scalers from their lookup tables.
 * NOTE(review): the clamp assignments and the low-bound check are missing
 * from this excerpt. */
2614 * set qscale and update qscale dependent variables.
2616 void ff_set_qscale(MpegEncContext * s, int qscale)
2620 else if (qscale > 31)
2624 s->chroma_qscale= s->chroma_qscale_table[qscale];
2626 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2627 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report frame-threading decode progress up to the current MB row, except
 * for B-frames (not used as references), partitioned frames, or after an
 * error (where rows may be incomplete).
 * NOTE(review): uses FF_B_TYPE while the rest of this file uses
 * AV_PICTURE_TYPE_B — same value, but worth unifying for consistency.
 */
2630 void MPV_report_decode_progress(MpegEncContext *s)
2632 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2633 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);