2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the portable C reference implementations of the
 * per-codec inverse-quantization routines.  ff_dct_common_init() installs
 * these into the MpegEncContext function pointers; architecture-specific
 * init (MMX/AltiVec/...) may later override them. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
/* bit-exact variant, selected when CODEC_FLAG_BITEXACT is set (see
 * ff_dct_common_init()) */
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity function
 * (chroma uses the same quantizer as luma).  Installed as the default in
 * MPV_common_defaults(). */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC coefficient scale: a constant 8 regardless of qscale.
 * Indexed by qscale; also reused as entry 0 of ff_mpeg2_dc_scale_table
 * below. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table for entry 1 of ff_mpeg2_dc_scale_table: constant 4.
 * NOTE(review): presumably corresponds to intra_dc_precision == 1 (DC scale
 * halves at each precision step 8/4/2/1) — confirm against the MPEG-2 spec. */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table for entry 2 of ff_mpeg2_dc_scale_table: constant 2. */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table for entry 3 of ff_mpeg2_dc_scale_table: constant 1
 * (i.e. DC coefficients pass through unscaled). */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25  26  27  28  29  30  31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Selector over the four DC scale tables above (8, 4, 2, 1).
 * NOTE(review): the outer index is presumably intra_dc_precision (0..3),
 * the inner index the qscale — confirm at the call sites. */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
/* Supported pixel formats for plain (software) 4:2:0 decoding.
 * NOTE(review): the initializer entries are not visible in this excerpt. */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
/* Pixel format candidates for 4:2:0 decoding including hardware-accelerated
 * formats.  NOTE(review): initializer entries not visible in this excerpt. */
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start-code prefix (0x000001xx).
 * @param state in/out: rolling 32-bit window of the last bytes seen, so a
 *              start code split across successive calls is still found.
 * @return pointer just past the start code, or a resume position at/near
 *         end when no complete start code was found (excerpt is missing
 *         intermediate lines, so the exact return paths are not all visible).
 */
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
/* shift the window left one byte and append the next input byte */
133 uint32_t tmp= *state << 8;
134 *state= tmp + *(p++);
/* 0x100 in the upper bytes means the 00 00 01 prefix has been matched */
135 if(tmp == 0x100 || p==end)
/* step backwards past bytes that cannot be part of a 00 00 01 prefix */
140 if (p[-1] > 1 ) p+= 3;
141 else if(p[-2] ) p+= 2;
142 else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
/* install the portable C inverse-quantization routines as defaults */
158 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
159 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
160 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
161 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bit-exact output requested: use the exact (slower) mpeg2 intra variant */
163 if(s->flags & CODEC_FLAG_BITEXACT)
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
165 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture overrides; each call is presumably guarded by the
 * matching HAVE_/ARCH_ condition on lines not visible in this excerpt */
168 MPV_common_init_mmx(s);
170 MPV_common_init_axp(s);
172 MPV_common_init_mlib(s);
174 MPV_common_init_mmi(s);
176 MPV_common_init_arm(s);
178 MPV_common_init_altivec(s);
180 MPV_common_init_bfin(s);
183 /* load & permutate scantables
184 note: only wmv uses different ones
/* pick vertical-alternate or zigzag scan depending on alternate_scan,
 * permuted for the IDCT in use */
186 if(s->alternate_scan){
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copy src into dst and mark dst as a copy so the buffer is not
 * freed twice.  NOTE(review): the actual struct copy line is not visible in
 * this excerpt. */
199 void ff_copy_picture(Picture *dst, Picture *src){
201 dst->type= FF_BUFFER_TYPE_COPY;
205 * Release a frame buffer
/* Returns the frame to the (possibly threaded) buffer pool and frees any
 * hwaccel private data attached to it. */
207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
209 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
210 av_freep(&pic->hwaccel_picture_private);
214 * Allocate a frame buffer
/* Obtains pixel buffers for pic via get_buffer, allocating hwaccel private
 * data first when a hwaccel is active.  Validates stride consistency; on any
 * failure the partially acquired resources are released.  Return value and
 * some cleanup paths are on lines not visible in this excerpt. */
216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
220 if (s->avctx->hwaccel) {
221 assert(!pic->hwaccel_picture_private);
222 if (s->avctx->hwaccel->priv_data_size) {
223 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
224 if (!pic->hwaccel_picture_private) {
225 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
231 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
/* reject buffers that were not properly filled in by get_buffer */
233 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
234 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
235 av_freep(&pic->hwaccel_picture_private);
/* strides must stay constant across frames once s->linesize is set */
239 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
240 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
241 free_frame_buffer(s, pic);
/* both chroma planes must share one stride */
245 if (pic->linesize[1] != pic->linesize[2]) {
246 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
247 free_frame_buffer(s, pic);
255 * allocates a Picture
256 * The pixels are allocated/set by calling get_buffer() if shared=0
258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
259 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
260 const int mb_array_size= s->mb_stride*s->mb_height;
261 const int b8_array_size= s->b8_stride*s->mb_height*2;
262 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared path: caller owns the pixels, just tag the Picture accordingly */
267 assert(pic->data[0]);
268 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
269 pic->type= FF_BUFFER_TYPE_SHARED;
/* non-shared path: must not already have pixels; get them via get_buffer */
271 assert(!pic->data[0]);
273 if (alloc_frame_buffer(s, pic) < 0)
/* adopt the strides of the first allocated frame as the context strides */
276 s->linesize = pic->linesize[0];
277 s->uvlinesize= pic->linesize[1];
/* first-time allocation of the per-picture side tables */
280 if(pic->qscale_table==NULL){
282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
289 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
290 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
/* H.264 uses 4x4 motion granularity (b4 arrays), others 8x8 (b8 arrays) */
291 if(s->out_format == FMT_H264){
293 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
294 pic->motion_val[i]= pic->motion_val_base[i]+4;
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
297 pic->motion_subsample_log2= 2;
298 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
300 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
301 pic->motion_val[i]= pic->motion_val_base[i]+4;
302 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
304 pic->motion_subsample_log2= 3;
306 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
307 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
309 pic->qstride= s->mb_stride;
310 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
313 /* It might be nicer if the application would keep track of these
314 * but it would require an API change. */
315 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
316 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
317 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
318 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
322 fail: //for the FF_ALLOCZ_OR_GOTO macro
324 free_frame_buffer(s, pic);
329 * deallocates a picture
/* Frees the pixel buffers (unless they are application-owned/shared) and
 * every side table allocated by ff_alloc_picture(). */
331 static void free_picture(MpegEncContext *s, Picture *pic){
334 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
335 free_frame_buffer(s, pic);
338 av_freep(&pic->mb_var);
339 av_freep(&pic->mc_mb_var);
340 av_freep(&pic->mb_mean);
341 av_freep(&pic->mbskip_table);
342 av_freep(&pic->qscale_table);
343 av_freep(&pic->mb_type_base);
344 av_freep(&pic->dct_coeff);
345 av_freep(&pic->pan_scan);
/* presumably inside a loop over the two motion directions (loop header not
 * visible in this excerpt) */
348 av_freep(&pic->motion_val_base[i]);
349 av_freep(&pic->ref_index[i]);
/* shared buffers: only clear the pointers, the application owns the memory
 * (handling lines not visible in this excerpt) */
352 if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocates the per-thread scratch buffers of an MpegEncContext (edge
 * emulation, motion-estimation scratchpads, DCT blocks, H.263 ac values).
 * Returns 0 on success; on failure returns -1 and relies on
 * MPV_common_end() for cleanup. */
361 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
362 int y_size = s->b8_stride * (2 * s->mb_height + 1);
363 int c_size = s->mb_stride * (s->mb_height + 1);
364 int yc_size = y_size + 2 * c_size;
367 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
368 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
369 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
371 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
/* one backing allocation shared by several scratchpad views */
372 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
373 s->me.temp= s->me.scratchpad;
374 s->rd_scratchpad= s->me.scratchpad;
375 s->b_scratchpad= s->me.scratchpad;
376 s->obmc_scratchpad= s->me.scratchpad + 16;
378 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
379 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
380 if(s->avctx->noise_reduction){
381 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
384 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
385 s->block= s->blocks[0];
/* presumably inside a loop over the 12 blocks (header not visible) */
388 s->pblocks[i] = &s->block[i];
391 if (s->out_format == FMT_H263) {
/* ac prediction values, offset past the left/top border rows */
393 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
394 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
395 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
396 s->ac_val[2] = s->ac_val[1] + c_size;
401 return -1; //free() through MPV_common_end()
/* Frees everything allocated by init_duplicate_context() and clears the
 * derived pointer views into those buffers. */
404 static void free_duplicate_context(MpegEncContext *s){
407 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
/* scratchpad views (me.temp, rd_/b_scratchpad, obmc_scratchpad) all alias
 * me.scratchpad, so only that one allocation is freed */
408 av_freep(&s->me.scratchpad);
412 s->obmc_scratchpad= NULL;
414 av_freep(&s->dct_error_sum);
415 av_freep(&s->me.map);
416 av_freep(&s->me.score_map);
417 av_freep(&s->blocks);
418 av_freep(&s->ac_val_base);
/* Copies the per-thread fields from src into bak so that
 * ff_update_duplicate_context() can memcpy the whole context and then
 * restore them.  Only a few of the COPY() lines are visible in this
 * excerpt. */
422 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
423 #define COPY(a) bak->a= src->a
424 COPY(allocated_edge_emu_buffer);
425 COPY(edge_emu_buffer);
430 COPY(obmc_scratchpad);
437 COPY(me.map_generation);
/* Re-synchronizes a per-thread context with the master: saves dst's
 * thread-local fields, copies src wholesale, then restores them, and
 * rebinds the pblocks pointers into dst's own block array. */
449 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
452 //FIXME copy only needed parts
454 backup_duplicate_context(&bak, dst);
455 memcpy(dst, src, sizeof(MpegEncContext));
456 backup_duplicate_context(dst, &bak);
/* presumably inside a loop over the 12 blocks (header not visible) */
458 dst->pblocks[i] = &dst->block[i];
460 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading support: copies the decoding state a future frame needs
 * from the source thread's context (s1) into the destination (s).  Picture
 * pointers are rebased into s's own picture array via REBASE_PICTURE. */
463 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
465 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
467 if(dst == src || !s1->context_initialized) return 0;
469 //FIXME can parameters change on I-frames? in that case dst may need a reinit
/* first call for this thread: clone the whole context, then fix up the
 * fields that must be private to this thread */
470 if(!s->context_initialized){
471 memcpy(s, s1, sizeof(MpegEncContext));
474 s->picture_range_start += MAX_PICTURE_COUNT;
475 s->picture_range_end += MAX_PICTURE_COUNT;
476 s->bitstream_buffer = NULL;
477 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
482 s->avctx->coded_height = s1->avctx->coded_height;
483 s->avctx->coded_width = s1->avctx->coded_width;
484 s->avctx->width = s1->avctx->width;
485 s->avctx->height = s1->avctx->height;
487 s->coded_picture_number = s1->coded_picture_number;
488 s->picture_number = s1->picture_number;
489 s->input_picture_number = s1->input_picture_number;
/* copy the picture array plus the struct-range between last_picture and
 * last_picture_ptr (relies on MpegEncContext field layout) */
491 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
492 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
494 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
495 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
496 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
498 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
500 //Error/bug resilience
501 s->next_p_frame_damaged = s1->next_p_frame_damaged;
502 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 timing fields, copied as a struct range (layout-dependent) */
505 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
508 s->max_b_frames = s1->max_b_frames;
509 s->low_delay = s1->low_delay;
510 s->dropable = s1->dropable;
512 //DivX handling (doesn't work)
513 s->divx_packed = s1->divx_packed;
/* hand over any buffered bitstream data, growing our buffer if needed and
 * zero-padding the tail for the bitstream reader */
515 if(s1->bitstream_buffer){
516 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
517 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
518 s->bitstream_buffer_size = s1->bitstream_buffer_size;
519 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
520 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
523 //MPEG2/interlacing info
524 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
526 if(!s1->first_field){
527 s->last_pict_type= s1->pict_type;
528 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
530 if(s1->pict_type!=AV_PICTURE_TYPE_B){
531 s->last_non_b_pict_type= s1->pict_type;
539 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
540 * the changed fields will not depend upon the prior state of the MpegEncContext.
542 void MPV_common_defaults(MpegEncContext *s){
/* MPEG-1 style constant-8 DC scale and identity chroma qscale by default */
544 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
545 s->chroma_qscale_table= ff_default_chroma_qscale_table;
546 s->progressive_frame= 1;
547 s->progressive_sequence= 1;
548 s->picture_structure= PICT_FRAME;
550 s->coded_picture_number = 0;
551 s->picture_number = 0;
552 s->input_picture_number = 0;
554 s->picture_in_gop_number = 0;
/* single-thread default; frame threading shifts these per thread in
 * ff_mpeg_update_thread_context() */
559 s->picture_range_start = 0;
560 s->picture_range_end = MAX_PICTURE_COUNT;
564 * sets the given MpegEncContext to defaults for decoding.
565 * the changed fields will not depend upon the prior state of the MpegEncContext.
567 void MPV_decode_defaults(MpegEncContext *s){
568 MPV_common_defaults(s);
572 * init common structure for both encoder and decoder.
573 * this assumes that some variables like width/height are already set
575 av_cold int MPV_common_init(MpegEncContext *s)
577 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
/* interlaced MPEG-2: mb_height must be a multiple of 2 macroblock rows */
579 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
580 s->mb_height = (s->height + 31) / 32 * 2;
581 else if (s->codec_id != CODEC_ID_H264)
582 s->mb_height = (s->height + 15) / 16;
584 if(s->avctx->pix_fmt == PIX_FMT_NONE){
585 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* slice threading cannot use more threads than macroblock rows */
589 if(s->avctx->active_thread_type&FF_THREAD_SLICE &&
590 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
591 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
595 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
598 dsputil_init(&s->dsp, s->avctx);
599 ff_dct_common_init(s);
601 s->flags= s->avctx->flags;
602 s->flags2= s->avctx->flags2;
/* derived macroblock / sub-block geometry; strides include a +1 border */
604 s->mb_width = (s->width + 15) / 16;
605 s->mb_stride = s->mb_width + 1;
606 s->b8_stride = s->mb_width*2 + 1;
607 s->b4_stride = s->mb_width*4 + 1;
608 mb_array_size= s->mb_height * s->mb_stride;
609 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
611 /* set chroma shifts */
612 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
613 &(s->chroma_y_shift) );
615 /* set default edge pos, will be overriden in decode_header if needed */
616 s->h_edge_pos= s->mb_width*16;
617 s->v_edge_pos= s->mb_height*16;
619 s->mb_num = s->mb_width * s->mb_height;
624 s->block_wrap[3]= s->b8_stride;
626 s->block_wrap[5]= s->mb_stride;
628 y_size = s->b8_stride * (2 * s->mb_height + 1);
629 c_size = s->mb_stride * (s->mb_height + 1);
630 yc_size = y_size + 2 * c_size;
632 /* convert fourcc to upper case */
633 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
635 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
637 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* mb index -> mb x/y lookup, used by the error resilience code */
639 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
640 for(y=0; y<s->mb_height; y++){
641 for(x=0; x<s->mb_width; x++){
642 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
645 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
648 /* Allocate MV tables */
649 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
650 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
651 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
652 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
653 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
654 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* working pointers skip the one-row/one-column border of the base arrays */
655 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
656 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
657 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
658 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
659 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
660 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
662 if(s->msmpeg4_version){
663 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
665 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
667 /* Allocate MB type table */
668 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
670 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
/* quantization matrices: 32 qscales x 64 coefficients */
672 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
673 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
674 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
675 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
676 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
677 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
679 if(s->avctx->noise_reduction){
680 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* picture pool is scaled by thread count for frame threading */
683 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
684 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
685 for(i = 0; i < s->picture_count; i++) {
686 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
689 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
691 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
692 /* interlaced direct mode decoding tables */
/* presumably nested loops over i/j/k (headers not visible in this excerpt) */
697 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
698 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
700 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
701 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
702 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
704 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
707 if (s->out_format == FMT_H263) {
709 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
710 s->coded_block= s->coded_block_base + s->b8_stride + 1;
712 /* cbp, ac_pred, pred_dir */
713 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
714 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
717 if (s->h263_pred || s->h263_plus || !s->encoding) {
719 //MN: we need these for error resilience of intra-frames
720 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
721 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
722 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
723 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC prediction value */
724 for(i=0;i<yc_size;i++)
725 s->dc_val_base[i] = 1024;
728 /* which mb is a intra block */
729 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
730 memset(s->mbintra_table, 1, mb_array_size);
732 /* init macroblock skip table */
733 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
734 //Note the +1 is for a quicker mpeg4 slice_end detection
735 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
737 s->parse_context.state= -1;
738 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
739 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
740 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
741 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
744 s->context_initialized = 1;
745 s->thread_context[0]= s;
/* slice threading: thread 0 reuses s itself, threads 1..n-1 get copies;
 * each thread is then given its own scratch buffers and mb-row range */
747 if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE) {
748 threads = s->avctx->thread_count;
750 for(i=1; i<threads; i++){
751 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
752 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
755 for(i=0; i<threads; i++){
756 if(init_duplicate_context(s->thread_context[i], s) < 0)
758 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
759 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
762 if(init_duplicate_context(s, s) < 0) goto fail;
764 s->end_mb_y = s->mb_height;
773 /* init common structure for both encoder and decoder */
/* Frees everything MPV_common_init() allocated, in roughly reverse order;
 * clears the derived pointers so a later re-init starts clean. */
774 void MPV_common_end(MpegEncContext *s)
778 if (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE) {
779 for(i=0; i<s->avctx->thread_count; i++){
780 free_duplicate_context(s->thread_context[i]);
/* thread 0 is s itself, so only contexts 1..n-1 are freed as allocations */
782 for(i=1; i<s->avctx->thread_count; i++){
783 av_freep(&s->thread_context[i]);
785 } else free_duplicate_context(s);
787 av_freep(&s->parse_context.buffer);
788 s->parse_context.buffer_size=0;
790 av_freep(&s->mb_type);
791 av_freep(&s->p_mv_table_base);
792 av_freep(&s->b_forw_mv_table_base);
793 av_freep(&s->b_back_mv_table_base);
794 av_freep(&s->b_bidir_forw_mv_table_base);
795 av_freep(&s->b_bidir_back_mv_table_base);
796 av_freep(&s->b_direct_mv_table_base);
/* the non-base pointers are offset views into the freed arrays */
798 s->b_forw_mv_table= NULL;
799 s->b_back_mv_table= NULL;
800 s->b_bidir_forw_mv_table= NULL;
801 s->b_bidir_back_mv_table= NULL;
802 s->b_direct_mv_table= NULL;
/* presumably nested loops over i/j/k (headers not visible in this excerpt) */
806 av_freep(&s->b_field_mv_table_base[i][j][k]);
807 s->b_field_mv_table[i][j][k]=NULL;
809 av_freep(&s->b_field_select_table[i][j]);
810 av_freep(&s->p_field_mv_table_base[i][j]);
811 s->p_field_mv_table[i][j]=NULL;
813 av_freep(&s->p_field_select_table[i]);
816 av_freep(&s->dc_val_base);
817 av_freep(&s->coded_block_base);
818 av_freep(&s->mbintra_table);
819 av_freep(&s->cbp_table);
820 av_freep(&s->pred_dir_table);
822 av_freep(&s->mbskip_table);
823 av_freep(&s->prev_pict_types);
824 av_freep(&s->bitstream_buffer);
825 s->allocated_bitstream_buffer_size=0;
827 av_freep(&s->avctx->stats_out);
828 av_freep(&s->ac_stats);
829 av_freep(&s->error_status_table);
830 av_freep(&s->mb_index2xy);
831 av_freep(&s->lambda_table);
832 av_freep(&s->q_intra_matrix);
833 av_freep(&s->q_inter_matrix);
834 av_freep(&s->q_intra_matrix16);
835 av_freep(&s->q_inter_matrix16);
836 av_freep(&s->input_picture);
837 av_freep(&s->reordered_input_picture);
838 av_freep(&s->dct_offset);
/* copies share the pictures with the master thread, so don't free them */
840 if(s->picture && !s->avctx->is_copy){
841 for(i=0; i<s->picture_count; i++){
842 free_picture(s, &s->picture[i]);
845 av_freep(&s->picture);
846 s->context_initialized = 0;
849 s->current_picture_ptr= NULL;
850 s->linesize= s->uvlinesize= 0;
/* presumably inside a small loop over the 3 planes (header not visible) */
853 av_freep(&s->visualization_buffer[i]);
855 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
856 avcodec_default_free_buffers(s->avctx);
/* Fills in the derived RLTable arrays (max_level, max_run, index_run) for
 * the non-last and last coefficient cases.  When static_store is supplied
 * the results live in that caller-provided static buffer (and a second call
 * is a no-op); otherwise they are heap-allocated with av_malloc. */
859 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
861 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
862 uint8_t index_run[MAX_RUN+1];
863 int last, run, level, start, end, i;
865 /* If table is static, we can quit if rl->max_level[0] is not NULL */
866 if(static_store && rl->max_level[0])
869 /* compute max_level[], max_run[] and index_run[] */
870 for(last=0;last<2;last++) {
/* rl->n marks "no entry" for index_run; start/end are set on lines not
 * visible in this excerpt */
879 memset(max_level, 0, MAX_RUN + 1);
880 memset(max_run, 0, MAX_LEVEL + 1);
881 memset(index_run, rl->n, MAX_RUN + 1);
882 for(i=start;i<end;i++) {
883 run = rl->table_run[i];
884 level = rl->table_level[i];
885 if (index_run[run] == rl->n)
887 if (level > max_level[run])
888 max_level[run] = level;
889 if (run > max_run[level])
890 max_run[level] = run;
/* publish results: three sub-ranges of static_store[last], or fresh heap
 * allocations when no static storage was given */
893 rl->max_level[last] = static_store[last];
895 rl->max_level[last] = av_malloc(MAX_RUN + 1);
896 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
898 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
900 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
901 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
903 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
905 rl->index_run[last] = av_malloc(MAX_RUN + 1);
906 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Builds the combined run/level/length VLC lookup tables (rl_vlc[q]) from
 * the plain VLC table, pre-applying the quantizer (qmul/qadd set on lines
 * not visible in this excerpt, presumably per q in an outer loop). */
910 void init_vlc_rl(RLTable *rl)
922 for(i=0; i<rl->vlc.table_size; i++){
923 int code= rl->vlc.table[i][0];
924 int len = rl->vlc.table[i][1];
927 if(len==0){ // illegal code
930 }else if(len<0){ //more bits needed
934 if(code==rl->n){ //esc
/* regular code: precompute dequantized level and run (+1, and +192 to flag
 * the "last coefficient" entries) */
938 run= rl->table_run [code] + 1;
939 level= rl->table_level[code] * qmul + qadd;
940 if(code >= rl->last) run+=192;
943 rl->rl_vlc[q][i].len= len;
944 rl->rl_vlc[q][i].level= level;
945 rl->rl_vlc[q][i].run= run;
/* Frees the pixel buffers of every picture in the pool that is not a
 * reference, belongs to this context (owner2), and — unless remove_current
 * is set — is not the picture currently being decoded. */
950 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
954 /* release non reference frames */
955 for(i=0; i<s->picture_count; i++){
956 if(s->picture[i].data[0] && !s->picture[i].reference
957 && s->picture[i].owner2 == s
958 && (remove_current || &s->picture[i] != s->current_picture_ptr)
959 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
960 free_frame_buffer(s, &s->picture[i]);
/* Returns the index of a free slot in s->picture within this context's
 * range.  For shared buffers only completely untyped slots qualify; for
 * internal buffers, previously-typed-but-released slots are preferred over
 * untouched ones.  Exhaustion is a hard internal error (see comment below). */
965 int ff_find_unused_picture(MpegEncContext *s, int shared){
/* shared path (the if(shared) guard is on a line not visible here) */
969 for(i=s->picture_range_start; i<s->picture_range_end; i++){
970 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
973 for(i=s->picture_range_start; i<s->picture_range_end; i++){
974 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
976 for(i=s->picture_range_start; i<s->picture_range_end; i++){
977 if(s->picture[i].data[0]==NULL) return i;
981 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
982 /* We could return -1, but the codec would crash trying to draw into a
983 * non-existing frame anyway. This is safer than waiting for a random crash.
984 * Also the return of this is never useful, an encoder must only allocate
985 * as much as allowed in the specification. This has no relationship to how
986 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
987 * enough for such valid streams).
988 * Plus, a decoder has to check stream validity and remove frames if too
989 * many reference frames are around. Waiting for "OOM" is not correct at
990 * all. Similarly, missing reference frames have to be replaced by
991 * interpolated/MC frames, anything else is a bug in the codec ...
/* Updates the per-coefficient noise-reduction offsets from the running
 * dct error statistics, separately for intra and inter blocks; halves the
 * accumulators once the sample count exceeds 2^16 to keep a moving average. */
997 static void update_noise_reduction(MpegEncContext *s){
1000 for(intra=0; intra<2; intra++){
1001 if(s->dct_count[intra] > (1<<16)){
1002 for(i=0; i<64; i++){
1003 s->dct_error_sum[intra][i] >>=1;
1005 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, with rounding and +1 to avoid
 * division by zero */
1008 for(i=0; i<64; i++){
1009 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1015 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1017 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1023 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1025 /* mark&release old frames */
/* A non-B frame displaces the oldest reference: free last_picture unless it
 * is also next_picture (H.264 manages its own references, except SVQ3). */
1026 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
1027 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1028 free_frame_buffer(s, s->last_picture_ptr);
1030 /* release forgotten pictures */
1031 /* if(mpeg124/h263) */
/* Any allocated reference picture that is not next_picture at this point is
 * stale ("zombie") and must be released to avoid leaking buffers. */
1033 for(i=0; i<s->picture_count; i++){
1034 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1035 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1036 free_frame_buffer(s, &s->picture[i]);
1044 ff_release_unused_pictures(s, 1);
/* Pick the buffer for the frame being coded/decoded now. */
1046 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1047 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1049 i= ff_find_unused_picture(s, 0);
1050 pic= &s->picture[i];
/* For H.264 the reference flag encodes the picture structure;
 * otherwise any non-B picture is a reference. */
1055 if (s->codec_id == CODEC_ID_H264)
1056 pic->reference = s->picture_structure;
1057 else if (s->pict_type != AV_PICTURE_TYPE_B)
1061 pic->coded_picture_number= s->coded_picture_number++;
1063 if(ff_alloc_picture(s, pic, 0) < 0)
1066 s->current_picture_ptr= pic;
1067 //FIXME use only the vars from current_pic
/* Propagate interlacing metadata onto the output frame. */
1068 s->current_picture_ptr->top_field_first= s->top_field_first;
1069 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1070 if(s->picture_structure != PICT_FRAME)
1071 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1073 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
1074 s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
1077 s->current_picture_ptr->pict_type= s->pict_type;
1078 // if(s->flags && CODEC_FLAG_QSCALE)
1079 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1080 s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
1082 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Rotate the reference pointers: for non-B frames the previous "next"
 * becomes "last" and the current frame becomes "next". */
1084 if (s->pict_type != AV_PICTURE_TYPE_B) {
1085 s->last_picture_ptr= s->next_picture_ptr;
1087 s->next_picture_ptr= s->current_picture_ptr;
1089 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1090 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
1091 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
1092 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
1093 s->pict_type, s->dropable);*/
/* Non-H.264 streams that start on a non-keyframe get dummy reference
 * frames allocated so motion compensation has something to read from. */
1095 if(s->codec_id != CODEC_ID_H264){
1096 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=AV_PICTURE_TYPE_I){
1097 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1098 /* Allocate a dummy frame */
1099 i= ff_find_unused_picture(s, 0);
1100 s->last_picture_ptr= &s->picture[i];
1101 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* mark both fields as fully decoded so frame-threading consumers never wait */
1103 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1104 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1106 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
1107 /* Allocate a dummy frame */
1108 i= ff_find_unused_picture(s, 0);
1109 s->next_picture_ptr= &s->picture[i];
1110 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1112 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1113 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1117 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1118 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1120 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* For field pictures (non-H.264), address a single field of the frame
 * buffer: offset to the bottom field and double every linesize. */
1122 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1125 if(s->picture_structure == PICT_BOTTOM_FIELD){
1126 s->current_picture.data[i] += s->current_picture.linesize[i];
1128 s->current_picture.linesize[i] *= 2;
1129 s->last_picture.linesize[i] *=2;
1130 s->next_picture.linesize[i] *=2;
1134 s->error_recognition= avctx->error_recognition;
1136 /* set dequantizer, we can't do it during init as it might change for mpeg4
1137 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1138 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1139 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1140 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1141 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1142 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1143 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1145 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1146 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* dct_error_sum only exists when noise reduction is active while encoding */
1149 if(s->dct_error_sum){
1150 assert(s->avctx->noise_reduction && s->encoding);
1152 update_noise_reduction(s);
1155 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1156 return ff_xvmc_field_start(s, avctx);
1161 /* generic function for encode/decode called after a frame has been coded/decoded */
1162 void MPV_frame_end(MpegEncContext *s)
1165 /* redraw edges for the frame if decoding didn't complete */
1166 //just to make sure that all data is rendered.
1167 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1168 ff_xvmc_field_end(s);
/* Pad the borders of reference pictures so unrestricted motion vectors can
 * read outside the coded area; skipped for hwaccel/VDPAU and EMU_EDGE. */
1169 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1170 && !s->avctx->hwaccel
1171 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1172 && s->unrestricted_mv
1173 && s->current_picture.reference
1175 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1176 int edges = EDGE_BOTTOM | EDGE_TOP, h = s->v_edge_pos;
/* chroma planes are half size in both dimensions, hence the >>1 and /2 */
1178 s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , h , EDGE_WIDTH , edges);
1179 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, h>>1, EDGE_WIDTH/2, edges);
1180 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, h>>1, EDGE_WIDTH/2, edges);
/* Remember picture-type statistics for rate control of the next frame. */
1186 s->last_pict_type = s->pict_type;
1187 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1188 if(s->pict_type!=AV_PICTURE_TYPE_B){
1189 s->last_non_b_pict_type= s->pict_type;
1192 /* copy back current_picture variables */
1193 for(i=0; i<MAX_PICTURE_COUNT; i++){
1194 if(s->picture[i].data[0] == s->current_picture.data[0]){
1195 s->picture[i]= s->current_picture;
1199 assert(i<MAX_PICTURE_COUNT);
1203 /* release non-reference frames */
1204 for(i=0; i<s->picture_count; i++){
1205 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1206 free_frame_buffer(s, &s->picture[i]);
1210 // clear copies, to avoid confusion
1212 memset(&s->last_picture, 0, sizeof(Picture));
1213 memset(&s->next_picture, 0, sizeof(Picture));
1214 memset(&s->current_picture, 0, sizeof(Picture));
1216 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* Signal to frame-threading consumers that this reference is complete. */
1218 if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
1219 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1224 * draws a line from (ex, ey) -> (sx, sy).
1225 * @param w width of the image
1226 * @param h height of the image
1227 * @param stride stride/linesize of the image
1228 * @param color color of the line
1230 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clamp both endpoints inside the image */
1233 sx= av_clip(sx, 0, w-1);
1234 sy= av_clip(sy, 0, h-1);
1235 ex= av_clip(ex, 0, w-1);
1236 ey= av_clip(ey, 0, h-1);
1238 buf[sy*stride + sx]+= color;
/* Step along the major axis; the 16.16 fixed-point slope f distributes the
 * color between the two nearest pixels on the minor axis (anti-aliasing). */
1240 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* ensure we iterate left-to-right */
1242 FFSWAP(int, sx, ex);
1243 FFSWAP(int, sy, ey);
1245 buf+= sx + sy*stride;
1247 f= ((ey-sy)<<16)/ex;
1248 for(x= 0; x <= ex; x++){
1251 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1252 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* mostly-vertical case: step along y instead */
1256 FFSWAP(int, sx, ex);
1257 FFSWAP(int, sy, ey);
1259 buf+= sx + sy*stride;
1261 if(ey) f= ((ex-sx)<<16)/ey;
1263 for(y= 0; y <= ey; y++){
1266 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1267 buf[y*stride + x+1]+= (color* fr )>>16;
1273 * draws an arrow from (ex, ey) -> (sx, sy).
1274 * @param w width of the image
1275 * @param h height of the image
1276 * @param stride stride/linesize of the image
1277 * @param color color of the arrow
1279 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* loosely clamp the endpoints; draw_line() does the strict in-image clip */
1282 sx= av_clip(sx, -100, w+100);
1283 sy= av_clip(sy, -100, h+100);
1284 ex= av_clip(ex, -100, w+100);
1285 ey= av_clip(ey, -100, h+100);
/* only draw the arrow head when the vector is longer than 3 pixels */
1290 if(dx*dx + dy*dy > 3*3){
1293 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1295 //FIXME subpixel accuracy
/* normalize (rx, ry) to a fixed head size */
1296 rx= ROUNDED_DIV(rx*3<<4, length);
1297 ry= ROUNDED_DIV(ry*3<<4, length);
/* the two head strokes are the direction vector and its perpendicular */
1299 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1300 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1302 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1306 * prints debugging info for the given picture.
1308 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1310 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* Textual per-macroblock dump: skip counts, qscale and/or mb type,
 * one character/field per MB, selected via the debug flags. */
1312 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1315 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1316 switch (pict->pict_type) {
1317 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1318 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1319 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1320 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1321 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1322 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1324 for(y=0; y<s->mb_height; y++){
1325 for(x=0; x<s->mb_width; x++){
1326 if(s->avctx->debug&FF_DEBUG_SKIP){
/* clamp the skip counter to one digit for the table layout */
1327 int count= s->mbskip_table[x + y*s->mb_stride];
1328 if(count>9) count=9;
1329 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1331 if(s->avctx->debug&FF_DEBUG_QP){
1332 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1334 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1335 int mb_type= pict->mb_type[x + y*s->mb_stride];
1336 //Type & MV direction
1338 av_log(s->avctx, AV_LOG_DEBUG, "P");
1339 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1340 av_log(s->avctx, AV_LOG_DEBUG, "A");
1341 else if(IS_INTRA4x4(mb_type))
1342 av_log(s->avctx, AV_LOG_DEBUG, "i");
1343 else if(IS_INTRA16x16(mb_type))
1344 av_log(s->avctx, AV_LOG_DEBUG, "I");
1345 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1346 av_log(s->avctx, AV_LOG_DEBUG, "d");
1347 else if(IS_DIRECT(mb_type))
1348 av_log(s->avctx, AV_LOG_DEBUG, "D");
1349 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1350 av_log(s->avctx, AV_LOG_DEBUG, "g");
1351 else if(IS_GMC(mb_type))
1352 av_log(s->avctx, AV_LOG_DEBUG, "G");
1353 else if(IS_SKIP(mb_type))
1354 av_log(s->avctx, AV_LOG_DEBUG, "S");
1355 else if(!USES_LIST(mb_type, 1))
1356 av_log(s->avctx, AV_LOG_DEBUG, ">");
1357 else if(!USES_LIST(mb_type, 0))
1358 av_log(s->avctx, AV_LOG_DEBUG, "<");
/* remaining case: bi-predicted, uses both reference lists */
1360 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1361 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: block partitioning */
1366 av_log(s->avctx, AV_LOG_DEBUG, "+");
1367 else if(IS_16X8(mb_type))
1368 av_log(s->avctx, AV_LOG_DEBUG, "-");
1369 else if(IS_8X16(mb_type))
1370 av_log(s->avctx, AV_LOG_DEBUG, "|");
1371 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1372 av_log(s->avctx, AV_LOG_DEBUG, " ");
1374 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: interlaced flag */
1377 if(IS_INTERLACED(mb_type))
1378 av_log(s->avctx, AV_LOG_DEBUG, "=");
1380 av_log(s->avctx, AV_LOG_DEBUG, " ");
1382 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1384 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* Visual overlays (MV arrows, QP shading, MB-type coloring) are drawn into
 * a private copy of the frame so the decoder's buffers are not modified. */
1388 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1389 const int shift= 1 + s->quarter_sample;
1393 int h_chroma_shift, v_chroma_shift, block_height;
1394 const int width = s->avctx->width;
1395 const int height= s->avctx->height;
1396 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1397 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1398 s->low_delay=0; //needed to see the vectors without trashing the buffers
1400 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1402 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1403 pict->data[i]= s->visualization_buffer[i];
1405 pict->type= FF_BUFFER_TYPE_COPY;
1407 block_height = 16>>v_chroma_shift;
1409 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1411 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1412 const int mb_index= mb_x + mb_y*s->mb_stride;
/* motion vector visualization: type 0 = P forward, 1 = B forward,
 * 2 = B backward, each gated by its own debug_mv flag */
1413 if((s->avctx->debug_mv) && pict->motion_val){
1415 for(type=0; type<3; type++){
1418 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1422 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1426 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1431 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per 8x8 sub-block, anchored at each sub-block center */
1434 if(IS_8X8(pict->mb_type[mb_index])){
1437 int sx= mb_x*16 + 4 + 8*(i&1);
1438 int sy= mb_y*16 + 4 + 8*(i>>1);
1439 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1440 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1441 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1442 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1444 }else if(IS_16X8(pict->mb_type[mb_index])){
1448 int sy=mb_y*16 + 4 + 8*i;
1449 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1450 int mx=(pict->motion_val[direction][xy][0]>>shift);
1451 int my=(pict->motion_val[direction][xy][1]>>shift);
/* field-coded partitions have their vertical component scaled */
1453 if(IS_INTERLACED(pict->mb_type[mb_index]))
1456 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1458 }else if(IS_8X16(pict->mb_type[mb_index])){
1461 int sx=mb_x*16 + 4 + 8*i;
1463 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1464 int mx=(pict->motion_val[direction][xy][0]>>shift);
1465 int my=(pict->motion_val[direction][xy][1]>>shift);
1467 if(IS_INTERLACED(pict->mb_type[mb_index]))
1470 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* default: single 16x16 vector anchored at the MB center */
1473 int sx= mb_x*16 + 8;
1474 int sy= mb_y*16 + 8;
1475 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1476 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1477 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1478 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: shade chroma planes proportionally to qscale (max 31) */
1482 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1483 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1485 for(y=0; y<block_height; y++){
1486 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1487 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualization: color each MB's chroma by its prediction type */
1490 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1491 int mb_type= pict->mb_type[mb_index];
/* pick a U/V pair on a hue circle: angle theta, saturation r */
1494 #define COLOR(theta, r)\
1495 u= (int)(128 + r*cos(theta*3.141592/180));\
1496 v= (int)(128 + r*sin(theta*3.141592/180));
1500 if(IS_PCM(mb_type)){
1502 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1504 }else if(IS_INTRA4x4(mb_type)){
1506 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1508 }else if(IS_DIRECT(mb_type)){
1510 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1512 }else if(IS_GMC(mb_type)){
1514 }else if(IS_SKIP(mb_type)){
1516 }else if(!USES_LIST(mb_type, 1)){
1518 }else if(!USES_LIST(mb_type, 0)){
1521 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the byte across a 64-bit word to fill 8 pixels at once */
1525 u*= 0x0101010101010101ULL;
1526 v*= 0x0101010101010101ULL;
1527 for(y=0; y<block_height; y++){
1528 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1529 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* segmentation: XOR-toggle luma along the internal partition boundaries */
1533 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1534 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1535 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1537 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1539 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* for sub-8x8 MV resolution, mark 4x4 splits whose vectors differ */
1541 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1542 int dm= 1 << (mv_sample_log2-2);
1544 int sx= mb_x*16 + 8*(i&1);
1545 int sy= mb_y*16 + 8*(i>>1);
1546 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1548 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1549 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1551 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1552 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1553 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1557 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset so the skip table is clean for the next frame's dump */
1561 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode.
 * Computes the integer source position and sub-pel fraction from the motion
 * vector, falls back to the edge-emulation buffer when the read would cross
 * the picture border, then calls the chroma MC function selected by lowres. */
1567 static inline int hpel_motion_lowres(MpegEncContext *s,
1568 uint8_t *dest, uint8_t *src,
1569 int field_based, int field_select,
1570 int src_x, int src_y,
1571 int width, int height, int stride,
1572 int h_edge_pos, int v_edge_pos,
1573 int w, int h, h264_chroma_mc_func *pix_op,
1574 int motion_x, int motion_y)
1576 const int lowres= s->avctx->lowres;
1577 const int op_index= FFMIN(lowres, 2);
/* mask extracting the sub-pel fraction at this lowres scale */
1578 const int s_mask= (2<<lowres)-1;
1582 if(s->quarter_sample){
1587 sx= motion_x & s_mask;
1588 sy= motion_y & s_mask;
/* integer part of the motion vector, scaled down by the lowres factor */
1589 src_x += motion_x >> (lowres+1);
1590 src_y += motion_y >> (lowres+1);
1592 src += src_y * stride + src_x;
/* unsigned compare catches negative positions as well as overflow past the
 * edge; !!sx / !!sy account for the extra sample needed for interpolation */
1594 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1595 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1596 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1597 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1598 src= s->edge_emu_buffer;
/* renormalize the sub-pel fraction to the 1/8-pel range of pix_op */
1602 sx= (sx << 2) >> lowres;
1603 sy= (sy << 2) >> lowres;
1606 pix_op[op_index](dest, src, stride, h, sx, sy);
1610 /* apply one mpeg motion vector to the three components */
1611 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1612 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1613 int field_based, int bottom_field, int field_select,
1614 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1615 int motion_x, int motion_y, int h, int mb_y)
1617 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1618 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1619 const int lowres= s->avctx->lowres;
1620 const int op_index= FFMIN(lowres, 2);
1621 const int block_s= 8>>lowres;
/* mask extracting the sub-pel fraction at this lowres scale */
1622 const int s_mask= (2<<lowres)-1;
1623 const int h_edge_pos = s->h_edge_pos >> lowres;
1624 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field-based access doubles the stride to address a single field */
1625 linesize = s->current_picture.linesize[0] << field_based;
1626 uvlinesize = s->current_picture.linesize[1] << field_based;
1628 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1634 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
/* split the luma MV into sub-pel fraction (sx/sy) and integer position */
1637 sx= motion_x & s_mask;
1638 sy= motion_y & s_mask;
1639 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1640 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per format */
1642 if (s->out_format == FMT_H263) {
1643 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1644 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1647 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1650 uvsx = (2*mx) & s_mask;
1651 uvsy = (2*my) & s_mask;
1652 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1653 uvsrc_y = mb_y*block_s + (my >> lowres);
1659 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1660 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1663 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1664 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1665 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* if the read would cross the picture border, go through the
 * edge-emulation buffer for all three planes */
1667 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1668 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1669 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1670 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1671 ptr_y = s->edge_emu_buffer;
/* chroma is skipped entirely in grayscale-only decoding */
1672 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1673 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1674 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1675 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1676 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1677 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1683 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1684 dest_y += s->linesize;
1685 dest_cb+= s->uvlinesize;
1686 dest_cr+= s->uvlinesize;
1690 ptr_y += s->linesize;
1691 ptr_cb+= s->uvlinesize;
1692 ptr_cr+= s->uvlinesize;
/* renormalize sub-pel fractions and run the MC functions */
1695 sx= (sx << 2) >> lowres;
1696 sy= (sy << 2) >> lowres;
1697 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1699 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1700 uvsx= (uvsx << 2) >> lowres;
1701 uvsy= (uvsy << 2) >> lowres;
1702 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1703 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1705 //FIXME h261 lowres loop filter
/* Chroma motion compensation for a 4MV macroblock in lowres mode: the four
 * luma vectors are merged into a single chroma vector (H.263 rounding) and
 * applied to both chroma planes. */
1708 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1709 uint8_t *dest_cb, uint8_t *dest_cr,
1710 uint8_t **ref_picture,
1711 h264_chroma_mc_func *pix_op,
1713 const int lowres= s->avctx->lowres;
1714 const int op_index= FFMIN(lowres, 2);
1715 const int block_s= 8>>lowres;
1716 const int s_mask= (2<<lowres)-1;
/* chroma edges are at half the luma resolution, hence the extra >>1 */
1717 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1718 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1719 int emu=0, src_x, src_y, offset, sx, sy;
1722 if(s->quarter_sample){
1727 /* In case of 8X8, we construct a single chroma motion vector
1728 with a special rounding */
1729 mx= ff_h263_round_chroma(mx);
1730 my= ff_h263_round_chroma(my);
1734 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1735 src_y = s->mb_y*block_s + (my >> (lowres+1));
1737 offset = src_y * s->uvlinesize + src_x;
1738 ptr = ref_picture[1] + offset;
1739 if(s->flags&CODEC_FLAG_EMU_EDGE){
/* unsigned compare catches negative positions as well as edge overflow */
1740 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1741 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1742 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1743 ptr= s->edge_emu_buffer;
1747 sx= (sx << 2) >> lowres;
1748 sy= (sy << 2) >> lowres;
1749 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and sub-pel fraction as Cb */
1751 ptr = ref_picture[2] + offset;
1753 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1754 ptr= s->edge_emu_buffer;
1756 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1760 * motion compensation of a single macroblock
1762 * @param dest_y luma destination pointer
1763 * @param dest_cb chroma cb/u destination pointer
1764 * @param dest_cr chroma cr/v destination pointer
1765 * @param dir direction (0->forward, 1->backward)
1766 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1767 * @param pix_op halfpel motion compensation function (average or put normally)
1768 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1770 static inline void MPV_motion_lowres(MpegEncContext *s,
1771 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1772 int dir, uint8_t **ref_picture,
1773 h264_chroma_mc_func *pix_op)
1777 const int lowres= s->avctx->lowres;
1778 const int block_s= 8>>lowres;
/* dispatch on the MV partitioning of the macroblock */
1783 switch(s->mv_type) {
/* single 16x16 vector covering the whole macroblock */
1785 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1787 ref_picture, pix_op,
1788 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one vector per 8x8 luma block; chroma gets one averaged vector */
1794 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1795 ref_picture[0], 0, 0,
1796 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1797 s->width, s->height, s->linesize,
1798 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1799 block_s, block_s, pix_op,
1800 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs for the combined chroma vector */
1802 mx += s->mv[dir][i][0];
1803 my += s->mv[dir][i][1];
1806 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1807 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field-predicted MBs: frame pictures compensate top and bottom fields
 * separately; field pictures use the field-select to pick the reference */
1810 if (s->picture_structure == PICT_FRAME) {
1812 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1813 1, 0, s->field_select[dir][0],
1814 ref_picture, pix_op,
1815 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1817 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1818 1, 1, s->field_select[dir][1],
1819 ref_picture, pix_op,
1820 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* opposite-parity field of the same frame lives in the current picture */
1822 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1823 ref_picture= s->current_picture_ptr->data;
1826 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1827 0, 0, s->field_select[dir][0],
1828 ref_picture, pix_op,
1829 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1834 uint8_t ** ref2picture;
1836 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1837 ref2picture= ref_picture;
1839 ref2picture= s->current_picture_ptr->data;
1842 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1843 0, 0, s->field_select[dir][i],
1844 ref2picture, pix_op,
1845 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1847 dest_y += 2*block_s*s->linesize;
1848 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1849 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* dual-prime style prediction: put the first block, then average the rest */
1853 if(s->picture_structure == PICT_FRAME){
1857 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1859 ref_picture, pix_op,
1860 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1862 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1866 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1867 0, 0, s->picture_structure != i+1,
1868 ref_picture, pix_op,
1869 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1871 // after put we make avg of the same block
1872 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1874 //opposite parity is always in the same frame if this is second field
1875 if(!s->first_field){
1876 ref_picture = s->current_picture_ptr->data;
1886 * find the lowest MB row referenced in the MVs
1888 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel MVs carry one extra fraction bit; shift half-pel MVs up to match */
1890 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1891 int my, off, i, mvs;
/* field pictures are not handled; report the last row (no early sync) */
1893 if (s->picture_structure != PICT_FRAME) goto unhandled;
1895 switch (s->mv_type) {
/* track the largest vertical displacement in either direction */
1909 for (i = 0; i < mvs; i++) {
1910 my = s->mv[dir][i][1]<<qpel_shift;
1911 my_max = FFMAX(my_max, my);
1912 my_min = FFMIN(my_min, my);
/* convert the worst-case displacement from MV units to MB rows (>>6 =
 * /64: 16 luma rows per MB times the 1/4-pel fraction), rounding up */
1915 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1917 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1919 return s->mb_height-1;
1922 /* put block[] to dest[]: dequantize with the intra unquantizer, then
 * inverse-transform and store (overwrite) the result */
1923 static inline void put_dct(MpegEncContext *s,
1924 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1926 s->dct_unquantize_intra(s, block, i, qscale);
1927 s->dsp.idct_put (dest, line_size, block);
1930 /* add block[] to dest[]: inverse-transform and add the residual onto the
 * prediction; skipped when the block has no coded coefficients */
1931 static inline void add_dct(MpegEncContext *s,
1932 DCTELEM *block, int i, uint8_t *dest, int line_size)
1934 if (s->block_last_index[i] >= 0) {
1935 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct() but dequantizes first with the inter unquantizer;
 * skipped when the block has no coded coefficients. */
1939 static inline void add_dequant_dct(MpegEncContext *s,
1940 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1942 if (s->block_last_index[i] >= 0) {
1943 s->dct_unquantize_inter(s, block, i, qscale);
1945 s->dsp.idct_add (dest, line_size, block);
1950 * cleans dc, ac, coded_block for the current non intra MB
1952 void ff_clean_intra_table_entries(MpegEncContext *s)
/* luma predictors are indexed at 8x8-block granularity */
1954 int wrap = s->b8_stride;
1955 int xy = s->block_index[0];
/* reset the four luma DC predictors to the neutral value 1024 */
1958 s->dc_val[0][xy + 1 ] =
1959 s->dc_val[0][xy + wrap] =
1960 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear the AC prediction rows/columns for the luma blocks */
1962 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1963 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1964 if (s->msmpeg4_version>=3) {
1965 s->coded_block[xy ] =
1966 s->coded_block[xy + 1 ] =
1967 s->coded_block[xy + wrap] =
1968 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors are indexed at macroblock granularity */
1971 wrap = s->mb_stride;
1972 xy = s->mb_x + s->mb_y * wrap;
1974 s->dc_val[2][xy] = 1024;
/* ac pred */
1976 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1977 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark this MB as non-intra so predictors are not reused */
1979 s->mbintra_table[xy]= 0;
1982 /* generic function called after a macroblock has been parsed by the
1983 decoder or after it has been encoded by the encoder.
1985 Important variables used:
1986 s->mb_intra : true if intra macroblock
1987 s->mv_dir : motion vector direction
1988 s->mv_type : motion vector type
1989 s->mv : motion vector
1990 s->interlaced_dct : true if interlaced dct used (mpeg2)
1992 static av_always_inline
1993 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1994 int lowres_flag, int is_mpeg12)
1996 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1997 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1998 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2002 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2003 /* save DCT coefficients */
2005 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
2006 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2008 for(j=0; j<64; j++){
2009 *dct++ = block[i][s->dsp.idct_permutation[j]];
2010 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2012 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2016 s->current_picture.qscale_table[mb_xy]= s->qscale;
2018 /* update DC predictors for P macroblocks */
2020 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2021 if(s->mbintra_table[mb_xy])
2022 ff_clean_intra_table_entries(s);
2026 s->last_dc[2] = 128 << s->intra_dc_precision;
2029 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2030 s->mbintra_table[mb_xy]=1;
2032 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2033 uint8_t *dest_y, *dest_cb, *dest_cr;
2034 int dct_linesize, dct_offset;
2035 op_pixels_func (*op_pix)[4];
2036 qpel_mc_func (*op_qpix)[16];
2037 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2038 const int uvlinesize= s->current_picture.linesize[1];
2039 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2040 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2042 /* avoid copy if macroblock skipped in last frame too */
2043 /* skip only during decoding as we might trash the buffers during encoding a bit */
2045 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2046 const int age= s->current_picture.age;
2050 if (s->mb_skipped) {
2052 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2054 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2055 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2057 /* if previous was skipped too, then nothing to do ! */
2058 if (*mbskip_ptr >= age && s->current_picture.reference){
2061 } else if(!s->current_picture.reference){
2062 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2063 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2065 *mbskip_ptr = 0; /* not skipped */
2069 dct_linesize = linesize << s->interlaced_dct;
2070 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2074 dest_cb= s->dest[1];
2075 dest_cr= s->dest[2];
2077 dest_y = s->b_scratchpad;
2078 dest_cb= s->b_scratchpad+16*linesize;
2079 dest_cr= s->b_scratchpad+32*linesize;
2083 /* motion handling */
2084 /* decoding or more than one mb_type (MC was already done otherwise) */
2087 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2088 if (s->mv_dir & MV_DIR_FORWARD) {
2089 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2091 if (s->mv_dir & MV_DIR_BACKWARD) {
2092 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2097 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2099 if (s->mv_dir & MV_DIR_FORWARD) {
2100 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
2101 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2103 if (s->mv_dir & MV_DIR_BACKWARD) {
2104 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
2107 op_qpix= s->me.qpel_put;
2108 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2109 op_pix = s->dsp.put_pixels_tab;
2111 op_pix = s->dsp.put_no_rnd_pixels_tab;
2113 if (s->mv_dir & MV_DIR_FORWARD) {
2114 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2115 op_pix = s->dsp.avg_pixels_tab;
2116 op_qpix= s->me.qpel_avg;
2118 if (s->mv_dir & MV_DIR_BACKWARD) {
2119 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2124 /* skip dequant / idct if we are really late ;) */
2125 if(s->avctx->skip_idct){
2126 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2127 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2128 || s->avctx->skip_idct >= AVDISCARD_ALL)
2132 /* add dct residue */
2133 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2134 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2135 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2136 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2137 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2138 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2140 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2141 if (s->chroma_y_shift){
2142 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2143 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2147 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2148 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2149 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2150 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2153 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2154 add_dct(s, block[0], 0, dest_y , dct_linesize);
2155 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2156 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2157 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2159 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2160 if(s->chroma_y_shift){//Chroma420
2161 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2162 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2165 dct_linesize = uvlinesize << s->interlaced_dct;
2166 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2168 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2169 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2170 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2171 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2172 if(!s->chroma_x_shift){//Chroma444
2173 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2174 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2175 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2176 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2181 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2182 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2185 /* dct only in intra block */
2186 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2187 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2188 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2189 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2190 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2192 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2193 if(s->chroma_y_shift){
2194 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2195 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2199 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2200 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2201 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2202 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2206 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2207 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2208 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2209 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2211 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2212 if(s->chroma_y_shift){
2213 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2214 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2217 dct_linesize = uvlinesize << s->interlaced_dct;
2218 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2220 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2221 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2222 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2223 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2224 if(!s->chroma_x_shift){//Chroma444
2225 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2226 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2227 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2228 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2236 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2237 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2238 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2243 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2245 if(s->out_format == FMT_MPEG1) {
2246 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2247 else MPV_decode_mb_internal(s, block, 0, 1);
2250 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2251 else MPV_decode_mb_internal(s, block, 0, 0);
2256 * @param h is the normal height, this will be reduced automatically if needed for the last row
2258 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2259 const int field_pic= s->picture_structure != PICT_FRAME;
2265 if (!s->avctx->hwaccel
2266 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2267 && s->unrestricted_mv
2268 && s->current_picture.reference
2270 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2271 int sides = 0, edge_h;
2272 if (y==0) sides |= EDGE_TOP;
2273 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2275 edge_h= FFMIN(h, s->v_edge_pos - y);
2277 s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize , s->h_edge_pos , edge_h , EDGE_WIDTH , sides);
2278 s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>1)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>1, edge_h>>1, EDGE_WIDTH/2, sides);
2279 s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>1)*s->uvlinesize, s->uvlinesize, s->h_edge_pos>>1, edge_h>>1, EDGE_WIDTH/2, sides);
2282 h= FFMIN(h, s->avctx->height - y);
2284 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2286 if (s->avctx->draw_horiz_band) {
2290 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2291 src= (AVFrame*)s->current_picture_ptr;
2292 else if(s->last_picture_ptr)
2293 src= (AVFrame*)s->last_picture_ptr;
2297 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2303 offset[0]= y * s->linesize;
2305 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2311 s->avctx->draw_horiz_band(s->avctx, src, offset,
2312 y, s->picture_structure, h);
2316 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2317 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2318 const int uvlinesize= s->current_picture.linesize[1];
2319 const int mb_size= 4 - s->avctx->lowres;
2321 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2322 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2323 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2324 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2325 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2326 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2327 //block_index is not used by mpeg2, so it is not affected by chroma_format
2329 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2330 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2331 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2333 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2335 if(s->picture_structure==PICT_FRAME){
2336 s->dest[0] += s->mb_y * linesize << mb_size;
2337 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2338 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2340 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2341 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2342 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2343 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2348 void ff_mpeg_flush(AVCodecContext *avctx){
2350 MpegEncContext *s = avctx->priv_data;
2352 if(s==NULL || s->picture==NULL)
2355 for(i=0; i<s->picture_count; i++){
2356 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2357 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2358 free_frame_buffer(s, &s->picture[i]);
2360 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2362 s->mb_x= s->mb_y= 0;
2365 s->parse_context.state= -1;
2366 s->parse_context.frame_start_found= 0;
2367 s->parse_context.overread= 0;
2368 s->parse_context.overread_index= 0;
2369 s->parse_context.index= 0;
2370 s->parse_context.last_index= 0;
2371 s->bitstream_buffer_size=0;
2375 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2376 DCTELEM *block, int n, int qscale)
2378 int i, level, nCoeffs;
2379 const uint16_t *quant_matrix;
2381 nCoeffs= s->block_last_index[n];
2384 block[0] = block[0] * s->y_dc_scale;
2386 block[0] = block[0] * s->c_dc_scale;
2387 /* XXX: only mpeg1 */
2388 quant_matrix = s->intra_matrix;
2389 for(i=1;i<=nCoeffs;i++) {
2390 int j= s->intra_scantable.permutated[i];
2395 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2396 level = (level - 1) | 1;
2399 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2400 level = (level - 1) | 1;
2407 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2408 DCTELEM *block, int n, int qscale)
2410 int i, level, nCoeffs;
2411 const uint16_t *quant_matrix;
2413 nCoeffs= s->block_last_index[n];
2415 quant_matrix = s->inter_matrix;
2416 for(i=0; i<=nCoeffs; i++) {
2417 int j= s->intra_scantable.permutated[i];
2422 level = (((level << 1) + 1) * qscale *
2423 ((int) (quant_matrix[j]))) >> 4;
2424 level = (level - 1) | 1;
2427 level = (((level << 1) + 1) * qscale *
2428 ((int) (quant_matrix[j]))) >> 4;
2429 level = (level - 1) | 1;
2436 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2437 DCTELEM *block, int n, int qscale)
2439 int i, level, nCoeffs;
2440 const uint16_t *quant_matrix;
2442 if(s->alternate_scan) nCoeffs= 63;
2443 else nCoeffs= s->block_last_index[n];
2446 block[0] = block[0] * s->y_dc_scale;
2448 block[0] = block[0] * s->c_dc_scale;
2449 quant_matrix = s->intra_matrix;
2450 for(i=1;i<=nCoeffs;i++) {
2451 int j= s->intra_scantable.permutated[i];
2456 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2459 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2466 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2467 DCTELEM *block, int n, int qscale)
2469 int i, level, nCoeffs;
2470 const uint16_t *quant_matrix;
2473 if(s->alternate_scan) nCoeffs= 63;
2474 else nCoeffs= s->block_last_index[n];
2477 block[0] = block[0] * s->y_dc_scale;
2479 block[0] = block[0] * s->c_dc_scale;
2480 quant_matrix = s->intra_matrix;
2481 for(i=1;i<=nCoeffs;i++) {
2482 int j= s->intra_scantable.permutated[i];
2487 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2490 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2499 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2500 DCTELEM *block, int n, int qscale)
2502 int i, level, nCoeffs;
2503 const uint16_t *quant_matrix;
2506 if(s->alternate_scan) nCoeffs= 63;
2507 else nCoeffs= s->block_last_index[n];
2509 quant_matrix = s->inter_matrix;
2510 for(i=0; i<=nCoeffs; i++) {
2511 int j= s->intra_scantable.permutated[i];
2516 level = (((level << 1) + 1) * qscale *
2517 ((int) (quant_matrix[j]))) >> 4;
2520 level = (((level << 1) + 1) * qscale *
2521 ((int) (quant_matrix[j]))) >> 4;
2530 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2531 DCTELEM *block, int n, int qscale)
2533 int i, level, qmul, qadd;
2536 assert(s->block_last_index[n]>=0);
2542 block[0] = block[0] * s->y_dc_scale;
2544 block[0] = block[0] * s->c_dc_scale;
2545 qadd = (qscale - 1) | 1;
2552 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2554 for(i=1; i<=nCoeffs; i++) {
2558 level = level * qmul - qadd;
2560 level = level * qmul + qadd;
2567 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2568 DCTELEM *block, int n, int qscale)
2570 int i, level, qmul, qadd;
2573 assert(s->block_last_index[n]>=0);
2575 qadd = (qscale - 1) | 1;
2578 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2580 for(i=0; i<=nCoeffs; i++) {
2584 level = level * qmul - qadd;
2586 level = level * qmul + qadd;
2594 * set qscale and update qscale dependent variables.
2596 void ff_set_qscale(MpegEncContext * s, int qscale)
2600 else if (qscale > 31)
2604 s->chroma_qscale= s->chroma_qscale_table[qscale];
2606 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2607 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2610 void MPV_report_decode_progress(MpegEncContext *s)
2612 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame)
2613 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);