2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
// Forward declarations of the per-standard inverse-quantization routines;
// ff_dct_common_init() below installs these as function pointers on the
// MpegEncContext so codecs can dispatch without knowing the standard.
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
// Identity mapping: by default chroma qscale equals luma qscale (codecs
// with a non-linear chroma table override s->chroma_qscale_table).
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
// MPEG-1 uses a constant intra-DC quantizer step of 8 regardless of qscale.
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
// MPEG-2 intra-DC precision tables: constant step 4 / 2 / 1 selected by the
// sequence's intra_dc_precision (see ff_mpeg2_dc_scale_table[] below).
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
// Index 0..3 selects the DC scale table; entry 0 (step 8) is shared with
// the MPEG-1 table above.
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
// Pixel-format lists exported for codecs that decode to 4:2:0 sampling
// (the second additionally lists hwaccel formats).
// NOTE(review): the initializer contents are not visible in this chunk —
// confirm against the full file.
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
// Scan [p, end) for an MPEG start code prefix. *state is a rolling 32-bit
// shift register of the last four bytes, so a start code split across
// successive buffer chunks is still detected.
// NOTE(review): the surrounding loop and return statements are elided in
// this chunk; the visible lines are the inner matching/skip logic.
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
133 uint32_t tmp= *state << 8;
134 *state= tmp + *(p++);
135 if(tmp == 0x100 || p==end)
// Fast-skip: use the already-read trailing bytes to jump past positions
// that provably cannot start a 00 00 01 prefix.
140 if (p[-1] > 1 ) p+= 3;
141 else if(p[-2] ) p+= 2;
142 else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
// Installs the C reference dequantizers, lets each enabled architecture
// override them with optimized versions, then builds the permuted
// scantables used for coefficient (de)coding.
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
159 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
160 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
161 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
// Bit-exact output requested: use the variant that matches the spec exactly.
163 if(s->flags & CODEC_FLAG_BITEXACT)
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
165 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// Per-architecture overrides. NOTE(review): the #if/#elif guards that
// normally select exactly one of these are elided in this chunk.
168 MPV_common_init_mmx(s);
170 MPV_common_init_axp(s);
172 MPV_common_init_mlib(s);
174 MPV_common_init_mmi(s);
176 MPV_common_init_arm(s);
178 MPV_common_init_altivec(s);
180 MPV_common_init_bfin(s);
183 /* load & permutate scantables
184 note: only wmv uses different ones
// Scan order depends on alternate_scan; tables are permuted to match the
// IDCT's coefficient layout (s->dsp.idct_permutation).
186 if(s->alternate_scan){
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
// Copies src into dst and marks dst as a copy so its buffers are not
// freed twice. NOTE(review): the struct-copy statement itself is elided
// in this chunk.
199 void ff_copy_picture(Picture *dst, Picture *src){
201 dst->type= FF_BUFFER_TYPE_COPY;
205 * Release a frame buffer
// Returns the frame's data buffers to the (possibly threaded) buffer pool
// and frees any hwaccel private data attached to the picture.
207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
209 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
210 av_freep(&pic->hwaccel_picture_private);
214 * Allocate a frame buffer
// Acquires pixel buffers for pic via ff_thread_get_buffer() and validates
// the result (buffer present, strides consistent with the context).
// Returns 0 on success, negative on failure (error paths elided here).
216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
// Hardware acceleration may need per-picture private state.
220 if (s->avctx->hwaccel) {
221 assert(!pic->hwaccel_picture_private);
222 if (s->avctx->hwaccel->priv_data_size) {
223 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
224 if (!pic->hwaccel_picture_private) {
225 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
231 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
233 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
234 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
235 av_freep(&pic->hwaccel_picture_private);
// The context caches linesizes; a buffer with different strides than the
// first allocation cannot be used.
239 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
240 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
241 free_frame_buffer(s, pic);
// Both chroma planes must share one stride (code here assumes that).
245 if (pic->linesize[1] != pic->linesize[2]) {
246 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
247 free_frame_buffer(s, pic);
255 * allocates a Picture
256 * The pixels are allocated/set by calling get_buffer() if shared=0
// Allocates pixel buffers (unless shared) plus all per-macroblock side
// tables (qscale, mb_type, motion vectors, etc.). Returns 0 on success,
// frees the frame buffer and returns failure via the fail label otherwise.
258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
259 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
260 const int mb_array_size= s->mb_stride*s->mb_height;
261 const int b8_array_size= s->b8_stride*s->mb_height*2;
262 const int b4_array_size= s->b4_stride*s->mb_height*4;
// shared=1: caller owns the pixels; just mark the picture as shared.
267 assert(pic->data[0]);
268 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
269 pic->type= FF_BUFFER_TYPE_SHARED;
// shared=0: pixels must not be set yet; get them from the buffer pool.
271 assert(!pic->data[0]);
273 if (alloc_frame_buffer(s, pic) < 0)
276 s->linesize = pic->linesize[0];
277 s->uvlinesize= pic->linesize[1];
// First allocation for this Picture: create the side-data tables.
280 if(pic->qscale_table==NULL){
282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
289 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
// Offset so index [-stride-1 .. ] is valid for edge macroblocks.
290 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
291 pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
// H.264 stores motion vectors at 4x4 granularity, others at 8x8.
292 if(s->out_format == FMT_H264){
294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
295 pic->motion_val[i]= pic->motion_val_base[i]+4;
296 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
298 pic->motion_subsample_log2= 2;
299 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
301 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
302 pic->motion_val[i]= pic->motion_val_base[i]+4;
303 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
305 pic->motion_subsample_log2= 3;
307 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
308 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
310 pic->qstride= s->mb_stride;
311 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
314 /* It might be nicer if the application would keep track of these
315 * but it would require an API change. */
// Shift the picture-type history and record the current type, so the
// age-based skipped-MB optimization below stays valid across B-frames.
316 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
317 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
318 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
319 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
323 fail: //for the FF_ALLOCZ_OR_GOTO macro
325 free_frame_buffer(s, pic);
330 * deallocates a picture
// Releases the pixel buffers (unless owned by the caller, i.e. SHARED)
// and every per-macroblock side table allocated by ff_alloc_picture().
332 static void free_picture(MpegEncContext *s, Picture *pic){
335 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
336 free_frame_buffer(s, pic);
339 av_freep(&pic->mb_var);
340 av_freep(&pic->mc_mb_var);
341 av_freep(&pic->mb_mean);
342 av_freep(&pic->mbskip_table);
343 av_freep(&pic->qscale_table_base);
344 av_freep(&pic->mb_type_base);
345 av_freep(&pic->dct_coeff);
346 av_freep(&pic->pan_scan);
// NOTE(review): the loop header over the two prediction directions is
// elided in this chunk.
349 av_freep(&pic->motion_val_base[i]);
350 av_freep(&pic->ref_index[i]);
353 if(pic->type == FF_BUFFER_TYPE_SHARED){
// Allocates the per-thread scratch buffers (edge emulation, motion
// estimation scratchpads, DCT blocks, AC prediction values) for one
// slice-threading context. Returns 0 on success, -1 on allocation failure
// (cleanup happens later in MPV_common_end()).
362 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
363 int y_size = s->b8_stride * (2 * s->mb_height + 1);
364 int c_size = s->mb_stride * (s->mb_height + 1);
365 int yc_size = y_size + 2 * c_size;
368 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
369 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
370 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
372 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
// One allocation is shared between several scratch uses; they are never
// live at the same time.
373 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
374 s->me.temp= s->me.scratchpad;
375 s->rd_scratchpad= s->me.scratchpad;
376 s->b_scratchpad= s->me.scratchpad;
377 s->obmc_scratchpad= s->me.scratchpad + 16;
379 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
380 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
381 if(s->avctx->noise_reduction){
382 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
385 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
386 s->block= s->blocks[0];
// NOTE(review): loop header over the 12 block pointers is elided here.
389 s->pblocks[i] = &s->block[i];
// H.263-family codecs need AC prediction state per block row.
392 if (s->out_format == FMT_H263) {
394 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
395 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
396 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
397 s->ac_val[2] = s->ac_val[1] + c_size;
402 return -1; //free() through MPV_common_end()
// Frees everything allocated by init_duplicate_context() for one thread
// context; safe to call on a partially-initialized context since
// av_freep() tolerates NULL.
405 static void free_duplicate_context(MpegEncContext *s){
408 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
// rd/b/obmc scratchpads alias me.scratchpad, so only the owner is freed;
// the aliases are just cleared.
409 av_freep(&s->me.scratchpad);
413 s->obmc_scratchpad= NULL;
415 av_freep(&s->dct_error_sum);
416 av_freep(&s->me.map);
417 av_freep(&s->me.score_map);
418 av_freep(&s->blocks);
419 av_freep(&s->ac_val_base);
// Saves the per-thread fields of src into bak so that
// ff_update_duplicate_context() can memcpy the whole context and then
// restore these thread-local pointers. NOTE(review): most COPY() lines
// are elided in this chunk.
423 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
424 #define COPY(a) bak->a= src->a
425 COPY(allocated_edge_emu_buffer);
426 COPY(edge_emu_buffer);
431 COPY(obmc_scratchpad);
438 COPY(me.map_generation);
// Synchronizes a slice-thread context with the master: copy the whole
// struct from src, but preserve dst's own scratch buffers and per-thread
// state via backup/restore, then re-point pblocks into dst's own blocks.
450 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
453 //FIXME copy only needed parts
455 backup_duplicate_context(&bak, dst);
456 memcpy(dst, src, sizeof(MpegEncContext));
457 backup_duplicate_context(dst, &bak);
// NOTE(review): loop header over the block pointers is elided here.
459 dst->pblocks[i] = &dst->block[i];
461 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
// Frame-threading: copy decoding state from the previous thread's context
// (src/s1) into this thread's context (dst/s), rebasing picture pointers
// and duplicating the bitstream buffer. Returns 0 on success.
464 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
466 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
468 if(dst == src || !s1->context_initialized) return 0;
470 //FIXME can parameters change on I-frames? in that case dst may need a reinit
// First call for this thread: clone the source context wholesale, then
// give this copy its own picture range and bitstream buffer.
471 if(!s->context_initialized){
472 memcpy(s, s1, sizeof(MpegEncContext));
475 s->picture_range_start += MAX_PICTURE_COUNT;
476 s->picture_range_end += MAX_PICTURE_COUNT;
477 s->bitstream_buffer = NULL;
478 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
483 s->avctx->coded_height = s1->avctx->coded_height;
484 s->avctx->coded_width = s1->avctx->coded_width;
485 s->avctx->width = s1->avctx->width;
486 s->avctx->height = s1->avctx->height;
488 s->coded_picture_number = s1->coded_picture_number;
489 s->picture_number = s1->picture_number;
490 s->input_picture_number = s1->input_picture_number;
492 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
// Struct-layout trick: copy the contiguous range of fields between
// last_picture and last_picture_ptr in one memcpy.
493 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
// Pointers into s1->picture[] must be rebased into s->picture[].
495 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
496 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
497 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
499 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
501 //Error/bug resilience
502 s->next_p_frame_damaged = s1->next_p_frame_damaged;
503 s->workaround_bugs = s1->workaround_bugs;
// Same layout trick for the MPEG4 field range (time_increment_bits..shape).
506 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
509 s->max_b_frames = s1->max_b_frames;
510 s->low_delay = s1->low_delay;
511 s->dropable = s1->dropable;
513 //DivX handling (doesn't work)
514 s->divx_packed = s1->divx_packed;
// Carry over any buffered bitstream (packed B-frame leftovers etc.).
516 if(s1->bitstream_buffer){
517 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
518 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
519 s->bitstream_buffer_size = s1->bitstream_buffer_size;
520 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
521 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
524 //MPEG2/interlacing info
525 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
527 if(!s1->first_field){
528 s->last_pict_type= s1->pict_type;
529 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
531 if(s1->pict_type!=FF_B_TYPE){
532 s->last_non_b_pict_type= s1->pict_type;
540 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
541 * the changed fields will not depend upon the prior state of the MpegEncContext.
543 void MPV_common_defaults(MpegEncContext *s){
545 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
546 s->chroma_qscale_table= ff_default_chroma_qscale_table;
// Assume progressive frame-coded content until headers say otherwise.
547 s->progressive_frame= 1;
548 s->progressive_sequence= 1;
549 s->picture_structure= PICT_FRAME;
551 s->coded_picture_number = 0;
552 s->picture_number = 0;
553 s->input_picture_number = 0;
555 s->picture_in_gop_number = 0;
// Default picture range; frame threads shift this per-thread in
// ff_mpeg_update_thread_context().
560 s->picture_range_start = 0;
561 s->picture_range_end = MAX_PICTURE_COUNT;
565 * sets the given MpegEncContext to defaults for decoding.
566 * the changed fields will not depend upon the prior state of the MpegEncContext.
568 void MPV_decode_defaults(MpegEncContext *s){
569 MPV_common_defaults(s);
573 * init common structure for both encoder and decoder.
574 * this assumes that some variables like width/height are already set
// Main context initialization: derives macroblock geometry from the frame
// size, allocates every shared table (MV tables, qmatrices, index maps,
// picture array, error-resilience tables), then sets up per-thread
// duplicate contexts for slice threading. Returns 0 on success; the
// elided fail paths return negative error codes.
576 av_cold int MPV_common_init(MpegEncContext *s)
578 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
579 threads = (s->encoding ||
581 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
582 s->avctx->thread_count : 1;
// Interlaced MPEG-2 needs mb_height rounded to a multiple of two field
// macroblock rows; H.264 manages its own mb_height.
584 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
585 s->mb_height = (s->height + 31) / 32 * 2;
586 else if (s->codec_id != CODEC_ID_H264)
587 s->mb_height = (s->height + 15) / 16;
589 if(s->avctx->pix_fmt == PIX_FMT_NONE){
590 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
// Clamp the slice-thread count: one thread per MB row at most.
594 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
595 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
596 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
597 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
598 s->avctx->thread_count, max_threads);
599 threads = max_threads;
602 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
605 dsputil_init(&s->dsp, s->avctx);
606 ff_dct_common_init(s);
608 s->flags= s->avctx->flags;
609 s->flags2= s->avctx->flags2;
611 if (s->width && s->height) {
// Strides have +1 so edge macroblocks can index one entry outside the
// visible area without wrapping.
612 s->mb_width = (s->width + 15) / 16;
613 s->mb_stride = s->mb_width + 1;
614 s->b8_stride = s->mb_width*2 + 1;
615 s->b4_stride = s->mb_width*4 + 1;
616 mb_array_size= s->mb_height * s->mb_stride;
617 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
619 /* set chroma shifts */
620 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
621 &(s->chroma_y_shift) );
623 /* set default edge pos, will be overriden in decode_header if needed */
624 s->h_edge_pos= s->mb_width*16;
625 s->v_edge_pos= s->mb_height*16;
627 s->mb_num = s->mb_width * s->mb_height;
632 s->block_wrap[3]= s->b8_stride;
634 s->block_wrap[5]= s->mb_stride;
636 y_size = s->b8_stride * (2 * s->mb_height + 1);
637 c_size = s->mb_stride * (s->mb_height + 1);
638 yc_size = y_size + 2 * c_size;
640 /* convert fourcc to upper case */
641 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
643 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
645 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
// Map linear MB index -> (x + y*mb_stride) position.
647 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
648 for(y=0; y<s->mb_height; y++){
649 for(x=0; x<s->mb_width; x++){
650 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
653 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
656 /* Allocate MV tables */
657 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
658 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
659 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
660 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
661 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
662 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
// Offset each table so index [-stride-1] is addressable for edge MBs.
663 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
664 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
665 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
666 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
667 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
668 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
670 if(s->msmpeg4_version){
671 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
673 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
675 /* Allocate MB type table */
676 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
678 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
680 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
681 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
682 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
683 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
684 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
685 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
687 if(s->avctx->noise_reduction){
688 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
// Picture pool is scaled by the thread count (frame threading needs a
// disjoint range per thread).
693 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
694 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
695 for(i = 0; i < s->picture_count; i++) {
696 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
699 if (s->width && s->height) {
700 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
702 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
703 /* interlaced direct mode decoding tables */
// NOTE(review): the i/j/k loop headers are elided in this chunk.
708 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
709 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
711 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
712 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
713 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
715 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
718 if (s->out_format == FMT_H263) {
720 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
721 s->coded_block= s->coded_block_base + s->b8_stride + 1;
723 /* cbp, ac_pred, pred_dir */
724 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
725 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
728 if (s->h263_pred || s->h263_plus || !s->encoding) {
730 //MN: we need these for error resilience of intra-frames
731 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
732 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
733 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
734 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 is the reset value for DC prediction.
735 for(i=0;i<yc_size;i++)
736 s->dc_val_base[i] = 1024;
739 /* which mb is a intra block */
740 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
741 memset(s->mbintra_table, 1, mb_array_size);
743 /* init macroblock skip table */
744 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
745 //Note the +1 is for a quicker mpeg4 slice_end detection
746 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
748 s->parse_context.state= -1;
749 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
750 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
751 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
752 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
756 s->context_initialized = 1;
757 s->thread_context[0]= s;
759 if (s->width && s->height) {
// Slice threading: thread_context[0] is the master; the rest are clones
// each owning its own scratch buffers and MB-row range.
760 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
761 for(i=1; i<threads; i++){
762 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
763 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
766 for(i=0; i<threads; i++){
767 if(init_duplicate_context(s->thread_context[i], s) < 0)
769 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
770 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
773 if(init_duplicate_context(s, s) < 0) goto fail;
775 s->end_mb_y = s->mb_height;
785 /* init common structure for both encoder and decoder */
// Tears down everything MPV_common_init() allocated: per-thread contexts,
// MV and side-data tables, the picture pool, and debug buffers.
786 void MPV_common_end(MpegEncContext *s)
790 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
791 for(i=0; i<s->avctx->thread_count; i++){
792 free_duplicate_context(s->thread_context[i]);
// thread_context[0] is s itself, so only [1..] are heap-freed.
794 for(i=1; i<s->avctx->thread_count; i++){
795 av_freep(&s->thread_context[i]);
797 } else free_duplicate_context(s);
799 av_freep(&s->parse_context.buffer);
800 s->parse_context.buffer_size=0;
802 av_freep(&s->mb_type);
803 av_freep(&s->p_mv_table_base);
804 av_freep(&s->b_forw_mv_table_base);
805 av_freep(&s->b_back_mv_table_base);
806 av_freep(&s->b_bidir_forw_mv_table_base);
807 av_freep(&s->b_bidir_back_mv_table_base);
808 av_freep(&s->b_direct_mv_table_base);
// The offset aliases into the freed *_base tables are cleared manually.
810 s->b_forw_mv_table= NULL;
811 s->b_back_mv_table= NULL;
812 s->b_bidir_forw_mv_table= NULL;
813 s->b_bidir_back_mv_table= NULL;
814 s->b_direct_mv_table= NULL;
// NOTE(review): the i/j/k loop headers are elided in this chunk.
818 av_freep(&s->b_field_mv_table_base[i][j][k]);
819 s->b_field_mv_table[i][j][k]=NULL;
821 av_freep(&s->b_field_select_table[i][j]);
822 av_freep(&s->p_field_mv_table_base[i][j]);
823 s->p_field_mv_table[i][j]=NULL;
825 av_freep(&s->p_field_select_table[i]);
828 av_freep(&s->dc_val_base);
829 av_freep(&s->coded_block_base);
830 av_freep(&s->mbintra_table);
831 av_freep(&s->cbp_table);
832 av_freep(&s->pred_dir_table);
834 av_freep(&s->mbskip_table);
835 av_freep(&s->prev_pict_types);
836 av_freep(&s->bitstream_buffer);
837 s->allocated_bitstream_buffer_size=0;
839 av_freep(&s->avctx->stats_out);
840 av_freep(&s->ac_stats);
841 av_freep(&s->error_status_table);
842 av_freep(&s->mb_index2xy);
843 av_freep(&s->lambda_table);
844 av_freep(&s->q_intra_matrix);
845 av_freep(&s->q_inter_matrix);
846 av_freep(&s->q_intra_matrix16);
847 av_freep(&s->q_inter_matrix16);
848 av_freep(&s->input_picture);
849 av_freep(&s->reordered_input_picture);
850 av_freep(&s->dct_offset);
// Frame-thread copies share the picture pool with the master; only the
// owner frees the individual pictures.
852 if(s->picture && !s->avctx->is_copy){
853 for(i=0; i<s->picture_count; i++){
854 free_picture(s, &s->picture[i]);
857 av_freep(&s->picture);
858 s->context_initialized = 0;
861 s->current_picture_ptr= NULL;
862 s->linesize= s->uvlinesize= 0;
865 av_freep(&s->visualization_buffer[i]);
867 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
868 avcodec_default_free_buffers(s->avctx);
// Builds the derived lookup tables (max_level[], max_run[], index_run[])
// of an RLTable from its table_run/table_level arrays, for both the
// not-last and last coefficient halves. When static_store is given, the
// tables live there (and initialization is done only once); otherwise
// they are heap-allocated with av_malloc().
871 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
873 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
874 uint8_t index_run[MAX_RUN+1];
875 int last, run, level, start, end, i;
877 /* If table is static, we can quit if rl->max_level[0] is not NULL */
878 if(static_store && rl->max_level[0])
881 /* compute max_level[], max_run[] and index_run[] */
882 for(last=0;last<2;last++) {
// NOTE(review): the start/end selection for the two halves is elided in
// this chunk.
891 memset(max_level, 0, MAX_RUN + 1);
892 memset(max_run, 0, MAX_LEVEL + 1);
// rl->n marks "no entry" in index_run.
893 memset(index_run, rl->n, MAX_RUN + 1);
894 for(i=start;i<end;i++) {
895 run = rl->table_run[i];
896 level = rl->table_level[i];
// Record only the first (lowest) code index seen for each run.
897 if (index_run[run] == rl->n)
899 if (level > max_level[run])
900 max_level[run] = level;
901 if (run > max_run[level])
902 max_run[level] = run;
// Static storage: the three tables are packed back-to-back per half.
905 rl->max_level[last] = static_store[last];
907 rl->max_level[last] = av_malloc(MAX_RUN + 1);
908 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
910 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
912 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
913 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
915 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
917 rl->index_run[last] = av_malloc(MAX_RUN + 1);
918 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
// Precomputes the combined run/level/length VLC tables (rl_vlc[q]) for
// each qscale q, folding the dequantization (qmul/qadd) into the stored
// level so the decoder can skip a multiply per coefficient.
// NOTE(review): the q loop, qmul/qadd setup and escape handling are
// partially elided in this chunk.
922 void init_vlc_rl(RLTable *rl)
934 for(i=0; i<rl->vlc.table_size; i++){
935 int code= rl->vlc.table[i][0];
936 int len = rl->vlc.table[i][1];
939 if(len==0){ // illegal code
942 }else if(len<0){ //more bits needed
946 if(code==rl->n){ //esc
950 run= rl->table_run [code] + 1;
951 level= rl->table_level[code] * qmul + qadd;
// Codes in the "last" half get run biased by 192 as an end-of-block marker.
952 if(code >= rl->last) run+=192;
955 rl->rl_vlc[q][i].len= len;
956 rl->rl_vlc[q][i].level= level;
957 rl->rl_vlc[q][i].run= run;
// Frees the buffers of every picture in the pool that is not a reference
// frame and (unless remove_current is set) is not the current picture.
// Pictures owned by another slice-thread context (owner2) are skipped.
962 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
966 /* release non reference frames */
967 for(i=0; i<s->picture_count; i++){
968 if(s->picture[i].data[0] && !s->picture[i].reference
969 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
970 && (remove_current || &s->picture[i] != s->current_picture_ptr)
971 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
972 free_frame_buffer(s, &s->picture[i]);
// Returns the index of a free slot in this context's picture range.
// Preference order: completely unused slot (no data, no type), then a
// typed-but-empty slot, then any slot without data. On overflow it logs a
// fatal error (see comment below for why it does not return -1).
977 int ff_find_unused_picture(MpegEncContext *s, int shared){
// NOTE(review): the shared/non-shared branch structure is elided in this
// chunk; only the scan loops are visible.
981 for(i=s->picture_range_start; i<s->picture_range_end; i++){
982 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
985 for(i=s->picture_range_start; i<s->picture_range_end; i++){
986 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
988 for(i=s->picture_range_start; i<s->picture_range_end; i++){
989 if(s->picture[i].data[0]==NULL) return i;
993 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
994 /* We could return -1, but the codec would crash trying to draw into a
995 * non-existing frame anyway. This is safer than waiting for a random crash.
996 * Also the return of this is never useful, an encoder must only allocate
997 * as much as allowed in the specification. This has no relationship to how
998 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
999 * enough for such valid streams).
1000 * Plus, a decoder has to check stream validity and remove frames if too
1001 * many reference frames are around. Waiting for "OOM" is not correct at
1002 * all. Similarly, missing reference frames have to be replaced by
1003 * interpolated/MC frames, anything else is a bug in the codec ...
// Encoder noise reduction: refreshes the per-coefficient DCT offset table
// from the accumulated error sums, halving the accumulators periodically
// so they track recent statistics (simple exponential decay).
1009 static void update_noise_reduction(MpegEncContext *s){
1012 for(intra=0; intra<2; intra++){
// Halve both the counts and the error sums once enough blocks were seen,
// to keep the accumulators bounded.
1013 if(s->dct_count[intra] > (1<<16)){
1014 for(i=0; i<64; i++){
1015 s->dct_error_sum[intra][i] >>=1;
1017 s->dct_count[intra] >>= 1;
1020 for(i=0; i<64; i++){
1021 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1027 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1029 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1035 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1037 /* mark&release old frames */
1038 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
1039 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1040 free_frame_buffer(s, s->last_picture_ptr);
1042 /* release forgotten pictures */
1043 /* if(mpeg124/h263) */
1045 for(i=0; i<s->picture_count; i++){
1046 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1047 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1048 free_frame_buffer(s, &s->picture[i]);
1056 ff_release_unused_pictures(s, 1);
1058 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1059 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1061 i= ff_find_unused_picture(s, 0);
1062 pic= &s->picture[i];
1067 if (s->codec_id == CODEC_ID_H264)
1068 pic->reference = s->picture_structure;
1069 else if (s->pict_type != AV_PICTURE_TYPE_B)
1073 pic->coded_picture_number= s->coded_picture_number++;
1075 if(ff_alloc_picture(s, pic, 0) < 0)
1078 s->current_picture_ptr= pic;
1079 //FIXME use only the vars from current_pic
1080 s->current_picture_ptr->top_field_first= s->top_field_first;
1081 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1082 if(s->picture_structure != PICT_FRAME)
1083 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1085 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
1086 s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
1089 s->current_picture_ptr->pict_type= s->pict_type;
1090 // if(s->flags && CODEC_FLAG_QSCALE)
1091 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1092 s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
1094 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1096 if (s->pict_type != AV_PICTURE_TYPE_B) {
1097 s->last_picture_ptr= s->next_picture_ptr;
1099 s->next_picture_ptr= s->current_picture_ptr;
1101 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1102 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
1103 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
1104 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
1105 s->pict_type, s->dropable);*/
1107 if(s->codec_id != CODEC_ID_H264){
1108 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
1109 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1110 if (s->pict_type != AV_PICTURE_TYPE_I)
1111 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1112 else if (s->picture_structure != PICT_FRAME)
1113 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1115 /* Allocate a dummy frame */
1116 i= ff_find_unused_picture(s, 0);
1117 s->last_picture_ptr= &s->picture[i];
1118 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1120 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1121 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1123 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
1124 /* Allocate a dummy frame */
1125 i= ff_find_unused_picture(s, 0);
1126 s->next_picture_ptr= &s->picture[i];
1127 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1129 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1130 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1134 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1135 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1137 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
1139 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1142 if(s->picture_structure == PICT_BOTTOM_FIELD){
1143 s->current_picture.data[i] += s->current_picture.linesize[i];
1145 s->current_picture.linesize[i] *= 2;
1146 s->last_picture.linesize[i] *=2;
1147 s->next_picture.linesize[i] *=2;
1151 s->error_recognition= avctx->error_recognition;
1153 /* set dequantizer, we can't do it during init as it might change for mpeg4
1154 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1155 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1156 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1157 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1158 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1159 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1160 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1162 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1163 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1166 if(s->dct_error_sum){
1167 assert(s->avctx->noise_reduction && s->encoding);
1169 update_noise_reduction(s);
1172 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1173 return ff_xvmc_field_start(s, avctx);
1178 /* generic function for encode/decode called after a frame has been coded/decoded */
1179 void MPV_frame_end(MpegEncContext *s)
1182 /* redraw edges for the frame if decoding didn't complete */
1183 //just to make sure that all data is rendered.
1184 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1185 ff_xvmc_field_end(s);
1186 }else if((s->error_count || s->encoding)
1187 && !s->avctx->hwaccel
1188 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1189 && s->unrestricted_mv
1190 && s->current_picture.reference
1192 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1193 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1194 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1195 s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
1196 s->h_edge_pos , s->v_edge_pos,
1197 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1198 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
1199 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1200 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1201 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
1202 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1203 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1208 s->last_pict_type = s->pict_type;
1209 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1210 if(s->pict_type!=AV_PICTURE_TYPE_B){
1211 s->last_non_b_pict_type= s->pict_type;
1214 /* copy back current_picture variables */
1215 for(i=0; i<MAX_PICTURE_COUNT; i++){
1216 if(s->picture[i].data[0] == s->current_picture.data[0]){
1217 s->picture[i]= s->current_picture;
1221 assert(i<MAX_PICTURE_COUNT);
1225 /* release non-reference frames */
1226 for(i=0; i<s->picture_count; i++){
1227 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1228 free_frame_buffer(s, &s->picture[i]);
1232 // clear copies, to avoid confusion
1234 memset(&s->last_picture, 0, sizeof(Picture));
1235 memset(&s->next_picture, 0, sizeof(Picture));
1236 memset(&s->current_picture, 0, sizeof(Picture));
1238 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1240 if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
1241 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1246 * draws an line from (ex, ey) -> (sx, sy).
1247 * @param w width of the image
1248 * @param h height of the image
1249 * @param stride stride/linesize of the image
1250 * @param color color of the arrow
1252 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1255 sx= av_clip(sx, 0, w-1);
1256 sy= av_clip(sy, 0, h-1);
1257 ex= av_clip(ex, 0, w-1);
1258 ey= av_clip(ey, 0, h-1);
1260 buf[sy*stride + sx]+= color;
1262 if(FFABS(ex - sx) > FFABS(ey - sy)){
1264 FFSWAP(int, sx, ex);
1265 FFSWAP(int, sy, ey);
1267 buf+= sx + sy*stride;
1269 f= ((ey-sy)<<16)/ex;
1270 for(x= 0; x <= ex; x++){
1273 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1274 buf[(y+1)*stride + x]+= (color* fr )>>16;
1278 FFSWAP(int, sx, ex);
1279 FFSWAP(int, sy, ey);
1281 buf+= sx + sy*stride;
1283 if(ey) f= ((ex-sx)<<16)/ey;
1285 for(y= 0; y <= ey; y++){
1288 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1289 buf[y*stride + x+1]+= (color* fr )>>16;
1295 * draws an arrow from (ex, ey) -> (sx, sy).
1296 * @param w width of the image
1297 * @param h height of the image
1298 * @param stride stride/linesize of the image
1299 * @param color color of the arrow
1301 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1304 sx= av_clip(sx, -100, w+100);
1305 sy= av_clip(sy, -100, h+100);
1306 ex= av_clip(ex, -100, w+100);
1307 ey= av_clip(ey, -100, h+100);
1312 if(dx*dx + dy*dy > 3*3){
1315 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1317 //FIXME subpixel accuracy
1318 rx= ROUNDED_DIV(rx*3<<4, length);
1319 ry= ROUNDED_DIV(ry*3<<4, length);
1321 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1322 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1324 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1328 * prints debuging info for the given picture.
1330 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1332 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1334 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1337 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1338 switch (pict->pict_type) {
1339 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1340 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1341 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1342 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1343 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1344 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1346 for(y=0; y<s->mb_height; y++){
1347 for(x=0; x<s->mb_width; x++){
1348 if(s->avctx->debug&FF_DEBUG_SKIP){
1349 int count= s->mbskip_table[x + y*s->mb_stride];
1350 if(count>9) count=9;
1351 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1353 if(s->avctx->debug&FF_DEBUG_QP){
1354 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1356 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1357 int mb_type= pict->mb_type[x + y*s->mb_stride];
1358 //Type & MV direction
1360 av_log(s->avctx, AV_LOG_DEBUG, "P");
1361 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1362 av_log(s->avctx, AV_LOG_DEBUG, "A");
1363 else if(IS_INTRA4x4(mb_type))
1364 av_log(s->avctx, AV_LOG_DEBUG, "i");
1365 else if(IS_INTRA16x16(mb_type))
1366 av_log(s->avctx, AV_LOG_DEBUG, "I");
1367 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1368 av_log(s->avctx, AV_LOG_DEBUG, "d");
1369 else if(IS_DIRECT(mb_type))
1370 av_log(s->avctx, AV_LOG_DEBUG, "D");
1371 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1372 av_log(s->avctx, AV_LOG_DEBUG, "g");
1373 else if(IS_GMC(mb_type))
1374 av_log(s->avctx, AV_LOG_DEBUG, "G");
1375 else if(IS_SKIP(mb_type))
1376 av_log(s->avctx, AV_LOG_DEBUG, "S");
1377 else if(!USES_LIST(mb_type, 1))
1378 av_log(s->avctx, AV_LOG_DEBUG, ">");
1379 else if(!USES_LIST(mb_type, 0))
1380 av_log(s->avctx, AV_LOG_DEBUG, "<");
1382 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1383 av_log(s->avctx, AV_LOG_DEBUG, "X");
1388 av_log(s->avctx, AV_LOG_DEBUG, "+");
1389 else if(IS_16X8(mb_type))
1390 av_log(s->avctx, AV_LOG_DEBUG, "-");
1391 else if(IS_8X16(mb_type))
1392 av_log(s->avctx, AV_LOG_DEBUG, "|");
1393 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1394 av_log(s->avctx, AV_LOG_DEBUG, " ");
1396 av_log(s->avctx, AV_LOG_DEBUG, "?");
1399 if(IS_INTERLACED(mb_type))
1400 av_log(s->avctx, AV_LOG_DEBUG, "=");
1402 av_log(s->avctx, AV_LOG_DEBUG, " ");
1404 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1406 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1410 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1411 const int shift= 1 + s->quarter_sample;
1415 int h_chroma_shift, v_chroma_shift, block_height;
1416 const int width = s->avctx->width;
1417 const int height= s->avctx->height;
1418 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1419 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1420 s->low_delay=0; //needed to see the vectors without trashing the buffers
1422 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1424 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1425 pict->data[i]= s->visualization_buffer[i];
1427 pict->type= FF_BUFFER_TYPE_COPY;
1429 block_height = 16>>v_chroma_shift;
1431 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1433 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1434 const int mb_index= mb_x + mb_y*s->mb_stride;
1435 if((s->avctx->debug_mv) && pict->motion_val){
1437 for(type=0; type<3; type++){
1440 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1444 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1448 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1453 if(!USES_LIST(pict->mb_type[mb_index], direction))
1456 if(IS_8X8(pict->mb_type[mb_index])){
1459 int sx= mb_x*16 + 4 + 8*(i&1);
1460 int sy= mb_y*16 + 4 + 8*(i>>1);
1461 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1462 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1463 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1464 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1466 }else if(IS_16X8(pict->mb_type[mb_index])){
1470 int sy=mb_y*16 + 4 + 8*i;
1471 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1472 int mx=(pict->motion_val[direction][xy][0]>>shift);
1473 int my=(pict->motion_val[direction][xy][1]>>shift);
1475 if(IS_INTERLACED(pict->mb_type[mb_index]))
1478 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1480 }else if(IS_8X16(pict->mb_type[mb_index])){
1483 int sx=mb_x*16 + 4 + 8*i;
1485 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1486 int mx=(pict->motion_val[direction][xy][0]>>shift);
1487 int my=(pict->motion_val[direction][xy][1]>>shift);
1489 if(IS_INTERLACED(pict->mb_type[mb_index]))
1492 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1495 int sx= mb_x*16 + 8;
1496 int sy= mb_y*16 + 8;
1497 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1498 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1499 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1500 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1504 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1505 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1507 for(y=0; y<block_height; y++){
1508 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1509 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1512 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1513 int mb_type= pict->mb_type[mb_index];
1516 #define COLOR(theta, r)\
1517 u= (int)(128 + r*cos(theta*3.141592/180));\
1518 v= (int)(128 + r*sin(theta*3.141592/180));
1522 if(IS_PCM(mb_type)){
1524 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1526 }else if(IS_INTRA4x4(mb_type)){
1528 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1530 }else if(IS_DIRECT(mb_type)){
1532 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1534 }else if(IS_GMC(mb_type)){
1536 }else if(IS_SKIP(mb_type)){
1538 }else if(!USES_LIST(mb_type, 1)){
1540 }else if(!USES_LIST(mb_type, 0)){
1543 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1547 u*= 0x0101010101010101ULL;
1548 v*= 0x0101010101010101ULL;
1549 for(y=0; y<block_height; y++){
1550 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1551 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
1555 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1556 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1557 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1559 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1561 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1563 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1564 int dm= 1 << (mv_sample_log2-2);
1566 int sx= mb_x*16 + 8*(i&1);
1567 int sy= mb_y*16 + 8*(i>>1);
1568 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1570 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1571 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1573 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1574 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1575 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1579 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1583 s->mbskip_table[mb_index]=0;
1589 static inline int hpel_motion_lowres(MpegEncContext *s,
1590 uint8_t *dest, uint8_t *src,
1591 int field_based, int field_select,
1592 int src_x, int src_y,
1593 int width, int height, int stride,
1594 int h_edge_pos, int v_edge_pos,
1595 int w, int h, h264_chroma_mc_func *pix_op,
1596 int motion_x, int motion_y)
1598 const int lowres= s->avctx->lowres;
1599 const int op_index= FFMIN(lowres, 2);
1600 const int s_mask= (2<<lowres)-1;
1604 if(s->quarter_sample){
1609 sx= motion_x & s_mask;
1610 sy= motion_y & s_mask;
1611 src_x += motion_x >> (lowres+1);
1612 src_y += motion_y >> (lowres+1);
1614 src += src_y * stride + src_x;
1616 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1617 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1618 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1619 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1620 src= s->edge_emu_buffer;
1624 sx= (sx << 2) >> lowres;
1625 sy= (sy << 2) >> lowres;
1628 pix_op[op_index](dest, src, stride, h, sx, sy);
1632 /* apply one mpeg motion vector to the three components */
1633 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1634 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1635 int field_based, int bottom_field, int field_select,
1636 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1637 int motion_x, int motion_y, int h, int mb_y)
1639 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1640 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1641 const int lowres= s->avctx->lowres;
1642 const int op_index= FFMIN(lowres, 2);
1643 const int block_s= 8>>lowres;
1644 const int s_mask= (2<<lowres)-1;
1645 const int h_edge_pos = s->h_edge_pos >> lowres;
1646 const int v_edge_pos = s->v_edge_pos >> lowres;
1647 linesize = s->current_picture.linesize[0] << field_based;
1648 uvlinesize = s->current_picture.linesize[1] << field_based;
1650 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1656 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1659 sx= motion_x & s_mask;
1660 sy= motion_y & s_mask;
1661 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1662 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1664 if (s->out_format == FMT_H263) {
1665 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1666 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1669 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1672 uvsx = (2*mx) & s_mask;
1673 uvsy = (2*my) & s_mask;
1674 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1675 uvsrc_y = mb_y*block_s + (my >> lowres);
1681 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1682 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1685 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1686 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1687 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1689 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1690 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1691 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1692 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1693 ptr_y = s->edge_emu_buffer;
1694 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1695 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1696 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1697 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1698 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1699 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1705 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1706 dest_y += s->linesize;
1707 dest_cb+= s->uvlinesize;
1708 dest_cr+= s->uvlinesize;
1712 ptr_y += s->linesize;
1713 ptr_cb+= s->uvlinesize;
1714 ptr_cr+= s->uvlinesize;
1717 sx= (sx << 2) >> lowres;
1718 sy= (sy << 2) >> lowres;
1719 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1721 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1722 uvsx= (uvsx << 2) >> lowres;
1723 uvsy= (uvsy << 2) >> lowres;
1724 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1725 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1727 //FIXME h261 lowres loop filter
1730 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1731 uint8_t *dest_cb, uint8_t *dest_cr,
1732 uint8_t **ref_picture,
1733 h264_chroma_mc_func *pix_op,
1735 const int lowres= s->avctx->lowres;
1736 const int op_index= FFMIN(lowres, 2);
1737 const int block_s= 8>>lowres;
1738 const int s_mask= (2<<lowres)-1;
1739 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1740 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1741 int emu=0, src_x, src_y, offset, sx, sy;
1744 if(s->quarter_sample){
1749 /* In case of 8X8, we construct a single chroma motion vector
1750 with a special rounding */
1751 mx= ff_h263_round_chroma(mx);
1752 my= ff_h263_round_chroma(my);
1756 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1757 src_y = s->mb_y*block_s + (my >> (lowres+1));
1759 offset = src_y * s->uvlinesize + src_x;
1760 ptr = ref_picture[1] + offset;
1761 if(s->flags&CODEC_FLAG_EMU_EDGE){
1762 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1763 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1764 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1765 ptr= s->edge_emu_buffer;
1769 sx= (sx << 2) >> lowres;
1770 sy= (sy << 2) >> lowres;
1771 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1773 ptr = ref_picture[2] + offset;
1775 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1776 ptr= s->edge_emu_buffer;
1778 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1782 * motion compensation of a single macroblock
1784 * @param dest_y luma destination pointer
1785 * @param dest_cb chroma cb/u destination pointer
1786 * @param dest_cr chroma cr/v destination pointer
1787 * @param dir direction (0->forward, 1->backward)
1788 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1789 * @param pix_op halfpel motion compensation function (average or put normally)
1790 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1792 static inline void MPV_motion_lowres(MpegEncContext *s,
1793 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1794 int dir, uint8_t **ref_picture,
1795 h264_chroma_mc_func *pix_op)
1799 const int lowres= s->avctx->lowres;
1800 const int block_s= 8>>lowres;
1805 switch(s->mv_type) {
1807 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1809 ref_picture, pix_op,
1810 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
1816 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1817 ref_picture[0], 0, 0,
1818 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1819 s->width, s->height, s->linesize,
1820 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1821 block_s, block_s, pix_op,
1822 s->mv[dir][i][0], s->mv[dir][i][1]);
1824 mx += s->mv[dir][i][0];
1825 my += s->mv[dir][i][1];
1828 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1829 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
1832 if (s->picture_structure == PICT_FRAME) {
1834 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1835 1, 0, s->field_select[dir][0],
1836 ref_picture, pix_op,
1837 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1839 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1840 1, 1, s->field_select[dir][1],
1841 ref_picture, pix_op,
1842 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
1844 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1845 ref_picture= s->current_picture_ptr->data;
1848 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1849 0, 0, s->field_select[dir][0],
1850 ref_picture, pix_op,
1851 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1856 uint8_t ** ref2picture;
1858 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1859 ref2picture= ref_picture;
1861 ref2picture= s->current_picture_ptr->data;
1864 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1865 0, 0, s->field_select[dir][i],
1866 ref2picture, pix_op,
1867 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1869 dest_y += 2*block_s*s->linesize;
1870 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1871 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1875 if(s->picture_structure == PICT_FRAME){
1879 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1881 ref_picture, pix_op,
1882 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1884 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1888 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1889 0, 0, s->picture_structure != i+1,
1890 ref_picture, pix_op,
1891 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1893 // after put we make avg of the same block
1894 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1896 //opposite parity is always in the same frame if this is second field
1897 if(!s->first_field){
1898 ref_picture = s->current_picture_ptr->data;
1908 * find the lowest MB row referenced in the MVs
1910 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1912 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1913 int my, off, i, mvs;
1915 if (s->picture_structure != PICT_FRAME) goto unhandled;
1917 switch (s->mv_type) {
1931 for (i = 0; i < mvs; i++) {
1932 my = s->mv[dir][i][1]<<qpel_shift;
1933 my_max = FFMAX(my_max, my);
1934 my_min = FFMIN(my_min, my);
1937 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1939 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1941 return s->mb_height-1;
1944 /* put block[] to dest[] */
1945 static inline void put_dct(MpegEncContext *s,
1946 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1948 s->dct_unquantize_intra(s, block, i, qscale);
1949 s->dsp.idct_put (dest, line_size, block);
1952 /* add block[] to dest[] */
1953 static inline void add_dct(MpegEncContext *s,
1954 DCTELEM *block, int i, uint8_t *dest, int line_size)
1956 if (s->block_last_index[i] >= 0) {
1957 s->dsp.idct_add (dest, line_size, block);
1961 static inline void add_dequant_dct(MpegEncContext *s,
1962 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1964 if (s->block_last_index[i] >= 0) {
1965 s->dct_unquantize_inter(s, block, i, qscale);
1967 s->dsp.idct_add (dest, line_size, block);
1972 * cleans dc, ac, coded_block for the current non intra MB
1974 void ff_clean_intra_table_entries(MpegEncContext *s)
1976 int wrap = s->b8_stride;
1977 int xy = s->block_index[0];
1980 s->dc_val[0][xy + 1 ] =
1981 s->dc_val[0][xy + wrap] =
1982 s->dc_val[0][xy + 1 + wrap] = 1024;
1984 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1985 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1986 if (s->msmpeg4_version>=3) {
1987 s->coded_block[xy ] =
1988 s->coded_block[xy + 1 ] =
1989 s->coded_block[xy + wrap] =
1990 s->coded_block[xy + 1 + wrap] = 0;
1993 wrap = s->mb_stride;
1994 xy = s->mb_x + s->mb_y * wrap;
1996 s->dc_val[2][xy] = 1024;
1998 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1999 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2001 s->mbintra_table[xy]= 0;
2004 /* generic function called after a macroblock has been parsed by the
2005 decoder or after it has been encoded by the encoder.
2007 Important variables used:
2008 s->mb_intra : true if intra macroblock
2009 s->mv_dir : motion vector direction
2010 s->mv_type : motion vector type
2011 s->mv : motion vector
2012 s->interlaced_dct : true if interlaced dct used (mpeg2)
2014 static av_always_inline
2015 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2016 int lowres_flag, int is_mpeg12)
2018 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2019 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2020 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2024 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2025 /* save DCT coefficients */
2027 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
2028 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2030 for(j=0; j<64; j++){
2031 *dct++ = block[i][s->dsp.idct_permutation[j]];
2032 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2034 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2038 s->current_picture.qscale_table[mb_xy]= s->qscale;
2040 /* update DC predictors for P macroblocks */
2042 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2043 if(s->mbintra_table[mb_xy])
2044 ff_clean_intra_table_entries(s);
2048 s->last_dc[2] = 128 << s->intra_dc_precision;
2051 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2052 s->mbintra_table[mb_xy]=1;
2054 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2055 uint8_t *dest_y, *dest_cb, *dest_cr;
2056 int dct_linesize, dct_offset;
2057 op_pixels_func (*op_pix)[4];
2058 qpel_mc_func (*op_qpix)[16];
2059 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2060 const int uvlinesize= s->current_picture.linesize[1];
2061 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2062 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2064 /* avoid copy if macroblock skipped in last frame too */
2065 /* skip only during decoding as we might trash the buffers during encoding a bit */
2067 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2068 const int age= s->current_picture.age;
2072 if (s->mb_skipped) {
2074 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2076 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2077 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2079 /* if previous was skipped too, then nothing to do ! */
2080 if (*mbskip_ptr >= age && s->current_picture.reference){
2083 } else if(!s->current_picture.reference){
2084 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2085 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2087 *mbskip_ptr = 0; /* not skipped */
2091 dct_linesize = linesize << s->interlaced_dct;
2092 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2096 dest_cb= s->dest[1];
2097 dest_cr= s->dest[2];
2099 dest_y = s->b_scratchpad;
2100 dest_cb= s->b_scratchpad+16*linesize;
2101 dest_cr= s->b_scratchpad+32*linesize;
2105 /* motion handling */
2106 /* decoding or more than one mb_type (MC was already done otherwise) */
2109 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2110 if (s->mv_dir & MV_DIR_FORWARD) {
2111 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2113 if (s->mv_dir & MV_DIR_BACKWARD) {
2114 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2119 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2121 if (s->mv_dir & MV_DIR_FORWARD) {
2122 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
2123 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2125 if (s->mv_dir & MV_DIR_BACKWARD) {
2126 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
2129 op_qpix= s->me.qpel_put;
2130 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2131 op_pix = s->dsp.put_pixels_tab;
2133 op_pix = s->dsp.put_no_rnd_pixels_tab;
2135 if (s->mv_dir & MV_DIR_FORWARD) {
2136 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2137 op_pix = s->dsp.avg_pixels_tab;
2138 op_qpix= s->me.qpel_avg;
2140 if (s->mv_dir & MV_DIR_BACKWARD) {
2141 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2146 /* skip dequant / idct if we are really late ;) */
2147 if(s->avctx->skip_idct){
2148 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2149 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2150 || s->avctx->skip_idct >= AVDISCARD_ALL)
2154 /* add dct residue */
2155 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2156 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2157 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2158 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2159 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2160 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2162 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2163 if (s->chroma_y_shift){
2164 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2165 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2169 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2170 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2171 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2172 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2175 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2176 add_dct(s, block[0], 0, dest_y , dct_linesize);
2177 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2178 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2179 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2181 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2182 if(s->chroma_y_shift){//Chroma420
2183 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2184 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2187 dct_linesize = uvlinesize << s->interlaced_dct;
2188 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2190 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2191 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2192 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2193 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2194 if(!s->chroma_x_shift){//Chroma444
2195 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2196 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2197 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2198 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2203 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2204 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2207 /* dct only in intra block */
2208 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2209 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2210 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2211 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2212 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2214 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2215 if(s->chroma_y_shift){
2216 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2217 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2221 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2222 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2223 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2224 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2228 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2229 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2230 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2231 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2233 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2234 if(s->chroma_y_shift){
2235 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2236 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2239 dct_linesize = uvlinesize << s->interlaced_dct;
2240 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2242 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2243 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2244 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2245 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2246 if(!s->chroma_x_shift){//Chroma444
2247 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2248 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2249 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2250 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2258 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2259 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2260 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Decode one macroblock: thin dispatcher into MPV_decode_mb_internal().
 * The two trailing constant arguments (lowres_flag, is_mpeg12) are
 * compile-time selectors that let the compiler prune unused code paths
 * in each instantiation of the big internal function.
 * NOTE(review): some original lines are elided in this excerpt (see the
 * line-number gaps); the missing "else" brace/close are assumed. */
2265 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
/* MPEG-1/MPEG-2 bitstreams take the is_mpeg12 == 1 specialization */
2267 if(s->out_format == FMT_MPEG1) {
2268 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2269 else MPV_decode_mb_internal(s, block, 0, 1);
/* all other codec families (H.263, MPEG-4, MSMPEG4, ...): is_mpeg12 == 0 */
2272 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2273 else MPV_decode_mb_internal(s, block, 0, 0);
/* Draw (emit) one finished horizontal band of the current picture.
 * First pads the picture edges (needed for unrestricted motion vectors
 * that reference pixels outside the frame), then hands the band to the
 * user's draw_horiz_band callback, if any.
 * NOTE(review): several original lines are elided in this excerpt. */
2278 * @param h is the normal height, this will be reduced automatically if needed for the last row
2280 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2281 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge emulation: only pad when no hwaccel owns the surfaces, the codec
 * allows MVs past the border, the picture is a reference, and the user
 * did not request CODEC_FLAG_EMU_EDGE (edge emulation done per-MB). */
2287 if (!s->avctx->hwaccel
2288 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2289 && s->unrestricted_mv
2290 && s->current_picture.reference
2292 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2293 int sides = 0, edge_h;
/* chroma subsampling shifts for the current pixel format */
2294 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2295 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* only pad top/bottom edges on the first/last band respectively */
2296 if (y==0) sides |= EDGE_TOP;
2297 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clip band height so we never pad past the coded picture */
2299 edge_h= FFMIN(h, s->v_edge_pos - y);
/* pad luma, then both chroma planes (chroma scaled by hshift/vshift) */
2301 s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize,
2302 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
2303 s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2304 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2305 s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2306 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* last row may be shorter than the nominal band height */
2309 h= FFMIN(h, s->avctx->height - y);
/* for field pictures, skip the first field unless the user opted in */
2311 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2313 if (s->avctx->draw_horiz_band) {
/* choose which picture to show: B-frames / low-delay / coded-order
 * emit the current picture, otherwise the previous reference */
2317 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2318 src= (AVFrame*)s->current_picture_ptr;
2319 else if(s->last_picture_ptr)
2320 src= (AVFrame*)s->last_picture_ptr;
2324 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band start within each plane
 * (offset[1] presumably set on an elided line — verify in full source) */
2330 offset[0]= y * s->linesize;
2332 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2338 s->avctx->draw_horiz_band(s->avctx, src, offset,
2339 y, s->picture_structure, h);
/* Initialize per-row block indices and destination plane pointers for
 * the macroblock at (s->mb_x, s->mb_y).  block_index[0..3] address the
 * four luma 8x8 blocks in the b8-grid; block_index[4..5] the two chroma
 * blocks past the luma area.  s->dest[] point at the MB's pixels in the
 * current picture (scaled down when lowres decoding is active).
 * NOTE(review): closing braces of some branches are elided here. */
2343 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2344 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2345 const int uvlinesize= s->current_picture.linesize[1];
/* log2 of the MB size in pixels: 16 normally, smaller under lowres */
2346 const int mb_size= 4 - s->avctx->lowres;
2348 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2349 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2350 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2351 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2352 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2353 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2354 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* horizontal position within the row (mb_x - 1: pointers are advanced
 * before each MB is decoded) */
2356 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2357 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2358 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* add the vertical offset, except in the draw_horiz_band B-frame case */
2360 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2362 if(s->picture_structure==PICT_FRAME){
2363 s->dest[0] += s->mb_y * linesize << mb_size;
2364 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2365 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: each field occupies every other line, hence mb_y>>1 */
2367 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2368 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2369 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2370 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush the decoder: release all internally/user allocated picture
 * buffers, drop the reference-picture pointers, and reset the parser
 * state so decoding can restart cleanly (e.g. after a seek).
 * NOTE(review): some lines (early return, closing braces) are elided. */
2375 void ff_mpeg_flush(AVCodecContext *avctx){
2377 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never fully initialized */
2379 if(s==NULL || s->picture==NULL)
/* free every allocated picture we own (internal or user buffers) */
2382 for(i=0; i<s->picture_count; i++){
2383 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2384 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2385 free_frame_buffer(s, &s->picture[i]);
2387 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2389 s->mb_x= s->mb_y= 0;
/* reset bitstream parser state */
2392 s->parse_context.state= -1;
2393 s->parse_context.frame_start_found= 0;
2394 s->parse_context.overread= 0;
2395 s->parse_context.overread_index= 0;
2396 s->parse_context.index= 0;
2397 s->parse_context.last_index= 0;
2398 s->bitstream_buffer_size=0;
/* MPEG-1 intra-block dequantization (C reference implementation).
 * DC (block[0]) is scaled by the DC scale table; AC coefficients are
 * multiplied by qscale and the intra matrix, then "oddified"
 * ((level-1)|1) per the MPEG-1 mismatch-control rule.
 * NOTE(review): the sign-handling branches and the store back into
 * block[j] are on elided lines; the two visible arms presumably cover
 * negative and positive levels — confirm against the full source. */
2402 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2403 DCTELEM *block, int n, int qscale)
2405 int i, level, nCoeffs;
2406 const uint16_t *quant_matrix;
2408 nCoeffs= s->block_last_index[n];
/* DC coefficient: luma vs chroma scale (block index n selects plane) */
2411 block[0] = block[0] * s->y_dc_scale;
2413 block[0] = block[0] * s->c_dc_scale;
2414 /* XXX: only mpeg1 */
2415 quant_matrix = s->intra_matrix;
2416 for(i=1;i<=nCoeffs;i++) {
/* walk coefficients in scan order; j is the raster position */
2417 int j= s->intra_scantable.permutated[i];
2422 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2423 level = (level - 1) | 1; /* force odd (mismatch control) */
2426 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2427 level = (level - 1) | 1;
/* MPEG-1 inter-block dequantization (C reference implementation).
 * All coefficients (including DC) use the inter matrix with the
 * (2*level+1) reconstruction formula and the odd-forcing mismatch rule.
 * NOTE(review): sign branches and the block[j] store are elided. */
2434 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2435 DCTELEM *block, int n, int qscale)
2437 int i, level, nCoeffs;
2438 const uint16_t *quant_matrix;
2440 nCoeffs= s->block_last_index[n];
2442 quant_matrix = s->inter_matrix;
2443 for(i=0; i<=nCoeffs; i++) {
2444 int j= s->intra_scantable.permutated[i];
/* reconstruction: ((2*level + 1) * qscale * matrix) / 16, forced odd */
2449 level = (((level << 1) + 1) * qscale *
2450 ((int) (quant_matrix[j]))) >> 4;
2451 level = (level - 1) | 1;
2454 level = (((level << 1) + 1) * qscale *
2455 ((int) (quant_matrix[j]))) >> 4;
2456 level = (level - 1) | 1;
/* MPEG-2 intra-block dequantization.  Like the MPEG-1 intra version but
 * without the (level-1)|1 oddification (MPEG-2 uses a different
 * mismatch control), and with alternate_scan forcing all 64 coeffs.
 * NOTE(review): sign branches and the store are on elided lines. */
2463 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2464 DCTELEM *block, int n, int qscale)
2466 int i, level, nCoeffs;
2467 const uint16_t *quant_matrix;
/* alternate scan order touches the whole block regardless of EOB */
2469 if(s->alternate_scan) nCoeffs= 63;
2470 else nCoeffs= s->block_last_index[n];
2473 block[0] = block[0] * s->y_dc_scale;
2475 block[0] = block[0] * s->c_dc_scale;
2476 quant_matrix = s->intra_matrix;
2477 for(i=1;i<=nCoeffs;i++) {
2478 int j= s->intra_scantable.permutated[i];
2483 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2486 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization.  Structurally the
 * same as dct_unquantize_mpeg2_intra_c; presumably it additionally
 * accumulates a parity sum for exact mismatch control on an elided
 * line — confirm against the full source. */
2493 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2494 DCTELEM *block, int n, int qscale)
2496 int i, level, nCoeffs;
2497 const uint16_t *quant_matrix;
2500 if(s->alternate_scan) nCoeffs= 63;
2501 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scaling */
2504 block[0] = block[0] * s->y_dc_scale;
2506 block[0] = block[0] * s->c_dc_scale;
2507 quant_matrix = s->intra_matrix;
2508 for(i=1;i<=nCoeffs;i++) {
2509 int j= s->intra_scantable.permutated[i];
2514 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2517 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter-block dequantization: (2*level+1) formula with the inter
 * matrix; no per-coefficient oddification (MPEG-2 mismatch control is
 * handled separately, presumably on elided lines).
 * NOTE(review): sign branches and the block[j] store are elided. */
2526 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2527 DCTELEM *block, int n, int qscale)
2529 int i, level, nCoeffs;
2530 const uint16_t *quant_matrix;
2533 if(s->alternate_scan) nCoeffs= 63;
2534 else nCoeffs= s->block_last_index[n];
2536 quant_matrix = s->inter_matrix;
2537 for(i=0; i<=nCoeffs; i++) {
2538 int j= s->intra_scantable.permutated[i];
2543 level = (((level << 1) + 1) * qscale *
2544 ((int) (quant_matrix[j]))) >> 4;
2547 level = (((level << 1) + 1) * qscale *
2548 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: level' = level*qmul +/- qadd with
 * qadd = (qscale-1)|1.  DC is scaled by the DC scale tables.
 * NOTE(review): qmul assignment, sign tests and the store back into the
 * block are on elided lines; the +/- arms visible below presumably
 * correspond to negative/positive levels. */
2557 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2558 DCTELEM *block, int n, int qscale)
2560 int i, level, qmul, qadd;
2563 assert(s->block_last_index[n]>=0);
/* DC coefficient: plane-dependent scale */
2569 block[0] = block[0] * s->y_dc_scale;
2571 block[0] = block[0] * s->c_dc_scale;
2572 qadd = (qscale - 1) | 1; /* always odd */
/* raster_end maps the last scan position to the last raster index */
2579 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2581 for(i=1; i<=nCoeffs; i++) {
2585 level = level * qmul - qadd;
2587 level = level * qmul + qadd;
/* H.263-style inter dequantization: same level*qmul +/- qadd formula as
 * the intra variant, but starting at coefficient 0 (DC is treated like
 * any AC coefficient for inter blocks).
 * NOTE(review): qmul assignment, sign tests and the store are elided. */
2594 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2595 DCTELEM *block, int n, int qscale)
2597 int i, level, qmul, qadd;
2600 assert(s->block_last_index[n]>=0);
2602 qadd = (qscale - 1) | 1; /* always odd */
2605 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2607 for(i=0; i<=nCoeffs; i++) {
2611 level = level * qmul - qadd;
2613 level = level * qmul + qadd;
2621 * set qscale and update qscale dependent variables.
2623 void ff_set_qscale(MpegEncContext * s, int qscale)
2627 else if (qscale > 31)
2631 s->chroma_qscale= s->chroma_qscale_table[qscale];
2633 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2634 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2637 void MPV_report_decode_progress(MpegEncContext *s)
2639 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2640 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);