2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantizer implementations.
 * ff_dct_common_init() below installs these as the default function
 * pointers in MpegEncContext; arch-specific init may override them. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale table: identity mapping — chroma uses the same
 * qscale as luma unless a codec installs its own table. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
/* MPEG-1 intra DC scale: always 8, independent of qscale.
 * Indexed by qscale; padded to 128 entries to match the layout of the
 * MPEG-2 dc_scale tables below. */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8
};
/* MPEG-2 intra DC scale for intra_dc_precision == 1: divisor 4. */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
};
/* MPEG-2 intra DC scale for intra_dc_precision == 2: divisor 2. */
static const uint8_t mpeg2_dc_scale_table2[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
};
/* MPEG-2 intra DC scale for intra_dc_precision == 3: divisor 1 (full precision). */
static const uint8_t mpeg2_dc_scale_table3[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (0x000001XX).
 *
 * @param p     start of the buffer to scan
 * @param end   one past the last byte of the buffer
 * @param state scanner state carried across calls so a start code split
 *              over two buffers is still found; holds the last 4 bytes
 *              seen (big-endian). Initialize to ~0 before the first call.
 * @return pointer one past the XX byte of the start code, or end if no
 *         start code was found in this buffer.
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* The first three bytes may complete a start code that began at the
     * tail of the previous buffer; tmp == 0x100 means the previous three
     * state bytes were 00 00 01. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Skip ahead as far as the byte values allow: a start code needs
     * 00 00 01, so a byte > 1 rules out the next two positions too. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3;
        else if (p[-2]          ) p += 2;
        else if (p[-3] | (p[-1] - 1)) p++;
        else {
            p++;
            break;
        }
    }

    /* Reload state with the last 4 bytes before the return position. */
    p = (p < end ? p : end) - 4;
    *state = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];

    return p + 4;
}
155 /* init common dct for both encoder and decoder */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Install the C reference dequantizers declared above. */
158 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
159 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
160 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
161 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bitexact mode uses a mismatch-control-free variant for reproducible output */
163 if(s->flags & CODEC_FLAG_BITEXACT)
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
165 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Arch-specific initialisations; in the full file each of these is
 * guarded by the matching HAVE_*/ /* ARCH_* preprocessor conditional
 * (elided in this dump) and may override the C function pointers. */
168 MPV_common_init_mmx(s);
170 MPV_common_init_axp(s);
172 MPV_common_init_mlib(s);
174 MPV_common_init_mmi(s);
176 MPV_common_init_arm(s);
178 MPV_common_init_altivec(s);
180 MPV_common_init_bfin(s);
183 /* load & permutate scantables
184 note: only wmv uses different ones
/* Build the scan tables through the IDCT's coefficient permutation so
 * coefficients land where the (possibly SIMD) IDCT expects them. */
186 if(s->alternate_scan){
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
199 void ff_copy_picture(Picture *dst, Picture *src){
201 dst->f.type= FF_BUFFER_TYPE_COPY;
205 * Release a frame buffer
207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
209 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
210 av_freep(&pic->f.hwaccel_picture_private);
214 * Allocate a frame buffer
/* Allocates the pixel buffers for pic via get_buffer(), plus hwaccel
 * private data when a hwaccel is active, and validates the result
 * (strides must be consistent and must not change mid-stream). */
216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
220 if (s->avctx->hwaccel) {
221 assert(!pic->hwaccel_picture_private);
222 if (s->avctx->hwaccel->priv_data_size) {
223 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
224 if (!pic->f.hwaccel_picture_private) {
225 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* NOTE(review): error-return statements are elided in this dump */
231 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
/* reject buffers the application returned in an invalid state */
233 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
234 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
235 r, pic->f.age, pic->f.type, pic->f.data[0]);
236 av_freep(&pic->f.hwaccel_picture_private);
/* strides are cached in the context; a change would invalidate all
 * derived per-frame tables, so treat it as a hard error */
240 if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
241 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
242 free_frame_buffer(s, pic);
246 if (pic->f.linesize[1] != pic->f.linesize[2]) {
247 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
248 free_frame_buffer(s, pic);
256 * allocates a Picture
257 * The pixels are allocated/set by calling get_buffer() if shared=0
/* On success the per-picture side tables (qscale, mb_type, motion
 * vectors, ...) are allocated lazily on first use; on failure all
 * buffers are released and -1 is returned (return elided in dump). */
259 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
260 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
261 const int mb_array_size= s->mb_stride*s->mb_height;
262 const int b8_array_size= s->b8_stride*s->mb_height*2;
263 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared pictures bring their own pixel buffers */
268 assert(pic->f.data[0]);
269 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
270 pic->f.type = FF_BUFFER_TYPE_SHARED;
272 assert(!pic->f.data[0]);
274 if (alloc_frame_buffer(s, pic) < 0)
/* cache the strides; alloc_frame_buffer() guarantees they are stable */
277 s->linesize = pic->f.linesize[0];
278 s->uvlinesize = pic->f.linesize[1];
/* first allocation for this Picture: create the side-data tables */
281 if (pic->f.qscale_table == NULL) {
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
285 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
289 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
290 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
/* offset so out-of-frame accesses at the top/left edge stay in bounds */
291 pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
292 pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
/* H.264 stores motion at 4x4 granularity, others at 8x8 */
293 if(s->out_format == FMT_H264){
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
296 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
297 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
299 pic->f.motion_subsample_log2 = 2;
300 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
302 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
303 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
304 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
306 pic->f.motion_subsample_log2 = 3;
308 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
309 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
311 pic->f.qstride = s->mb_stride;
312 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
315 /* It might be nicer if the application would keep track of these
316 * but it would require an API change. */
317 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
318 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
319 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
320 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
324 fail: //for the FF_ALLOCZ_OR_GOTO macro
326 free_frame_buffer(s, pic);
331 * deallocates a picture
/* Frees the per-picture side tables and, for non-shared pictures,
 * releases the pixel buffers too. Shared pictures only have their data
 * pointers cleared (the application owns the memory). */
333 static void free_picture(MpegEncContext *s, Picture *pic){
336 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
337 free_frame_buffer(s, pic);
340 av_freep(&pic->mb_var);
341 av_freep(&pic->mc_mb_var);
342 av_freep(&pic->mb_mean);
343 av_freep(&pic->f.mbskip_table);
344 av_freep(&pic->qscale_table_base);
345 av_freep(&pic->mb_type_base);
346 av_freep(&pic->f.dct_coeff);
347 av_freep(&pic->f.pan_scan);
348 pic->f.mb_type = NULL;
/* NOTE(review): the surrounding loop header over i is elided in this dump */
350 av_freep(&pic->motion_val_base[i]);
351 av_freep(&pic->f.ref_index[i]);
354 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
357 pic->f.data[i] = NULL;
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, motion-estimation scratchpads and
 * maps, DCT blocks, and H.263 AC prediction values. Returns 0 on
 * success, -1 on allocation failure (cleanup happens in MPV_common_end). */
363 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
364 int y_size = s->b8_stride * (2 * s->mb_height + 1);
365 int c_size = s->mb_stride * (s->mb_height + 1);
366 int yc_size = y_size + 2 * c_size;
369 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
370 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
371 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
373 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
374 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* all scratchpads alias the same allocation; they are never live at once */
375 s->me.temp= s->me.scratchpad;
376 s->rd_scratchpad= s->me.scratchpad;
377 s->b_scratchpad= s->me.scratchpad;
378 s->obmc_scratchpad= s->me.scratchpad + 16;
380 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
381 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
382 if(s->avctx->noise_reduction){
383 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
386 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
387 s->block= s->blocks[0];
/* NOTE(review): loop header over i elided in this dump */
390 s->pblocks[i] = &s->block[i];
393 if (s->out_format == FMT_H263) {
395 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
/* offset by one row+column so predictions at the edges read zeros */
396 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
397 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
398 s->ac_val[2] = s->ac_val[1] + c_size;
403 return -1; //free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(). Pointers that
 * merely alias another allocation are only NULLed, not freed. */
406 static void free_duplicate_context(MpegEncContext *s){
409 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
410 av_freep(&s->me.scratchpad);
/* temp/rd/b scratchpads alias me.scratchpad (freed above), so just clear */
414 s->obmc_scratchpad= NULL;
416 av_freep(&s->dct_error_sum);
417 av_freep(&s->me.map);
418 av_freep(&s->me.score_map);
419 av_freep(&s->blocks);
420 av_freep(&s->ac_val_base);
/* Save the per-thread fields of src into bak so that a wholesale
 * memcpy of the context can be undone for those fields.
 * NOTE(review): most COPY(...) lines are elided in this dump. */
424 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
425 #define COPY(a) bak->a= src->a
426 COPY(allocated_edge_emu_buffer);
427 COPY(edge_emu_buffer);
432 COPY(obmc_scratchpad);
439 COPY(me.map_generation);
/* Copy src into dst while preserving dst's thread-local buffers:
 * back them up, memcpy the whole context, then restore them.
 * pblocks must be re-pointed at dst's own block array afterwards. */
451 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
454 //FIXME copy only needed parts
456 backup_duplicate_context(&bak, dst);
457 memcpy(dst, src, sizeof(MpegEncContext));
458 backup_duplicate_context(dst, &bak);
/* NOTE(review): loop header over i elided in this dump */
460 dst->pblocks[i] = &dst->block[i];
462 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading support: synchronize decoder state from the source
 * thread's context (s1) into this thread's context (s). */
465 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
467 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
469 if(dst == src || !s1->context_initialized) return 0;
471 //FIXME can parameters change on I-frames? in that case dst may need a reinit
472 if(!s->context_initialized){
/* first sync: clone the whole context, then fix up per-thread fields */
473 memcpy(s, s1, sizeof(MpegEncContext));
476 s->picture_range_start += MAX_PICTURE_COUNT;
477 s->picture_range_end += MAX_PICTURE_COUNT;
478 s->bitstream_buffer = NULL;
479 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
484 s->avctx->coded_height = s1->avctx->coded_height;
485 s->avctx->coded_width = s1->avctx->coded_width;
486 s->avctx->width = s1->avctx->width;
487 s->avctx->height = s1->avctx->height;
489 s->coded_picture_number = s1->coded_picture_number;
490 s->picture_number = s1->picture_number;
491 s->input_picture_number = s1->input_picture_number;
493 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
494 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
/* pointers into s1's picture array must be rebased into s's array */
496 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
497 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
498 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
500 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
502 //Error/bug resilience
503 s->next_p_frame_damaged = s1->next_p_frame_damaged;
504 s->workaround_bugs = s1->workaround_bugs;
/* MPEG4 timing info: bulk-copy the field range via pointer arithmetic */
507 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
510 s->max_b_frames = s1->max_b_frames;
511 s->low_delay = s1->low_delay;
512 s->dropable = s1->dropable;
514 //DivX handling (doesn't work)
515 s->divx_packed = s1->divx_packed;
517 if(s1->bitstream_buffer){
518 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
519 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
520 s->bitstream_buffer_size = s1->bitstream_buffer_size;
521 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
/* zero the padding so the bitstream reader cannot overread garbage */
522 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
525 //MPEG2/interlacing info
526 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
528 if(!s1->first_field){
529 s->last_pict_type= s1->pict_type;
530 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
532 if(s1->pict_type!=FF_B_TYPE){
533 s->last_non_b_pict_type= s1->pict_type;
541 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
542 * the changed fields will not depend upon the prior state of the MpegEncContext.
544 void MPV_common_defaults(MpegEncContext *s){
546 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
547 s->chroma_qscale_table= ff_default_chroma_qscale_table;
/* assume progressive frame-coded content until headers say otherwise */
548 s->progressive_frame= 1;
549 s->progressive_sequence= 1;
550 s->picture_structure= PICT_FRAME;
552 s->coded_picture_number = 0;
553 s->picture_number = 0;
554 s->input_picture_number = 0;
556 s->picture_in_gop_number = 0;
/* default window into the shared picture array; shifted per-thread
 * for frame threading (see ff_mpeg_update_thread_context) */
561 s->picture_range_start = 0;
562 s->picture_range_end = MAX_PICTURE_COUNT;
566 * sets the given MpegEncContext to defaults for decoding.
567 * the changed fields will not depend upon the prior state of the MpegEncContext.
569 void MPV_decode_defaults(MpegEncContext *s){
570 MPV_common_defaults(s);
574 * init common structure for both encoder and decoder.
575 * this assumes that some variables like width/height are already set
577 av_cold int MPV_common_init(MpegEncContext *s)
579 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
580 threads = (s->encoding ||
581 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
583 s->avctx->thread_count : 1;
/* interlaced MPEG-2 needs an even mb_height rounded to 32-pixel units */
585 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
586 s->mb_height = (s->height + 31) / 32 * 2;
587 else if (s->codec_id != CODEC_ID_H264)
588 s->mb_height = (s->height + 15) / 16;
590 if(s->avctx->pix_fmt == PIX_FMT_NONE){
591 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* slice threading cannot use more threads than macroblock rows */
595 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
596 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
597 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
598 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
599 s->avctx->thread_count, max_threads);
600 threads = max_threads;
603 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
606 dsputil_init(&s->dsp, s->avctx);
607 ff_dct_common_init(s);
609 s->flags= s->avctx->flags;
610 s->flags2= s->avctx->flags2;
612 if (s->width && s->height) {
/* strides are one unit wider than the frame so edge macroblocks have
 * valid out-of-frame neighbours */
613 s->mb_width = (s->width + 15) / 16;
614 s->mb_stride = s->mb_width + 1;
615 s->b8_stride = s->mb_width*2 + 1;
616 s->b4_stride = s->mb_width*4 + 1;
617 mb_array_size= s->mb_height * s->mb_stride;
618 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
620 /* set chroma shifts */
621 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
622 &(s->chroma_y_shift) );
624 /* set default edge pos, will be overriden in decode_header if needed */
625 s->h_edge_pos= s->mb_width*16;
626 s->v_edge_pos= s->mb_height*16;
628 s->mb_num = s->mb_width * s->mb_height;
633 s->block_wrap[3]= s->b8_stride;
635 s->block_wrap[5]= s->mb_stride;
637 y_size = s->b8_stride * (2 * s->mb_height + 1);
638 c_size = s->mb_stride * (s->mb_height + 1);
639 yc_size = y_size + 2 * c_size;
641 /* convert fourcc to upper case */
642 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
644 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
646 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
648 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
649 for(y=0; y<s->mb_height; y++){
650 for(x=0; x<s->mb_width; x++){
651 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
654 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
657 /* Allocate MV tables */
658 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
659 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
660 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
661 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
662 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
663 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* working pointers skip the first row/column of padding */
664 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
665 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
666 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
667 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
668 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
669 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
671 if(s->msmpeg4_version){
672 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
674 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
676 /* Allocate MB type table */
677 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
679 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
681 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
682 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
683 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
684 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
685 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
686 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
688 if(s->avctx->noise_reduction){
689 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* one picture window per frame thread */
694 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
695 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
696 for(i = 0; i < s->picture_count; i++) {
697 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
700 if (s->width && s->height) {
701 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
703 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
704 /* interlaced direct mode decoding tables */
/* NOTE(review): the nested loop headers over i/j/k are elided in this dump */
709 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
710 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
712 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
713 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
714 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
716 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
719 if (s->out_format == FMT_H263) {
721 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
722 s->coded_block= s->coded_block_base + s->b8_stride + 1;
724 /* cbp, ac_pred, pred_dir */
725 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
726 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
729 if (s->h263_pred || s->h263_plus || !s->encoding) {
731 //MN: we need these for error resilience of intra-frames
732 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
733 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
734 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
735 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value */
736 for(i=0;i<yc_size;i++)
737 s->dc_val_base[i] = 1024;
740 /* which mb is a intra block */
741 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
742 memset(s->mbintra_table, 1, mb_array_size);
744 /* init macroblock skip table */
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
746 //Note the +1 is for a quicker mpeg4 slice_end detection
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
749 s->parse_context.state= -1;
750 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
751 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
752 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
753 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
757 s->context_initialized = 1;
758 s->thread_context[0]= s;
760 if (s->width && s->height) {
/* slice threading: clone the context once per worker and give each a
 * contiguous band of macroblock rows */
761 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
762 for(i=1; i<threads; i++){
763 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
764 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
767 for(i=0; i<threads; i++){
768 if(init_duplicate_context(s->thread_context[i], s) < 0)
770 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
771 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
774 if(init_duplicate_context(s, s) < 0) goto fail;
776 s->end_mb_y = s->mb_height;
786 /* init common structure for both encoder and decoder */
/* Tear down everything MPV_common_init() allocated; safe to call on a
 * partially-initialized context (av_freep tolerates NULL). */
787 void MPV_common_end(MpegEncContext *s)
791 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
792 for(i=0; i<s->avctx->thread_count; i++){
793 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself — free only the clones */
795 for(i=1; i<s->avctx->thread_count; i++){
796 av_freep(&s->thread_context[i]);
798 } else free_duplicate_context(s);
800 av_freep(&s->parse_context.buffer);
801 s->parse_context.buffer_size=0;
803 av_freep(&s->mb_type);
804 av_freep(&s->p_mv_table_base);
805 av_freep(&s->b_forw_mv_table_base);
806 av_freep(&s->b_back_mv_table_base);
807 av_freep(&s->b_bidir_forw_mv_table_base);
808 av_freep(&s->b_bidir_back_mv_table_base);
809 av_freep(&s->b_direct_mv_table_base);
/* working pointers pointed into the freed *_base arrays — clear them */
811 s->b_forw_mv_table= NULL;
812 s->b_back_mv_table= NULL;
813 s->b_bidir_forw_mv_table= NULL;
814 s->b_bidir_back_mv_table= NULL;
815 s->b_direct_mv_table= NULL;
/* NOTE(review): the nested loop headers over i/j/k are elided in this dump */
819 av_freep(&s->b_field_mv_table_base[i][j][k]);
820 s->b_field_mv_table[i][j][k]=NULL;
822 av_freep(&s->b_field_select_table[i][j]);
823 av_freep(&s->p_field_mv_table_base[i][j]);
824 s->p_field_mv_table[i][j]=NULL;
826 av_freep(&s->p_field_select_table[i]);
829 av_freep(&s->dc_val_base);
830 av_freep(&s->coded_block_base);
831 av_freep(&s->mbintra_table);
832 av_freep(&s->cbp_table);
833 av_freep(&s->pred_dir_table);
835 av_freep(&s->mbskip_table);
836 av_freep(&s->prev_pict_types);
837 av_freep(&s->bitstream_buffer);
838 s->allocated_bitstream_buffer_size=0;
840 av_freep(&s->avctx->stats_out);
841 av_freep(&s->ac_stats);
842 av_freep(&s->error_status_table);
843 av_freep(&s->mb_index2xy);
844 av_freep(&s->lambda_table);
845 av_freep(&s->q_intra_matrix);
846 av_freep(&s->q_inter_matrix);
847 av_freep(&s->q_intra_matrix16);
848 av_freep(&s->q_inter_matrix16);
849 av_freep(&s->input_picture);
850 av_freep(&s->reordered_input_picture);
851 av_freep(&s->dct_offset);
/* a frame-thread copy does not own the pictures — skip freeing them */
853 if(s->picture && !s->avctx->is_copy){
854 for(i=0; i<s->picture_count; i++){
855 free_picture(s, &s->picture[i]);
858 av_freep(&s->picture);
859 s->context_initialized = 0;
862 s->current_picture_ptr= NULL;
863 s->linesize= s->uvlinesize= 0;
866 av_freep(&s->visualization_buffer[i]);
868 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
869 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level, max_run,
 * index_run) of an RLTable from its raw table_run/table_level entries.
 * If static_store is non-NULL the results go into that static buffer
 * (and the work is done only once); otherwise they are heap-allocated. */
872 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
874 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
875 uint8_t index_run[MAX_RUN+1];
876 int last, run, level, start, end, i;
878 /* If table is static, we can quit if rl->max_level[0] is not NULL */
879 if(static_store && rl->max_level[0])
882 /* compute max_level[], max_run[] and index_run[] */
/* last==0: codes before rl->last; last==1: "last coefficient" codes */
883 for(last=0;last<2;last++) {
/* NOTE(review): the start/end selection lines are elided in this dump */
892 memset(max_level, 0, MAX_RUN + 1);
893 memset(max_run, 0, MAX_LEVEL + 1);
894 memset(index_run, rl->n, MAX_RUN + 1);
895 for(i=start;i<end;i++) {
896 run = rl->table_run[i];
897 level = rl->table_level[i];
/* record the first code index seen for each run */
898 if (index_run[run] == rl->n)
900 if (level > max_level[run])
901 max_level[run] = level;
902 if (run > max_run[level])
903 max_run[level] = run;
906 rl->max_level[last] = static_store[last];
908 rl->max_level[last] = av_malloc(MAX_RUN + 1);
909 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
911 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
913 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
914 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
916 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
918 rl->index_run[last] = av_malloc(MAX_RUN + 1);
919 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the combined run/level/length table (rl_vlc) for each
 * qscale q, folding the dequantization (qmul/qadd) into the stored
 * level so the decoder can skip a multiply per coefficient. */
923 void init_vlc_rl(RLTable *rl)
935 for(i=0; i<rl->vlc.table_size; i++){
936 int code= rl->vlc.table[i][0];
937 int len = rl->vlc.table[i][1];
940 if(len==0){ // illegal code
943 }else if(len<0){ //more bits needed
947 if(code==rl->n){ //esc
/* +1: stored run is "run minus one"; +192 flags a last-coefficient code */
951 run= rl->table_run [code] + 1;
952 level= rl->table_level[code] * qmul + qadd;
953 if(code >= rl->last) run+=192;
956 rl->rl_vlc[q][i].len= len;
957 rl->rl_vlc[q][i].level= level;
958 rl->rl_vlc[q][i].run= run;
/* Release all pictures that are neither reference frames nor owned by
 * another thread; the current picture is kept unless remove_current. */
963 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
967 /* release non reference frames */
968 for(i=0; i<s->picture_count; i++){
969 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
970 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
971 && (remove_current || &s->picture[i] != s->current_picture_ptr)
972 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
973 free_frame_buffer(s, &s->picture[i]);
/* Find a free slot in this thread's window of the picture array.
 * Preference order depends on shared: first slots that are completely
 * untouched (type == 0), then any slot without pixel data.
 * NOTE(review): the return statements inside the loops are elided in
 * this dump; each matching slot index i is returned. */
978 int ff_find_unused_picture(MpegEncContext *s, int shared){
982 for(i=s->picture_range_start; i<s->picture_range_end; i++){
983 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
987 for(i=s->picture_range_start; i<s->picture_range_end; i++){
988 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
991 for(i=s->picture_range_start; i<s->picture_range_end; i++){
992 if (s->picture[i].f.data[0] == NULL)
997 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
998 /* We could return -1, but the codec would crash trying to draw into a
999 * non-existing frame anyway. This is safer than waiting for a random crash.
1000 * Also the return of this is never useful, an encoder must only allocate
1001 * as much as allowed in the specification. This has no relationship to how
1002 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1003 * enough for such valid streams).
1004 * Plus, a decoder has to check stream validity and remove frames if too
1005 * many reference frames are around. Waiting for "OOM" is not correct at
1006 * all. Similarly, missing reference frames have to be replaced by
1007 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the per-coefficient noise-reduction offsets from the running
 * DCT error statistics, halving the accumulators periodically so the
 * estimate tracks recent frames rather than the whole stream. */
1013 static void update_noise_reduction(MpegEncContext *s){
1016 for(intra=0; intra<2; intra++){
/* decay the statistics once enough blocks have been accumulated */
1017 if(s->dct_count[intra] > (1<<16)){
1018 for(i=0; i<64; i++){
1019 s->dct_error_sum[intra][i] >>=1;
1021 s->dct_count[intra] >>= 1;
1024 for(i=0; i<64; i++){
1025 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1031 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1033 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Set up s->current/last/next_picture for the frame about to be coded or
 * decoded: release stale frames, pick or allocate the current picture,
 * allocate dummy reference frames when references are missing, adjust
 * linesizes for field pictures, and select the dequantizer functions.
 * Returns < 0 on allocation failure (error paths are partially outside
 * this excerpt). */
1039 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1041 /* mark&release old frames */
1042 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1043 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1044 free_frame_buffer(s, s->last_picture_ptr);
1046 /* release forgotten pictures */
1047 /* if(mpeg124/h263) */
1049 for(i=0; i<s->picture_count; i++){
1050 if (s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1051 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1052 free_frame_buffer(s, &s->picture[i]);
1060 ff_release_unused_pictures(s, 1);
1062 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1063 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1065 i= ff_find_unused_picture(s, 0);
1066 pic= &s->picture[i];
/* Reference flags: H.264 stores the picture structure here, other
 * codecs use 3 (both fields) for any non-B picture. */
1069 pic->f.reference = 0;
1071 if (s->codec_id == CODEC_ID_H264)
1072 pic->f.reference = s->picture_structure;
1073 else if (s->pict_type != AV_PICTURE_TYPE_B)
1074 pic->f.reference = 3;
1077 pic->f.coded_picture_number = s->coded_picture_number++;
1079 if(ff_alloc_picture(s, pic, 0) < 0)
1082 s->current_picture_ptr= pic;
1083 //FIXME use only the vars from current_pic
1084 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1085 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
/* For field pictures top_field_first is derived from which field is
 * coded first rather than from the container flag. */
1086 if(s->picture_structure != PICT_FRAME)
1087 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1089 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1090 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1093 s->current_picture_ptr->f.pict_type = s->pict_type;
1094 // if(s->flags && CODEC_FLAG_QSCALE)
1095 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1096 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1098 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Advance the reference chain for non-B pictures: last <- next,
 * next <- current. */
1100 if (s->pict_type != AV_PICTURE_TYPE_B) {
1101 s->last_picture_ptr= s->next_picture_ptr;
1103 s->next_picture_ptr= s->current_picture_ptr;
1105 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1106 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1107 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1108 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1109 s->pict_type, s->dropable);*/
1111 if(s->codec_id != CODEC_ID_H264){
/* Missing references (stream starts on a P/B frame, or field-coded
 * keyframe): allocate grey dummy frames so MC has something to read. */
1112 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1113 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1114 if (s->pict_type != AV_PICTURE_TYPE_I)
1115 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1116 else if (s->picture_structure != PICT_FRAME)
1117 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1119 /* Allocate a dummy frame */
1120 i= ff_find_unused_picture(s, 0);
1121 s->last_picture_ptr= &s->picture[i];
1122 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* Mark both fields as fully decoded so frame-threading consumers
 * never wait on the dummy frame. */
1124 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1125 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1127 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1128 /* Allocate a dummy frame */
1129 i= ff_find_unused_picture(s, 0);
1130 s->next_picture_ptr= &s->picture[i];
1131 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1133 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1134 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1138 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1139 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1141 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* Field pictures (non-H.264): address only every second line by
 * doubling the linesizes; the bottom field additionally starts one
 * line into the buffer. */
1143 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1146 if(s->picture_structure == PICT_BOTTOM_FIELD){
1147 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1149 s->current_picture.f.linesize[i] *= 2;
1150 s->last_picture.f.linesize[i] *= 2;
1151 s->next_picture.f.linesize[i] *= 2;
1155 s->error_recognition= avctx->error_recognition;
1157 /* set dequantizer, we can't do it during init as it might change for mpeg4
1158 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1159 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1160 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1161 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1162 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1163 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1164 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1166 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1167 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1170 if(s->dct_error_sum){
1171 assert(s->avctx->noise_reduction && s->encoding);
1173 update_noise_reduction(s);
1176 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1177 return ff_xvmc_field_start(s, avctx);
1182 /* generic function for encode/decode called after a frame has been coded/decoded */
1183 void MPV_frame_end(MpegEncContext *s)
/* Finish the current frame: pad the picture edges for unrestricted MV,
 * record last-picture statistics, write the (possibly modified)
 * current_picture copy back into s->picture[], release non-reference
 * frames and report decode progress to frame threads. */
1186 /* redraw edges for the frame if decoding didn't complete */
1187 //just to make sure that all data is rendered.
1188 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1189 ff_xvmc_field_end(s);
1190 }else if((s->error_count || s->encoding)
1191 && !s->avctx->hwaccel
1192 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1193 && s->unrestricted_mv
1194 && s->current_picture.f.reference
1196 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
/* Replicate border pixels into the EDGE_WIDTH padding of each plane so
 * motion vectors pointing outside the picture read valid data. */
1197 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1198 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1199 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1200 s->h_edge_pos , s->v_edge_pos,
1201 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1202 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1203 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1204 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1205 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1206 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1207 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1212 s->last_pict_type = s->pict_type;
1213 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1214 if(s->pict_type!=AV_PICTURE_TYPE_B){
1215 s->last_non_b_pict_type= s->pict_type;
1218 /* copy back current_picture variables */
1219 for(i=0; i<MAX_PICTURE_COUNT; i++){
1220 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1221 s->picture[i]= s->current_picture;
1225 assert(i<MAX_PICTURE_COUNT);
1229 /* release non-reference frames */
1230 for(i=0; i<s->picture_count; i++){
1231 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1232 free_frame_buffer(s, &s->picture[i]);
1236 // clear copies, to avoid confusion
1238 memset(&s->last_picture, 0, sizeof(Picture));
1239 memset(&s->next_picture, 0, sizeof(Picture));
1240 memset(&s->current_picture, 0, sizeof(Picture));
1242 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1244 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1245 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1250 * draws a line from (ex, ey) -> (sx, sy).
1251 * @param w width of the image
1252 * @param h height of the image
1253 * @param stride stride/linesize of the image
1254 * @param color color (brightness) of the line
1256 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Bresenham-style line with 16.16 fixed-point sub-pixel blending:
 * the dominant axis is walked one pixel at a time while the fractional
 * position on the other axis distributes 'color' between two adjacent
 * pixels.  Endpoints are clipped to the image first. */
1259 sx= av_clip(sx, 0, w-1);
1260 sy= av_clip(sy, 0, h-1);
1261 ex= av_clip(ex, 0, w-1);
1262 ey= av_clip(ey, 0, h-1);
1264 buf[sy*stride + sx]+= color;
1266 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* mostly-horizontal: ensure sx <= ex, then step in x */
1268 FFSWAP(int, sx, ex);
1269 FFSWAP(int, sy, ey);
1271 buf+= sx + sy*stride;
1273 f= ((ey-sy)<<16)/ex;
1274 for(x= 0; x <= ex; x++){
1277 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1278 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* mostly-vertical: ensure sy <= ey, then step in y */
1282 FFSWAP(int, sx, ex);
1283 FFSWAP(int, sy, ey);
1285 buf+= sx + sy*stride;
1287 if(ey) f= ((ex-sx)<<16)/ey;
1289 for(y= 0; y <= ey; y++){
1292 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1293 buf[y*stride + x+1]+= (color* fr )>>16;
1299 * draws an arrow from (ex, ey) -> (sx, sy).
1300 * @param w width of the image
1301 * @param h height of the image
1302 * @param stride stride/linesize of the image
1303 * @param color color of the arrow
1305 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Draw the arrow shaft plus, for vectors longer than 3 pixels, two
 * short head strokes at the (sx,sy) end.  Endpoints are pre-clipped
 * with a 100-pixel margin so draw_line's own clipping handles the
 * final bounds. */
1308 sx= av_clip(sx, -100, w+100);
1309 sy= av_clip(sy, -100, h+100);
1310 ex= av_clip(ex, -100, w+100);
1311 ey= av_clip(ey, -100, h+100);
1316 if(dx*dx + dy*dy > 3*3){
/* normalize (rx,ry) to a fixed head length via the vector magnitude */
1319 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1321 //FIXME subpixel accuracy
1322 rx= ROUNDED_DIV(rx*3<<4, length);
1323 ry= ROUNDED_DIV(ry*3<<4, length);
1325 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1326 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1328 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1332 * prints debugging info for the given picture.
1334 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
/* Two independent debug outputs controlled by avctx->debug /
 * avctx->debug_mv:
 *  1) a textual per-macroblock dump (skip counts, qscale, MB type) to
 *     the log;
 *  2) a visual overlay (motion vectors as arrows, QP / MB type as
 *     chroma tint, partition boundaries as XORed lines) drawn into a
 *     copy of the picture (s->visualization_buffer). */
1336 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1338 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1341 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1342 switch (pict->pict_type) {
1343 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1344 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1345 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1346 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1347 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1348 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1350 for(y=0; y<s->mb_height; y++){
1351 for(x=0; x<s->mb_width; x++){
1352 if(s->avctx->debug&FF_DEBUG_SKIP){
1353 int count= s->mbskip_table[x + y*s->mb_stride];
1354 if(count>9) count=9;
1355 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1357 if(s->avctx->debug&FF_DEBUG_QP){
1358 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1360 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1361 int mb_type= pict->mb_type[x + y*s->mb_stride];
1362 //Type & MV direction
1364 av_log(s->avctx, AV_LOG_DEBUG, "P");
1365 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1366 av_log(s->avctx, AV_LOG_DEBUG, "A");
1367 else if(IS_INTRA4x4(mb_type))
1368 av_log(s->avctx, AV_LOG_DEBUG, "i");
1369 else if(IS_INTRA16x16(mb_type))
1370 av_log(s->avctx, AV_LOG_DEBUG, "I");
1371 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1372 av_log(s->avctx, AV_LOG_DEBUG, "d");
1373 else if(IS_DIRECT(mb_type))
1374 av_log(s->avctx, AV_LOG_DEBUG, "D");
1375 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1376 av_log(s->avctx, AV_LOG_DEBUG, "g");
1377 else if(IS_GMC(mb_type))
1378 av_log(s->avctx, AV_LOG_DEBUG, "G");
1379 else if(IS_SKIP(mb_type))
1380 av_log(s->avctx, AV_LOG_DEBUG, "S");
1381 else if(!USES_LIST(mb_type, 1))
1382 av_log(s->avctx, AV_LOG_DEBUG, ">");
1383 else if(!USES_LIST(mb_type, 0))
1384 av_log(s->avctx, AV_LOG_DEBUG, "<");
1386 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1387 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partition / segmentation of the MB */
1392 av_log(s->avctx, AV_LOG_DEBUG, "+");
1393 else if(IS_16X8(mb_type))
1394 av_log(s->avctx, AV_LOG_DEBUG, "-");
1395 else if(IS_8X16(mb_type))
1396 av_log(s->avctx, AV_LOG_DEBUG, "|");
1397 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1398 av_log(s->avctx, AV_LOG_DEBUG, " ");
1400 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: '=' marks interlaced macroblocks */
1403 if(IS_INTERLACED(mb_type))
1404 av_log(s->avctx, AV_LOG_DEBUG, "=");
1406 av_log(s->avctx, AV_LOG_DEBUG, " ");
1408 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1410 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1414 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1415 const int shift= 1 + s->quarter_sample;
1419 int h_chroma_shift, v_chroma_shift, block_height;
1420 const int width = s->avctx->width;
1421 const int height= s->avctx->height;
1422 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1423 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1424 s->low_delay=0; //needed to see the vectors without trashing the buffers
/* Draw into a private copy of the planes so the real reference frame
 * stays untouched. */
1426 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1428 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1429 pict->data[i]= s->visualization_buffer[i];
1431 pict->type= FF_BUFFER_TYPE_COPY;
1433 block_height = 16>>v_chroma_shift;
1435 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1437 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1438 const int mb_index= mb_x + mb_y*s->mb_stride;
/* motion vector overlay: type 0 = P forward, 1 = B forward,
 * 2 = B backward; each is skipped unless its debug flag is set
 * and the picture type matches. */
1439 if((s->avctx->debug_mv) && pict->motion_val){
1441 for(type=0; type<3; type++){
1444 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1448 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1452 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1457 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per partition: 4 for 8x8, 2 for 16x8/8x16, 1 for 16x16 */
1460 if(IS_8X8(pict->mb_type[mb_index])){
1463 int sx= mb_x*16 + 4 + 8*(i&1);
1464 int sy= mb_y*16 + 4 + 8*(i>>1);
1465 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1466 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1467 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1468 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1470 }else if(IS_16X8(pict->mb_type[mb_index])){
1474 int sy=mb_y*16 + 4 + 8*i;
1475 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1476 int mx=(pict->motion_val[direction][xy][0]>>shift);
1477 int my=(pict->motion_val[direction][xy][1]>>shift);
1479 if(IS_INTERLACED(pict->mb_type[mb_index]))
1482 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1484 }else if(IS_8X16(pict->mb_type[mb_index])){
1487 int sx=mb_x*16 + 4 + 8*i;
1489 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1490 int mx=(pict->motion_val[direction][xy][0]>>shift);
1491 int my=(pict->motion_val[direction][xy][1]>>shift);
1493 if(IS_INTERLACED(pict->mb_type[mb_index]))
1496 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1499 int sx= mb_x*16 + 8;
1500 int sy= mb_y*16 + 8;
1501 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1502 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1503 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1504 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP overlay: tint both chroma planes with a brightness proportional
 * to the macroblock qscale. */
1508 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1509 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1511 for(y=0; y<block_height; y++){
1512 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1513 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type overlay: colour each macroblock's chroma by its type using
 * a (U,V) pair picked on a hue circle via COLOR(). */
1516 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1517 int mb_type= pict->mb_type[mb_index];
1520 #define COLOR(theta, r)\
1521 u= (int)(128 + r*cos(theta*3.141592/180));\
1522 v= (int)(128 + r*sin(theta*3.141592/180));
1526 if(IS_PCM(mb_type)){
1528 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1530 }else if(IS_INTRA4x4(mb_type)){
1532 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1534 }else if(IS_DIRECT(mb_type)){
1536 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1538 }else if(IS_GMC(mb_type)){
1540 }else if(IS_SKIP(mb_type)){
1542 }else if(!USES_LIST(mb_type, 1)){
1544 }else if(!USES_LIST(mb_type, 0)){
1547 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* broadcast the U/V byte across a 64-bit word to fill 8 pixels at once */
1551 u*= 0x0101010101010101ULL;
1552 v*= 0x0101010101010101ULL;
1553 for(y=0; y<block_height; y++){
1554 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1555 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR partition boundaries into luma (visible on any background) */
1559 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1560 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1561 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1563 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1565 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1567 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1568 int dm= 1 << (mv_sample_log2-2);
1570 int sx= mb_x*16 + 8*(i&1);
1571 int sy= mb_y*16 + 8*(i>>1);
1572 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* mark sub-partitions whose MVs differ within the 8x8 block */
1574 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1575 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1577 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1578 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1579 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1583 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1587 s->mbskip_table[mb_index]=0;
1593 static inline int hpel_motion_lowres(MpegEncContext *s,
1594 uint8_t *dest, uint8_t *src,
1595 int field_based, int field_select,
1596 int src_x, int src_y,
1597 int width, int height, int stride,
1598 int h_edge_pos, int v_edge_pos,
1599 int w, int h, h264_chroma_mc_func *pix_op,
1600 int motion_x, int motion_y)
/* Half-pel motion compensation for one block in lowres decoding mode.
 * The motion vector is split into an integer source offset and a
 * sub-pel fraction (sx, sy) fed to the H.264 chroma MC routines, which
 * do bilinear interpolation.  Sources outside the picture go through
 * emulated_edge_mc.  NOTE(review): the return value/statement is not
 * visible in this excerpt. */
1602 const int lowres= s->avctx->lowres;
1603 const int op_index= FFMIN(lowres, 2);
1604 const int s_mask= (2<<lowres)-1;
1608 if(s->quarter_sample){
1613 sx= motion_x & s_mask;
1614 sy= motion_y & s_mask;
1615 src_x += motion_x >> (lowres+1);
1616 src_y += motion_y >> (lowres+1);
1618 src += src_y * stride + src_x;
1620 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1621 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1622 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1623 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1624 src= s->edge_emu_buffer;
/* rescale the sub-pel fraction to the 1/8-pel range pix_op expects */
1628 sx= (sx << 2) >> lowres;
1629 sy= (sy << 2) >> lowres;
1632 pix_op[op_index](dest, src, stride, h, sx, sy);
1636 /* apply one mpeg motion vector to the three components */
1637 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1638 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1639 int field_based, int bottom_field, int field_select,
1640 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1641 int motion_x, int motion_y, int h, int mb_y)
/* Lowres motion compensation for the luma and both chroma planes of
 * one macroblock (or one field of it).  Chroma MV derivation depends on
 * out_format: H.263 keeps a half-resolution vector with OR-ed rounding,
 * H.261 uses full-pel chroma vectors, otherwise MPEG-style halving. */
1643 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1644 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1645 const int lowres= s->avctx->lowres;
1646 const int op_index= FFMIN(lowres, 2);
1647 const int block_s= 8>>lowres;
1648 const int s_mask= (2<<lowres)-1;
1649 const int h_edge_pos = s->h_edge_pos >> lowres;
1650 const int v_edge_pos = s->v_edge_pos >> lowres;
1651 linesize = s->current_picture.f.linesize[0] << field_based;
1652 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1654 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate the vertical field offset in lowres coordinates */
1660 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1663 sx= motion_x & s_mask;
1664 sy= motion_y & s_mask;
1665 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1666 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1668 if (s->out_format == FMT_H263) {
1669 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1670 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1673 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1676 uvsx = (2*mx) & s_mask;
1677 uvsy = (2*my) & s_mask;
1678 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1679 uvsrc_y = mb_y*block_s + (my >> lowres);
1685 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1686 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1689 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1690 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1691 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads go through the edge emulation buffer */
1693 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1694 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1695 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1696 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1697 ptr_y = s->edge_emu_buffer;
1698 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1699 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1700 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1701 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1702 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1703 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1709 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1710 dest_y += s->linesize;
1711 dest_cb+= s->uvlinesize;
1712 dest_cr+= s->uvlinesize;
1716 ptr_y += s->linesize;
1717 ptr_cb+= s->uvlinesize;
1718 ptr_cr+= s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel range pix_op expects */
1721 sx= (sx << 2) >> lowres;
1722 sy= (sy << 2) >> lowres;
1723 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1725 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1726 uvsx= (uvsx << 2) >> lowres;
1727 uvsy= (uvsy << 2) >> lowres;
1728 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1729 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1731 //FIXME h261 lowres loop filter
1734 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1735 uint8_t *dest_cb, uint8_t *dest_cr,
1736 uint8_t **ref_picture,
1737 h264_chroma_mc_func *pix_op,
/* Chroma motion compensation for 4MV (four 8x8 luma vectors) mode in
 * lowres: a single chroma vector is derived with the special H.263
 * rounding and applied to both chroma planes.
 * NOTE(review): the trailing (mx, my) parameters of the signature are
 * on a line not visible in this excerpt. */
1739 const int lowres= s->avctx->lowres;
1740 const int op_index= FFMIN(lowres, 2);
1741 const int block_s= 8>>lowres;
1742 const int s_mask= (2<<lowres)-1;
1743 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1744 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1745 int emu=0, src_x, src_y, offset, sx, sy;
1748 if(s->quarter_sample){
1753 /* In case of 8X8, we construct a single chroma motion vector
1754 with a special rounding */
1755 mx= ff_h263_round_chroma(mx);
1756 my= ff_h263_round_chroma(my);
1760 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1761 src_y = s->mb_y*block_s + (my >> (lowres+1));
1763 offset = src_y * s->uvlinesize + src_x;
1764 ptr = ref_picture[1] + offset;
1765 if(s->flags&CODEC_FLAG_EMU_EDGE){
1766 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1767 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1768 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1769 ptr= s->edge_emu_buffer;
/* rescale sub-pel fractions to the 1/8-pel range pix_op expects */
1773 sx= (sx << 2) >> lowres;
1774 sy= (sy << 2) >> lowres;
1775 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and edge handling as Cb */
1777 ptr = ref_picture[2] + offset;
1779 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1780 ptr= s->edge_emu_buffer;
1782 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1786 * motion compensation of a single macroblock
1788 * @param dest_y luma destination pointer
1789 * @param dest_cb chroma cb/u destination pointer
1790 * @param dest_cr chroma cr/v destination pointer
1791 * @param dir direction (0->forward, 1->backward)
1792 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1793 * @param pix_op halfpel motion compensation function (average or put normally)
1794 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1796 static inline void MPV_motion_lowres(MpegEncContext *s,
1797 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1798 int dir, uint8_t **ref_picture,
1799 h264_chroma_mc_func *pix_op)
/* Lowres counterpart of MPV_motion: dispatch on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime) and hand each partition to
 * mpeg_motion_lowres / hpel_motion_lowres. */
1803 const int lowres= s->avctx->lowres;
1804 const int block_s= 8>>lowres;
1809 switch(s->mv_type) {
1811 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1813 ref_picture, pix_op,
1814 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 8x8: four luma vectors, chroma handled once with the averaged MV */
1820 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1821 ref_picture[0], 0, 0,
1822 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1823 s->width, s->height, s->linesize,
1824 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1825 block_s, block_s, pix_op,
1826 s->mv[dir][i][0], s->mv[dir][i][1]);
1828 mx += s->mv[dir][i][0];
1829 my += s->mv[dir][i][1];
1832 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1833 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field MVs: two half-height predictions (top then bottom field) in a
 * frame picture, or a single prediction in a field picture */
1836 if (s->picture_structure == PICT_FRAME) {
1838 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1839 1, 0, s->field_select[dir][0],
1840 ref_picture, pix_op,
1841 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1843 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1844 1, 1, s->field_select[dir][1],
1845 ref_picture, pix_op,
1846 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* opposite-parity reference in the same frame comes from the
 * current picture when this is the second field */
1848 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1849 ref_picture = s->current_picture_ptr->f.data;
1852 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1853 0, 0, s->field_select[dir][0],
1854 ref_picture, pix_op,
1855 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1860 uint8_t ** ref2picture;
1862 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1863 ref2picture= ref_picture;
1865 ref2picture = s->current_picture_ptr->f.data;
1868 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1869 0, 0, s->field_select[dir][i],
1870 ref2picture, pix_op,
1871 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1873 dest_y += 2*block_s*s->linesize;
1874 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1875 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* dual prime: predictions from both parities are averaged */
1879 if(s->picture_structure == PICT_FRAME){
1883 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1885 ref_picture, pix_op,
1886 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1888 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1892 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1893 0, 0, s->picture_structure != i+1,
1894 ref_picture, pix_op,
1895 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1897 // after put we make avg of the same block
1898 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1900 //opposite parity is always in the same frame if this is second field
1901 if(!s->first_field){
1902 ref_picture = s->current_picture_ptr->f.data;
1912 * find the lowest MB row referenced in the MVs
1914 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* Used by frame-threaded decoding to know how far the reference frame
 * must be decoded before this MB's motion compensation can run.
 * MVs are normalized to quarter-pel (qpel_shift), the largest vertical
 * excursion is converted to whole MB rows (>> 6 == /64 quarter-pels per
 * 16-pel MB row) and clamped to the picture.  Field pictures and the
 * unhandled mv types fall back to the conservative last row. */
1916 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1917 int my, off, i, mvs;
1919 if (s->picture_structure != PICT_FRAME) goto unhandled;
1921 switch (s->mv_type) {
1935 for (i = 0; i < mvs; i++) {
1936 my = s->mv[dir][i][1]<<qpel_shift;
1937 my_max = FFMAX(my_max, my);
1938 my_min = FFMIN(my_min, my);
1941 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1943 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1945 return s->mb_height-1;
1948 /* put block[] to dest[]: dequantize with the intra dequantizer, then
 * inverse-transform and store (overwrite) the result */
1949 static inline void put_dct(MpegEncContext *s,
1950 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1952 s->dct_unquantize_intra(s, block, i, qscale);
1953 s->dsp.idct_put (dest, line_size, block);
1956 /* add block[] to dest[]: inverse-transform and add onto the prediction;
 * skipped entirely when the block has no coded coefficients */
1957 static inline void add_dct(MpegEncContext *s,
1958 DCTELEM *block, int i, uint8_t *dest, int line_size)
1960 if (s->block_last_index[i] >= 0) {
1961 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct but for blocks that still need inter dequantization
 * before the inverse transform. */
1965 static inline void add_dequant_dct(MpegEncContext *s,
1966 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1968 if (s->block_last_index[i] >= 0) {
1969 s->dct_unquantize_inter(s, block, i, qscale);
1971 s->dsp.idct_add (dest, line_size, block);
1976 * cleans dc, ac, coded_block for the current non intra MB
1978 void ff_clean_intra_table_entries(MpegEncContext *s)
/* Reset the intra prediction state for the current macroblock:
 * luma and chroma DC predictors back to the reset value (1024),
 * AC prediction buffers zeroed, MSMPEG4 (v3+) coded-block pattern
 * cleared, and the mbintra flag dropped. */
1980 int wrap = s->b8_stride;
1981 int xy = s->block_index[0];
/* luma: four 8x8 blocks of the MB */
1984 s->dc_val[0][xy + 1 ] =
1985 s->dc_val[0][xy + wrap] =
1986 s->dc_val[0][xy + 1 + wrap] = 1024;
1988 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1989 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1990 if (s->msmpeg4_version>=3) {
1991 s->coded_block[xy ] =
1992 s->coded_block[xy + 1 ] =
1993 s->coded_block[xy + wrap] =
1994 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one block per plane, indexed at MB granularity */
1997 wrap = s->mb_stride;
1998 xy = s->mb_x + s->mb_y * wrap;
2000 s->dc_val[2][xy] = 1024;
2002 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2003 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2005 s->mbintra_table[xy]= 0;
2008 /* generic function called after a macroblock has been parsed by the
2009 decoder or after it has been encoded by the encoder.
2011 Important variables used:
2012 s->mb_intra : true if intra macroblock
2013 s->mv_dir : motion vector direction
2014 s->mv_type : motion vector type
2015 s->mv : motion vector
2016 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock into the current picture: motion compensation
 * (full-res or lowres path) followed by dequant/IDCT of the residual blocks.
 * is_mpeg12 selects the MPEG-1/2-specific code paths at compile time so the
 * generic branches can be optimized away; lowres_flag selects the reduced
 * resolution decode path. */
2018 static av_always_inline
2019 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2020 int lowres_flag, int is_mpeg12)
2022 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware acceleration: the MB is handed to the hw path and nothing
 * further is done in software. */
2023 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2024 ff_xvmc_decode_mb(s);//xvmc uses pblocks
/* Debug aid: copy the (un-permuted) DCT coefficients of this MB into
 * dct_coeff and print them. */
2028 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2029 /* save DCT coefficients */
2031 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2032 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2034 for(j=0; j<64; j++){
/* undo the IDCT input permutation so coefficients are stored in natural order */
2035 *dct++ = block[i][s->dsp.idct_permutation[j]];
2036 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2038 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2042 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2044 /* update DC predictors for P macroblocks */
/* Non-intra MB in an H.263-style predictive codec: reset stale intra
 * prediction state left over from a previous frame. */
2046 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2047 if(s->mbintra_table[mb_xy])
2048 ff_clean_intra_table_entries(s);
/* reset DC predictor to the mid-level value for the current DC precision */
2052 s->last_dc[2] = 128 << s->intra_dc_precision;
/* intra MB: remember it so the predictors can be cleaned later */
2055 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2056 s->mbintra_table[mb_xy]=1;
/* Skip actual pixel reconstruction while encoding B-frames/intra-only
 * content without RD mb decision and without PSNR computation — the
 * reconstructed pixels would never be used. */
2058 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2059 uint8_t *dest_y, *dest_cb, *dest_cr;
2060 int dct_linesize, dct_offset;
2061 op_pixels_func (*op_pix)[4];
2062 qpel_mc_func (*op_qpix)[16];
2063 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2064 const int uvlinesize = s->current_picture.f.linesize[1];
2065 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2066 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2068 /* avoid copy if macroblock skipped in last frame too */
2069 /* skip only during decoding as we might trash the buffers during encoding a bit */
2071 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2072 const int age = s->current_picture.f.age;
2076 if (s->mb_skipped) {
2078 assert(s->pict_type!=AV_PICTURE_TYPE_I);
/* mbskip counts how many consecutive frames this MB was skipped;
 * saturated at 99 so the counter cannot wrap. */
2080 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2081 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2083 /* if previous was skipped too, then nothing to do ! */
2084 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2087 } else if(!s->current_picture.f.reference) {
2088 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2089 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2091 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: lines of one block are every second picture line */
2095 dct_linesize = linesize << s->interlaced_dct;
2096 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2100 dest_cb= s->dest[1];
2101 dest_cr= s->dest[2];
/* not readable (e.g. B-frame whose picture is still referenced by MC):
 * reconstruct into the scratchpad instead of the visible picture */
2103 dest_y = s->b_scratchpad;
2104 dest_cb= s->b_scratchpad+16*linesize;
2105 dest_cr= s->b_scratchpad+32*linesize;
2109 /* motion handling */
2110 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame-threaded decoding: wait until the reference rows needed by the
 * motion vectors have been decoded by the other thread */
2113 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2114 if (s->mv_dir & MV_DIR_FORWARD) {
2115 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2117 if (s->mv_dir & MV_DIR_BACKWARD) {
2118 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* lowres MC uses the h264 chroma MC functions; bidirectional prediction
 * switches from put to avg after the forward pass */
2123 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2125 if (s->mv_dir & MV_DIR_FORWARD) {
2126 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2127 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2129 if (s->mv_dir & MV_DIR_BACKWARD) {
2130 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC: rounding选 — no_rounding only applies to
 * unidirectional prediction, B-frames always use rounded put */
2133 op_qpix= s->me.qpel_put;
2134 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2135 op_pix = s->dsp.put_pixels_tab;
2137 op_pix = s->dsp.put_no_rnd_pixels_tab;
2139 if (s->mv_dir & MV_DIR_FORWARD) {
2140 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2141 op_pix = s->dsp.avg_pixels_tab;
2142 op_qpix= s->me.qpel_avg;
2144 if (s->mv_dir & MV_DIR_BACKWARD) {
2145 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2150 /* skip dequant / idct if we are really late ;) */
2151 if(s->avctx->skip_idct){
2152 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2153 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2154 || s->avctx->skip_idct >= AVDISCARD_ALL)
2158 /* add dct residue */
/* Codecs whose blocks are not already dequantized need dequant+IDCT+add;
 * MPEG-1/2 and non-mpeg_quant MPEG-4 store dequantized blocks already. */
2159 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2160 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2161 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2162 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2163 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2164 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2166 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2167 if (s->chroma_y_shift){
2168 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2169 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
/* 4:2:2 — two chroma blocks per component, second one dct_offset below */
2173 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2174 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2175 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2176 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* blocks already dequantized: plain IDCT + add */
2179 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2180 add_dct(s, block[0], 0, dest_y , dct_linesize);
2181 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2182 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2183 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2185 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2186 if(s->chroma_y_shift){//Chroma420
2187 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2188 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2191 dct_linesize = uvlinesize << s->interlaced_dct;
2192 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2194 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2195 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2196 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2197 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2198 if(!s->chroma_x_shift){//Chroma444
2199 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2200 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2201 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2202 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* WMV2 has its own block-add routine */
2207 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2208 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2211 /* dct only in intra block */
/* intra MB: pixels are written (put), not added */
2212 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2213 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2214 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2215 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2216 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2218 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2219 if(s->chroma_y_shift){
2220 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2221 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2225 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2226 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2227 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2228 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: blocks are already dequantized, direct idct_put */
2232 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2233 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2234 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2235 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2237 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2238 if(s->chroma_y_shift){
2239 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2240 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2243 dct_linesize = uvlinesize << s->interlaced_dct;
2244 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2246 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2247 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2248 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2249 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2250 if(!s->chroma_x_shift){//Chroma444
2251 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2252 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2253 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2254 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* MB was reconstructed into the scratchpad; copy it to the picture now */
2262 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2263 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2264 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the always-inline worker with compile-time
 * constants for (lowres, is_mpeg12) so four specialized versions are built. */
2269 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2271 if(s->out_format == FMT_MPEG1) {
2272 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2273 else MPV_decode_mb_internal(s, block, 0, 1);
2276 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2277 else MPV_decode_mb_internal(s, block, 0, 0);
/* Draw the edges of a just-decoded band of rows and notify the user's
 * draw_horiz_band callback, honoring field pictures and slice flags. */
2282 * @param h is the normal height, this will be reduced automatically if needed for the last row
2284 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2285 const int field_pic= s->picture_structure != PICT_FRAME;
/* Pad the picture borders (needed for unrestricted MVs) unless a hw
 * accelerator owns the buffers or edge emulation is requested. */
2291 if (!s->avctx->hwaccel
2292 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2293 && s->unrestricted_mv
2294 && s->current_picture.f.reference
2296 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2297 int sides = 0, edge_h;
2298 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2299 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2300 if (y==0) sides |= EDGE_TOP;
2301 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clip the band height to the picture */
2303 edge_h= FFMIN(h, s->v_edge_pos - y);
2305 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize,
2306 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
/* chroma planes: positions/sizes scaled by the chroma subsampling shifts */
2307 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2308 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2309 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2310 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2313 h= FFMIN(h, s->avctx->height - y);
/* field pictures: only report once per frame unless the user opted in */
2315 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2317 if (s->avctx->draw_horiz_band) {
/* Select which picture the callback sees: the current one for B-frames /
 * low-delay / coded-order output, otherwise the last reference. */
2321 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2322 src= (AVFrame*)s->current_picture_ptr;
2323 else if(s->last_picture_ptr)
2324 src= (AVFrame*)s->last_picture_ptr;
2328 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band start within each plane */
2334 offset[0]= y * s->linesize;
2336 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2342 s->avctx->draw_horiz_band(s->avctx, src, offset,
2343 y, s->picture_structure, h);
/* Set up the per-MB block indices (into the 8x8-block-granular tables) and
 * the destination pointers s->dest[] for the MB at (mb_x, mb_y). */
2347 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2348 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2349 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the MB size in pixels; reduced by lowres shift */
2350 const int mb_size= 4 - s->avctx->lowres;
/* indices 0-3: the four luma 8x8 blocks (b8_stride granularity) */
2352 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2353 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2354 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2355 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* indices 4-5: chroma blocks, stored after the luma area */
2356 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2357 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2358 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* dest points at the MB to the LEFT (mb_x - 1); presumably advanced per-MB
 * by the caller — NOTE(review): confirm against the MB loop. */
2360 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2361 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2362 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2364 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2366 if(s->picture_structure==PICT_FRAME){
2367 s->dest[0] += s->mb_y * linesize << mb_size;
2368 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2369 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows of one field are interleaved, halve the row index */
2371 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2372 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2373 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2374 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush all decoder state: release every internally/user-allocated picture
 * buffer, drop the reference-picture pointers, and reset the parser state.
 * Called e.g. on seek. */
2379 void ff_mpeg_flush(AVCodecContext *avctx){
2381 MpegEncContext *s = avctx->priv_data;
2383 if(s==NULL || s->picture==NULL)
2386 for(i=0; i<s->picture_count; i++){
2387 if (s->picture[i].f.data[0] &&
2388 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2389 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2390 free_frame_buffer(s, &s->picture[i]);
2392 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2394 s->mb_x= s->mb_y= 0;
/* reset the bitstream parser so no stale partial frame survives the flush */
2397 s->parse_context.state= -1;
2398 s->parse_context.frame_start_found= 0;
2399 s->parse_context.overread= 0;
2400 s->parse_context.overread_index= 0;
2401 s->parse_context.index= 0;
2402 s->parse_context.last_index= 0;
2403 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the DC scaler, AC by
 * qscale * intra_matrix >> 3, with the result forced odd ((level-1)|1)
 * as required by MPEG-1 to limit IDCT mismatch. */
2407 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2408 DCTELEM *block, int n, int qscale)
2410 int i, level, nCoeffs;
2411 const uint16_t *quant_matrix;
2413 nCoeffs= s->block_last_index[n];
/* DC coefficient: luma vs chroma DC scaler (selection by n is in the
 * elided branch above) */
2416 block[0] = block[0] * s->y_dc_scale;
2418 block[0] = block[0] * s->c_dc_scale;
2419 /* XXX: only mpeg1 */
2420 quant_matrix = s->intra_matrix;
2421 for(i=1;i<=nCoeffs;i++) {
2422 int j= s->intra_scantable.permutated[i];
/* negative branch */
2427 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2428 level = (level - 1) | 1;
/* positive branch */
2431 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2432 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: level' = ((2*|level|+1) * qscale * matrix)
 * >> 4, forced odd, with the sign reapplied. Coefficient 0 is included
 * (no separate DC handling for inter blocks). */
2439 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2440 DCTELEM *block, int n, int qscale)
2442 int i, level, nCoeffs;
2443 const uint16_t *quant_matrix;
2445 nCoeffs= s->block_last_index[n];
2447 quant_matrix = s->inter_matrix;
2448 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is used for the permutation in the inter path too;
 * both scantables share the same permutation here */
2449 int j= s->intra_scantable.permutated[i];
/* negative branch */
2454 level = (((level << 1) + 1) * qscale *
2455 ((int) (quant_matrix[j]))) >> 4;
2456 level = (level - 1) | 1;
/* positive branch */
2459 level = (((level << 1) + 1) * qscale *
2460 ((int) (quant_matrix[j]))) >> 4;
2461 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: same scaling as MPEG-1 intra but WITHOUT
 * the odd-forcing step — MPEG-2 uses sum-based mismatch control instead
 * (omitted in this non-bitexact variant). alternate_scan forces a full
 * 64-coefficient pass. */
2468 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2469 DCTELEM *block, int n, int qscale)
2471 int i, level, nCoeffs;
2472 const uint16_t *quant_matrix;
2474 if(s->alternate_scan) nCoeffs= 63;
2475 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma DC scaler (selection by n is in the elided branch) */
2478 block[0] = block[0] * s->y_dc_scale;
2480 block[0] = block[0] * s->c_dc_scale;
2481 quant_matrix = s->intra_matrix;
2482 for(i=1;i<=nCoeffs;i++) {
2483 int j= s->intra_scantable.permutated[i];
/* negative branch */
2488 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive branch */
2491 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bitexact variant of MPEG-2 intra dequantization: identical scaling to
 * dct_unquantize_mpeg2_intra_c; presumably it additionally accumulates the
 * coefficient sum for spec-exact mismatch control in lines elided here —
 * NOTE(review): confirm against the full source. */
2498 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2499 DCTELEM *block, int n, int qscale)
2501 int i, level, nCoeffs;
2502 const uint16_t *quant_matrix;
2505 if(s->alternate_scan) nCoeffs= 63;
2506 else nCoeffs= s->block_last_index[n];
2509 block[0] = block[0] * s->y_dc_scale;
2511 block[0] = block[0] * s->c_dc_scale;
2512 quant_matrix = s->intra_matrix;
2513 for(i=1;i<=nCoeffs;i++) {
2514 int j= s->intra_scantable.permutated[i];
/* negative branch */
2519 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive branch */
2522 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*|level|+1) * qscale * matrix) >> 4 with
 * sign reapplied; no odd-forcing (unlike the MPEG-1 inter variant). */
2531 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2532 DCTELEM *block, int n, int qscale)
2534 int i, level, nCoeffs;
2535 const uint16_t *quant_matrix;
2538 if(s->alternate_scan) nCoeffs= 63;
2539 else nCoeffs= s->block_last_index[n];
2541 quant_matrix = s->inter_matrix;
2542 for(i=0; i<=nCoeffs; i++) {
/* same permutation as the intra scantable */
2543 int j= s->intra_scantable.permutated[i];
/* negative branch */
2548 level = (((level << 1) + 1) * qscale *
2549 ((int) (quant_matrix[j]))) >> 4;
/* positive branch */
2552 level = (((level << 1) + 1) * qscale *
2553 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: level' = level*qmul ± qadd (sign-
 * dependent), with qadd = (qscale-1)|1; the qmul assignment is in lines
 * elided here — NOTE(review): presumably qmul = qscale << 1, confirm. */
2562 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2563 DCTELEM *block, int n, int qscale)
2565 int i, level, qmul, qadd;
2568 assert(s->block_last_index[n]>=0);
/* DC: luma vs chroma DC scaler (selection by n is in the elided branch) */
2574 block[0] = block[0] * s->y_dc_scale;
2576 block[0] = block[0] * s->c_dc_scale;
2577 qadd = (qscale - 1) | 1;
/* raster_end maps the last coded index to the end of its raster row */
2584 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2586 for(i=1; i<=nCoeffs; i++) {
2590 level = level * qmul - qadd;
2592 level = level * qmul + qadd;
/* H.263-style inter dequantization: like the intra variant but starting at
 * coefficient 0 and without DC scaling. */
2599 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2600 DCTELEM *block, int n, int qscale)
2602 int i, level, qmul, qadd;
2605 assert(s->block_last_index[n]>=0);
2607 qadd = (qscale - 1) | 1;
2610 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2612 for(i=0; i<=nCoeffs; i++) {
/* negative branch subtracts qadd, positive branch adds it */
2616 level = level * qmul - qadd;
2618 level = level * qmul + qadd;
2626 * set qscale and update qscale dependent variables.
/* Clamps qscale (the upper bound 31 is visible below; the lower clamp is in
 * an elided line) and refreshes the chroma qscale and the luma/chroma DC
 * scalers derived from it. */
2628 void ff_set_qscale(MpegEncContext * s, int qscale)
2632 else if (qscale > 31)
2636 s->chroma_qscale= s->chroma_qscale_table[qscale];
2638 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2639 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report decode progress (last completed MB row) to waiting frame threads.
 * Not reported for B-frames, data-partitioned frames, or after an error.
 * NOTE(review): FF_B_TYPE is the deprecated alias of AV_PICTURE_TYPE_B used
 * elsewhere in this file — same value, but the spelling is inconsistent. */
2642 void MPV_report_decode_progress(MpegEncContext *s)
2644 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2645 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);