2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization routines.
 * ff_dct_common_init() below installs these C fallbacks into the
 * MpegEncContext function pointers; arch-specific inits may override them. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping: by default chroma uses the same qscale as luma.
 * Codecs with a nonlinear chroma quantizer install their own table. */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale
 * (MPEG-1 has a fixed 8-bit DC precision). Indexed by qscale. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, constant 4 — entry 1 of ff_mpeg2_dc_scale_table[]
 * (selected by the stream's intra_dc_precision; see table below). */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, constant 2 — entry 2 of ff_mpeg2_dc_scale_table[]. */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, constant 1 — entry 3 of ff_mpeg2_dc_scale_table[]. */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC-scale table selector: indexed by the bitstream's intra_dc_precision
 * field (0..3); entry 0 reuses the MPEG-1 constant-8 table. */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
/* Pixel-format candidate lists exported to decoders; contents not visible
 * in this chunk — presumably 4:2:0 formats terminated by PIX_FMT_NONE,
 * the hwaccel variant adding hardware-acceleration formats. TODO confirm. */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p,end) for an MPEG start code (0x000001xx), carrying the last
 * 32 bits seen in *state so a start code split across calls is found.
 * Returns a pointer just past the start code, or end if none was found.
 * The branches below skip ahead by how many of the trailing bytes can
 * be ruled out as the beginning of a 00 00 01 sequence. */
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
133 uint32_t tmp= *state << 8;
134 *state= tmp + *(p++);
135 if(tmp == 0x100 || p==end)
140 if (p[-1] > 1 ) p+= 3;
141 else if(p[-2] ) p+= 2;
142 else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
/* Install the C inverse-quantization routines, let each architecture
 * override them with optimized versions, then build the permuted
 * scantables matching the selected IDCT's coefficient permutation.
 * Common to encoder and decoder. */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
159 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
160 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
161 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
163 if(s->flags & CODEC_FLAG_BITEXACT)
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
165 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// Arch-specific initializers (each guarded by build-time config elsewhere):
168 MPV_common_init_mmx(s);
170 MPV_common_init_axp(s);
172 MPV_common_init_mlib(s);
174 MPV_common_init_mmi(s);
176 MPV_common_init_arm(s);
178 MPV_common_init_altivec(s);
180 MPV_common_init_bfin(s);
183 /* load & permutate scantables
184 note: only wmv uses different ones
186 if(s->alternate_scan){
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the copy as BUFFER_TYPE_COPY so that the
 * buffer-release paths know dst does not own the underlying frame data. */
199 void ff_copy_picture(Picture *dst, Picture *src){
201 dst->f.type= FF_BUFFER_TYPE_COPY;
205 * Release a frame buffer
/* Release a frame buffer back to the (possibly threaded) buffer pool and
 * free any hwaccel private data attached to it. */
207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
209 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
210 av_freep(&pic->f.hwaccel_picture_private);
214 * Allocate a frame buffer
/* Obtain pixel storage for pic via the (thread-aware) get_buffer path,
 * allocating hwaccel private data first if the hwaccel needs it.
 * Validates the returned buffer: age/type/data must be set, the stride
 * must match any previously established linesize (a mid-stream stride
 * change is not supported), and the two chroma planes must share a stride. */
216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
220 if (s->avctx->hwaccel) {
221 assert(!pic->hwaccel_picture_private);
222 if (s->avctx->hwaccel->priv_data_size) {
223 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
224 if (!pic->f.hwaccel_picture_private) {
225 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
231 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
233 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
234 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
235 r, pic->f.age, pic->f.type, pic->f.data[0]);
236 av_freep(&pic->f.hwaccel_picture_private);
240 if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
241 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
242 free_frame_buffer(s, pic);
246 if (pic->f.linesize[1] != pic->f.linesize[2]) {
247 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
248 free_frame_buffer(s, pic);
256 * allocates a Picture
257 * The pixels are allocated/set by calling get_buffer() if shared=0
/* Allocate a Picture and all per-picture side tables (qscale, mb_type,
 * motion vectors, ref indices, ...). If shared!=0 the pixel data is
 * caller-provided; otherwise alloc_frame_buffer() fetches it.
 * Returns 0 on success, frees everything and bails to `fail` on OOM. */
259 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
260 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
261 const int mb_array_size= s->mb_stride*s->mb_height;
262 const int b8_array_size= s->b8_stride*s->mb_height*2;
263 const int b4_array_size= s->b4_stride*s->mb_height*4;
268 assert(pic->f.data[0]);
269 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
270 pic->f.type = FF_BUFFER_TYPE_SHARED;
272 assert(!pic->f.data[0]);
274 if (alloc_frame_buffer(s, pic) < 0)
// First successful allocation establishes the strides used for the
// whole decode (alloc_frame_buffer enforces they never change).
277 s->linesize = pic->f.linesize[0];
278 s->uvlinesize = pic->f.linesize[1];
// Side tables are allocated only once per Picture slot (qscale_table
// doubles as the "already allocated" marker).
281 if (pic->f.qscale_table == NULL) {
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
285 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
289 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
290 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
// Offset by one MB row + 1 so out-of-frame neighbor accesses stay in bounds.
291 pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
292 pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
// H.264 stores MVs at 4x4 granularity (subsample_log2 == 2), everything
// else at 8x8 (subsample_log2 == 3); the +4 pad guards edge accesses.
293 if(s->out_format == FMT_H264){
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
296 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
297 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
299 pic->f.motion_subsample_log2 = 2;
300 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
302 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
303 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
304 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
306 pic->f.motion_subsample_log2 = 3;
308 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
309 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
311 pic->f.qstride = s->mb_stride;
312 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
315 /* It might be nicer if the application would keep track of these
316 * but it would require an API change. */
317 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
318 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
319 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
320 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
324 fail: //for the FF_ALLOCZ_OR_GOTO macro
326 free_frame_buffer(s, pic);
331 * deallocates a picture
/* Free a Picture's frame buffer (unless it is caller-owned/shared) and
 * every side table ff_alloc_picture() created. For shared pictures only
 * the data pointers are cleared — the pixels belong to the caller. */
333 static void free_picture(MpegEncContext *s, Picture *pic){
336 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
337 free_frame_buffer(s, pic);
340 av_freep(&pic->mb_var);
341 av_freep(&pic->mc_mb_var);
342 av_freep(&pic->mb_mean);
343 av_freep(&pic->f.mbskip_table);
344 av_freep(&pic->qscale_table_base);
345 av_freep(&pic->mb_type_base);
346 av_freep(&pic->f.dct_coeff);
347 av_freep(&pic->f.pan_scan);
348 pic->f.mb_type = NULL;
350 av_freep(&pic->motion_val_base[i]);
351 av_freep(&pic->f.ref_index[i]);
354 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
357 pic->f.data[i] = NULL;
/* Allocate the per-thread scratch state of an MpegEncContext: edge
 * emulation buffer, ME scratchpads, ME maps, optional noise-reduction
 * accumulator, DCT blocks, and (for H.263-family) AC prediction values.
 * On failure returns -1; cleanup is deferred to MPV_common_end(). */
363 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
364 int y_size = s->b8_stride * (2 * s->mb_height + 1);
365 int c_size = s->mb_stride * (s->mb_height + 1);
366 int yc_size = y_size + 2 * c_size;
369 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
370 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
371 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
373 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
374 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
// The scratchpads deliberately alias one allocation — they are never
// needed at the same time (obmc is offset by 16 into it).
375 s->me.temp= s->me.scratchpad;
376 s->rd_scratchpad= s->me.scratchpad;
377 s->b_scratchpad= s->me.scratchpad;
378 s->obmc_scratchpad= s->me.scratchpad + 16;
380 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
381 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
382 if(s->avctx->noise_reduction){
383 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
386 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
387 s->block= s->blocks[0];
390 s->pblocks[i] = &s->block[i];
393 if (s->out_format == FMT_H263) {
395 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
// ac_val layout: [0]=luma plane, [1]/[2]=chroma planes after y_size.
396 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
397 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
398 s->ac_val[2] = s->ac_val[1] + c_size;
403 return -1; //free() through MPV_common_end()
/* Free everything init_duplicate_context() allocated. The aliased
 * scratchpads are not freed individually — only me.scratchpad owns
 * the allocation; the aliases are merely NULLed. */
406 static void free_duplicate_context(MpegEncContext *s){
409 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
410 av_freep(&s->me.scratchpad);
414 s->obmc_scratchpad= NULL;
416 av_freep(&s->dct_error_sum);
417 av_freep(&s->me.map);
418 av_freep(&s->me.score_map);
419 av_freep(&s->blocks);
420 av_freep(&s->ac_val_base);
/* Save the per-thread pointer/state fields of src into bak so that
 * ff_update_duplicate_context() can memcpy the whole context and then
 * restore these thread-local members. */
424 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
425 #define COPY(a) bak->a= src->a
426 COPY(allocated_edge_emu_buffer);
427 COPY(edge_emu_buffer);
432 COPY(obmc_scratchpad);
439 COPY(me.map_generation);
/* Copy src's context into dst wholesale, then restore dst's own
 * per-thread buffers (saved into a stack copy via
 * backup_duplicate_context) and re-point pblocks at dst's blocks. */
451 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
454 //FIXME copy only needed parts
456 backup_duplicate_context(&bak, dst);
457 memcpy(dst, src, sizeof(MpegEncContext));
458 backup_duplicate_context(dst, &bak);
// pblocks must reference dst's own block array, not src's.
460 dst->pblocks[i] = &dst->block[i];
462 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder state from the source thread's
 * context (s1) into the destination thread's context (s). On first use
 * the whole context is cloned; afterwards only the fields a future frame
 * depends on (picture lists, picture numbers, GOP/interlace state,
 * leftover bitstream buffer, ...) are copied across. */
465 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
467 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
469 if(dst == src || !s1->context_initialized) return 0;
471 //FIXME can parameters change on I-frames? in that case dst may need a reinit
472 if(!s->context_initialized){
473 memcpy(s, s1, sizeof(MpegEncContext));
// Each thread gets its own disjoint picture range and its own
// bitstream buffer (the cloned pointers must not be shared).
476 s->picture_range_start += MAX_PICTURE_COUNT;
477 s->picture_range_end += MAX_PICTURE_COUNT;
478 s->bitstream_buffer = NULL;
479 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
484 s->avctx->coded_height = s1->avctx->coded_height;
485 s->avctx->coded_width = s1->avctx->coded_width;
486 s->avctx->width = s1->avctx->width;
487 s->avctx->height = s1->avctx->height;
489 s->coded_picture_number = s1->coded_picture_number;
490 s->picture_number = s1->picture_number;
491 s->input_picture_number = s1->input_picture_number;
493 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
494 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
// Pointers into s1's picture array are rebased into s's array.
496 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
497 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
498 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
500 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
502 //Error/bug resilience
503 s->next_p_frame_damaged = s1->next_p_frame_damaged;
504 s->workaround_bugs = s1->workaround_bugs;
// MPEG4 timing info: bulk-copies the field range [time_increment_bits, shape).
507 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
510 s->max_b_frames = s1->max_b_frames;
511 s->low_delay = s1->low_delay;
512 s->dropable = s1->dropable;
514 //DivX handling (doesn't work)
515 s->divx_packed = s1->divx_packed;
517 if(s1->bitstream_buffer){
518 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
// NOTE(review): av_fast_malloc's result is not checked here; on OOM
// s->bitstream_buffer would be NULL when memcpy'd below — verify
// upstream guarantees, or add a check.
519 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
520 s->bitstream_buffer_size = s1->bitstream_buffer_size;
521 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
522 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
525 //MPEG2/interlacing info
526 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
528 if(!s1->first_field){
529 s->last_pict_type= s1->pict_type;
530 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
// NOTE(review): FF_B_TYPE is the deprecated name; the rest of this file
// uses AV_PICTURE_TYPE_B (same value) — consider unifying.
532 if(s1->pict_type!=FF_B_TYPE){
533 s->last_non_b_pict_type= s1->pict_type;
541 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
542 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* Set the MpegEncContext fields that are common to encoding and decoding
 * to their defaults; no field here depends on prior context state. */
544 void MPV_common_defaults(MpegEncContext *s){
546 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
547 s->chroma_qscale_table= ff_default_chroma_qscale_table;
548 s->progressive_frame= 1;
549 s->progressive_sequence= 1;
550 s->picture_structure= PICT_FRAME;
552 s->coded_picture_number = 0;
553 s->picture_number = 0;
554 s->input_picture_number = 0;
556 s->picture_in_gop_number = 0;
561 s->picture_range_start = 0;
562 s->picture_range_end = MAX_PICTURE_COUNT;
566 * sets the given MpegEncContext to defaults for decoding.
567 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* Decoder defaults: currently just the shared common defaults. */
569 void MPV_decode_defaults(MpegEncContext *s){
570 MPV_common_defaults(s);
574 * init common structure for both encoder and decoder.
575 * this assumes that some variables like width/height are already set
/* One-time init shared by encoder and decoder: computes macroblock
 * geometry from width/height, initializes DSP and scantables, and
 * allocates every global table (MV tables, mb_type, quant matrices,
 * picture array, per-thread contexts, ...). Assumes width/height etc.
 * are already set. Returns 0 on success, negative on error; partial
 * allocations are released via the `fail` path / MPV_common_end(). */
577 av_cold int MPV_common_init(MpegEncContext *s)
579 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
580 threads = (s->encoding ||
582 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
583 s->avctx->thread_count : 1;
// Interlaced MPEG-2 needs mb_height rounded to a whole pair of field
// MB rows; H.264 manages its own mb_height.
585 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
586 s->mb_height = (s->height + 31) / 32 * 2;
587 else if (s->codec_id != CODEC_ID_H264)
588 s->mb_height = (s->height + 15) / 16;
590 if(s->avctx->pix_fmt == PIX_FMT_NONE){
591 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
// Slice threading cannot use more threads than MB rows.
595 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
596 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
597 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
598 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
599 s->avctx->thread_count, max_threads);
600 threads = max_threads;
603 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
606 dsputil_init(&s->dsp, s->avctx);
607 ff_dct_common_init(s);
609 s->flags= s->avctx->flags;
610 s->flags2= s->avctx->flags2;
// Strides include +1 padding column for out-of-frame neighbor access.
612 s->mb_width = (s->width + 15) / 16;
613 s->mb_stride = s->mb_width + 1;
614 s->b8_stride = s->mb_width*2 + 1;
615 s->b4_stride = s->mb_width*4 + 1;
616 mb_array_size= s->mb_height * s->mb_stride;
617 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
619 /* set chroma shifts */
620 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
621 &(s->chroma_y_shift) );
623 /* set default edge pos, will be overriden in decode_header if needed */
624 s->h_edge_pos= s->mb_width*16;
625 s->v_edge_pos= s->mb_height*16;
627 s->mb_num = s->mb_width * s->mb_height;
632 s->block_wrap[3]= s->b8_stride;
634 s->block_wrap[5]= s->mb_stride;
636 y_size = s->b8_stride * (2 * s->mb_height + 1);
637 c_size = s->mb_stride * (s->mb_height + 1);
638 yc_size = y_size + 2 * c_size;
640 /* convert fourcc to upper case */
641 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
643 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
645 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
// mb_index2xy maps a linear MB index to its (x,y)-derived stride offset.
647 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
648 for(y=0; y<s->mb_height; y++){
649 for(x=0; x<s->mb_width; x++){
650 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
653 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
656 /* Allocate MV tables */
657 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
658 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
659 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
660 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
661 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
662 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
// Working pointers are offset past the padding row/column.
663 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
664 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
665 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
666 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
667 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
668 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
670 if(s->msmpeg4_version){
671 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
673 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
675 /* Allocate MB type table */
676 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
678 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
680 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
681 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
682 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
683 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
684 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
685 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
687 if(s->avctx->noise_reduction){
688 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
// One picture range per potential frame thread (see
// ff_mpeg_update_thread_context, which offsets picture_range_*).
692 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
693 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
694 for(i = 0; i < s->picture_count; i++) {
695 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
698 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
700 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
701 /* interlaced direct mode decoding tables */
706 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
707 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
709 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
710 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
711 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
713 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
716 if (s->out_format == FMT_H263) {
718 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
719 s->coded_block= s->coded_block_base + s->b8_stride + 1;
721 /* cbp, ac_pred, pred_dir */
722 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
723 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
726 if (s->h263_pred || s->h263_plus || !s->encoding) {
728 //MN: we need these for error resilience of intra-frames
729 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
730 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
731 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
732 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 is the DC predictor reset value (128 << 3).
733 for(i=0;i<yc_size;i++)
734 s->dc_val_base[i] = 1024;
737 /* which mb is a intra block */
738 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
739 memset(s->mbintra_table, 1, mb_array_size);
741 /* init macroblock skip table */
742 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
743 //Note the +1 is for a quicker mpeg4 slice_end detection
744 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
746 s->parse_context.state= -1;
747 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
748 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
749 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
750 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
753 s->context_initialized = 1;
754 s->thread_context[0]= s;
// Slice threading / encoding: clone the context once per extra thread,
// then give each clone its own scratch state and MB-row range.
756 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
757 for(i=1; i<threads; i++){
758 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
759 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
762 for(i=0; i<threads; i++){
763 if(init_duplicate_context(s->thread_context[i], s) < 0)
765 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
766 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
769 if(init_duplicate_context(s, s) < 0) goto fail;
771 s->end_mb_y = s->mb_height;
781 /* init common structure for both encoder and decoder */
/* Tear down everything MPV_common_init() allocated: per-thread contexts,
 * all MV/MB tables, quant matrices, picture array, and visualization
 * buffers. Safe to call after a partially failed init. */
782 void MPV_common_end(MpegEncContext *s)
786 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
787 for(i=0; i<s->avctx->thread_count; i++){
788 free_duplicate_context(s->thread_context[i]);
// thread_context[0] aliases s itself, so only indices >= 1 are freed.
790 for(i=1; i<s->avctx->thread_count; i++){
791 av_freep(&s->thread_context[i]);
793 } else free_duplicate_context(s);
795 av_freep(&s->parse_context.buffer);
796 s->parse_context.buffer_size=0;
798 av_freep(&s->mb_type);
799 av_freep(&s->p_mv_table_base);
800 av_freep(&s->b_forw_mv_table_base);
801 av_freep(&s->b_back_mv_table_base);
802 av_freep(&s->b_bidir_forw_mv_table_base);
803 av_freep(&s->b_bidir_back_mv_table_base);
804 av_freep(&s->b_direct_mv_table_base);
// The offset working pointers are NULLed since the bases are gone.
806 s->b_forw_mv_table= NULL;
807 s->b_back_mv_table= NULL;
808 s->b_bidir_forw_mv_table= NULL;
809 s->b_bidir_back_mv_table= NULL;
810 s->b_direct_mv_table= NULL;
814 av_freep(&s->b_field_mv_table_base[i][j][k]);
815 s->b_field_mv_table[i][j][k]=NULL;
817 av_freep(&s->b_field_select_table[i][j]);
818 av_freep(&s->p_field_mv_table_base[i][j]);
819 s->p_field_mv_table[i][j]=NULL;
821 av_freep(&s->p_field_select_table[i]);
824 av_freep(&s->dc_val_base);
825 av_freep(&s->coded_block_base);
826 av_freep(&s->mbintra_table);
827 av_freep(&s->cbp_table);
828 av_freep(&s->pred_dir_table);
830 av_freep(&s->mbskip_table);
831 av_freep(&s->prev_pict_types);
832 av_freep(&s->bitstream_buffer);
833 s->allocated_bitstream_buffer_size=0;
835 av_freep(&s->avctx->stats_out);
836 av_freep(&s->ac_stats);
837 av_freep(&s->error_status_table);
838 av_freep(&s->mb_index2xy);
839 av_freep(&s->lambda_table);
840 av_freep(&s->q_intra_matrix);
841 av_freep(&s->q_inter_matrix);
842 av_freep(&s->q_intra_matrix16);
843 av_freep(&s->q_inter_matrix16);
844 av_freep(&s->input_picture);
845 av_freep(&s->reordered_input_picture);
846 av_freep(&s->dct_offset);
// Copied contexts (is_copy) do not own the pictures — skip freeing them.
848 if(s->picture && !s->avctx->is_copy){
849 for(i=0; i<s->picture_count; i++){
850 free_picture(s, &s->picture[i]);
853 av_freep(&s->picture);
854 s->context_initialized = 0;
857 s->current_picture_ptr= NULL;
858 s->linesize= s->uvlinesize= 0;
861 av_freep(&s->visualization_buffer[i]);
863 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
864 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level tables of an RLTable: for each "last"
 * class, max_level[run], max_run[level] and index_run[run] (first table
 * index with that run). If static_store is given the results live there
 * (and the work is skipped when already done); otherwise they are
 * av_malloc'ed. */
867 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
869 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
870 uint8_t index_run[MAX_RUN+1];
871 int last, run, level, start, end, i;
873 /* If table is static, we can quit if rl->max_level[0] is not NULL */
874 if(static_store && rl->max_level[0])
877 /* compute max_level[], max_run[] and index_run[] */
878 for(last=0;last<2;last++) {
// rl->n marks "unset" in index_run (no code can have index rl->n).
887 memset(max_level, 0, MAX_RUN + 1);
888 memset(max_run, 0, MAX_LEVEL + 1);
889 memset(index_run, rl->n, MAX_RUN + 1);
890 for(i=start;i<end;i++) {
891 run = rl->table_run[i];
892 level = rl->table_level[i];
893 if (index_run[run] == rl->n)
895 if (level > max_level[run])
896 max_level[run] = level;
897 if (run > max_run[level])
898 max_run[level] = run;
// The three result arrays are packed consecutively into static_store[last].
901 rl->max_level[last] = static_store[last];
903 rl->max_level[last] = av_malloc(MAX_RUN + 1);
904 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
906 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
908 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
909 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
911 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
913 rl->index_run[last] = av_malloc(MAX_RUN + 1);
914 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the combined VLC+run/level lookup (rl_vlc) from the plain
 * VLC table so decoding can fetch len/level/run in one access per qscale. */
918 void init_vlc_rl(RLTable *rl)
930 for(i=0; i<rl->vlc.table_size; i++){
931 int code= rl->vlc.table[i][0];
932 int len = rl->vlc.table[i][1];
935 if(len==0){ // illegal code
938 }else if(len<0){ //more bits needed
942 if(code==rl->n){ //esc
// +192 encodes the "last coefficient" flag in the run value.
946 run= rl->table_run [code] + 1;
947 level= rl->table_level[code] * qmul + qadd;
948 if(code >= rl->last) run+=192;
951 rl->rl_vlc[q][i].len= len;
952 rl->rl_vlc[q][i].level= level;
953 rl->rl_vlc[q][i].run= run;
/* Free every non-reference picture owned by this context (and, unless
 * remove_current is 0, the current picture too). */
958 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
962 /* release non reference frames */
963 for(i=0; i<s->picture_count; i++){
964 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
965 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
966 && (remove_current || &s->picture[i] != s->current_picture_ptr)
967 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
968 free_frame_buffer(s, &s->picture[i]);
/* Find a free slot in s->picture within this context's range, preferring
 * slots whose type matches the request (shared vs internal) before
 * falling back to any slot without pixel data. Aborts on overflow —
 * see the rationale below for why returning -1 would be worse. */
973 int ff_find_unused_picture(MpegEncContext *s, int shared){
977 for(i=s->picture_range_start; i<s->picture_range_end; i++){
978 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
982 for(i=s->picture_range_start; i<s->picture_range_end; i++){
983 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
986 for(i=s->picture_range_start; i<s->picture_range_end; i++){
987 if (s->picture[i].f.data[0] == NULL)
992 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
993 /* We could return -1, but the codec would crash trying to draw into a
994 * non-existing frame anyway. This is safer than waiting for a random crash.
995 * Also the return of this is never useful, an encoder must only allocate
996 * as much as allowed in the specification. This has no relationship to how
997 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
998 * enough for such valid streams).
999 * Plus, a decoder has to check stream validity and remove frames if too
1000 * many reference frames are around. Waiting for "OOM" is not correct at
1001 * all. Similarly, missing reference frames have to be replaced by
1002 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the noise-reduction DCT offsets from the accumulated error
 * sums; halves counts/sums past 2^16 samples so old statistics decay. */
1008 static void update_noise_reduction(MpegEncContext *s){
1011 for(intra=0; intra<2; intra++){
1012 if(s->dct_count[intra] > (1<<16)){
1013 for(i=0; i<64; i++){
1014 s->dct_error_sum[intra][i] >>=1;
1016 s->dct_count[intra] >>= 1;
// Offset per coefficient: strength * count / error_sum, rounded.
1019 for(i=0; i<64; i++){
1020 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1026 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1028 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1034 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1036 /* mark&release old frames */
1037 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1038 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1039 free_frame_buffer(s, s->last_picture_ptr);
1041 /* release forgotten pictures */
1042 /* if(mpeg124/h263) */
1044 for(i=0; i<s->picture_count; i++){
1045 if (s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1046 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1047 free_frame_buffer(s, &s->picture[i]);
1055 ff_release_unused_pictures(s, 1);
/* pick (or reuse) the Picture that will hold the frame being coded/decoded */
1057 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1058 pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1060 i= ff_find_unused_picture(s, 0);
1061 pic= &s->picture[i];
/* reference semantics: H.264 tracks field parity, other codecs use 3 (both fields) for non-B frames */
1064 pic->f.reference = 0;
1066 if (s->codec_id == CODEC_ID_H264)
1067 pic->f.reference = s->picture_structure;
1068 else if (s->pict_type != AV_PICTURE_TYPE_B)
1069 pic->f.reference = 3;
1072 pic->f.coded_picture_number = s->coded_picture_number++;
1074 if(ff_alloc_picture(s, pic, 0) < 0)
1077 s->current_picture_ptr= pic;
1078 //FIXME use only the vars from current_pic
/* propagate interlacing metadata; for MPEG-1/2 field pictures, derive
 * top_field_first from picture_structure and first_field */
1079 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1080 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1081 if(s->picture_structure != PICT_FRAME)
1082 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1084 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1085 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1088 s->current_picture_ptr->f.pict_type = s->pict_type;
1089 // if(s->flags && CODEC_FLAG_QSCALE)
1090 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1091 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1093 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* shift the reference chain: non-B frames become the new "next" picture */
1095 if (s->pict_type != AV_PICTURE_TYPE_B) {
1096 s->last_picture_ptr= s->next_picture_ptr;
1098 s->next_picture_ptr= s->current_picture_ptr;
1100 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1101 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1102 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1103 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1104 s->pict_type, s->dropable);*/
/* non-H.264: substitute dummy frames for missing references so MC never
 * dereferences a NULL plane pointer */
1106 if(s->codec_id != CODEC_ID_H264){
1107 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1108 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1109 if (s->pict_type != AV_PICTURE_TYPE_I)
1110 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1111 else if (s->picture_structure != PICT_FRAME)
1112 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1114 /* Allocate a dummy frame */
1115 i= ff_find_unused_picture(s, 0);
1116 s->last_picture_ptr= &s->picture[i];
1117 s->last_picture_ptr->f.key_frame = 0;
1118 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* mark both fields fully decoded so frame-threading consumers don't wait on the dummy */
1120 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1121 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1123 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1124 /* Allocate a dummy frame */
1125 i= ff_find_unused_picture(s, 0);
1126 s->next_picture_ptr= &s->picture[i];
1127 s->next_picture_ptr->f.key_frame = 0;
1128 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1130 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1131 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1135 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1136 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1138 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* field pictures (non-H.264): address only the requested field by offsetting
 * data[] for the bottom field and doubling the linesizes */
1140 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1143 if(s->picture_structure == PICT_BOTTOM_FIELD){
1144 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1146 s->current_picture.f.linesize[i] *= 2;
1147 s->last_picture.f.linesize[i] *= 2;
1148 s->next_picture.f.linesize[i] *= 2;
1152 s->error_recognition= avctx->error_recognition;
1154 /* set dequantizer, we can't do it during init as it might change for mpeg4
1155 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1156 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1157 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1158 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1159 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1160 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1161 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1163 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1164 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1167 if(s->dct_error_sum){
1168 assert(s->avctx->noise_reduction && s->encoding);
1170 update_noise_reduction(s);
1173 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1174 return ff_xvmc_field_start(s, avctx);
1179 /* generic function for encode/decode called after a frame has been coded/decoded */
1180 void MPV_frame_end(MpegEncContext *s)
1183 /* redraw edges for the frame if decoding didn't complete */
1184 //just to make sure that all data is rendered.
1185 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1186 ff_xvmc_field_end(s);
/* pad the picture edges (needed for unrestricted MVs) unless a hw accel
 * owns the surfaces or the caller asked for CODEC_FLAG_EMU_EDGE */
1187 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1188 && !s->avctx->hwaccel
1189 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1190 && s->unrestricted_mv
1191 && s->current_picture.f.reference
1193 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1194 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1195 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* luma plane, then the two chroma planes scaled by the subsampling shifts */
1196 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1197 s->h_edge_pos , s->v_edge_pos,
1198 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1199 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1200 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1201 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1202 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1203 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1204 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
/* remember per-type state for the rate control / next frame */
1209 s->last_pict_type = s->pict_type;
1210 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1211 if(s->pict_type!=AV_PICTURE_TYPE_B){
1212 s->last_non_b_pict_type= s->pict_type;
1215 /* copy back current_picture variables */
1216 for(i=0; i<MAX_PICTURE_COUNT; i++){
1217 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1218 s->picture[i]= s->current_picture;
1222 assert(i<MAX_PICTURE_COUNT);
1226 /* release non-reference frames */
1227 for(i=0; i<s->picture_count; i++){
1228 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1229 free_frame_buffer(s, &s->picture[i]);
1233 // clear copies, to avoid confusion
1235 memset(&s->last_picture, 0, sizeof(Picture));
1236 memset(&s->next_picture, 0, sizeof(Picture));
1237 memset(&s->current_picture, 0, sizeof(Picture));
1239 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* frame threading: report the whole frame done (H.264 reports on its own) */
1241 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1242 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1247 * draws a line from (ex, ey) -> (sx, sy).
1248 * @param w width of the image
1249 * @param h height of the image
1250 * @param stride stride/linesize of the image
1251 * @param color color of the line (added into the existing pixel values)
1253 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clamp both endpoints into the image */
1256 sx= av_clip(sx, 0, w-1);
1257 sy= av_clip(sy, 0, h-1);
1258 ex= av_clip(ex, 0, w-1);
1259 ey= av_clip(ey, 0, h-1);
1261 buf[sy*stride + sx]+= color;
/* walk along the major axis; blend the two neighbouring pixels on the minor
 * axis with 16.16 fixed-point fractions (a simple anti-aliased DDA) */
1263 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* ensure we iterate left to right */
1265 FFSWAP(int, sx, ex);
1266 FFSWAP(int, sy, ey);
1268 buf+= sx + sy*stride;
1270 f= ((ey-sy)<<16)/ex;
1271 for(x= 0; x <= ex; x++){
1274 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1275 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* mostly-vertical case: same scheme with x and y exchanged */
1279 FFSWAP(int, sx, ex);
1280 FFSWAP(int, sy, ey);
1282 buf+= sx + sy*stride;
1284 if(ey) f= ((ex-sx)<<16)/ey;
1286 for(y= 0; y <= ey; y++){
1289 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1290 buf[y*stride + x+1]+= (color* fr )>>16;
1296 * draws an arrow from (ex, ey) -> (sx, sy).
1297 * @param w width of the image
1298 * @param h height of the image
1299 * @param stride stride/linesize of the image
1300 * @param color color of the arrow
1302 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* loose clamp: draw_line() does the exact per-pixel clipping */
1305 sx= av_clip(sx, -100, w+100);
1306 sy= av_clip(sy, -100, h+100);
1307 ex= av_clip(ex, -100, w+100);
1308 ey= av_clip(ey, -100, h+100);
/* only draw the arrow head when the vector is longer than 3 pixels */
1313 if(dx*dx + dy*dy > 3*3){
1316 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1318 //FIXME subpixel accuracy
/* scale the direction vector to a fixed head size */
1319 rx= ROUNDED_DIV(rx*3<<4, length);
1320 ry= ROUNDED_DIV(ry*3<<4, length);
/* the two barbs of the arrow head, perpendicular-ish to the shaft */
1322 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1323 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1325 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1329 * prints debugging info for the given picture.
1331 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1333 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* textual per-macroblock dump: skip counter, qscale and/or mb type characters */
1335 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1338 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1339 av_get_picture_type_char(pict->pict_type));
1340 for(y=0; y<s->mb_height; y++){
1341 for(x=0; x<s->mb_width; x++){
1342 if(s->avctx->debug&FF_DEBUG_SKIP){
1343 int count= s->mbskip_table[x + y*s->mb_stride];
1344 if(count>9) count=9;
1345 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1347 if(s->avctx->debug&FF_DEBUG_QP){
1348 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1350 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1351 int mb_type= pict->mb_type[x + y*s->mb_stride];
1352 //Type & MV direction
1354 av_log(s->avctx, AV_LOG_DEBUG, "P");
1355 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1356 av_log(s->avctx, AV_LOG_DEBUG, "A");
1357 else if(IS_INTRA4x4(mb_type))
1358 av_log(s->avctx, AV_LOG_DEBUG, "i");
1359 else if(IS_INTRA16x16(mb_type))
1360 av_log(s->avctx, AV_LOG_DEBUG, "I");
1361 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1362 av_log(s->avctx, AV_LOG_DEBUG, "d");
1363 else if(IS_DIRECT(mb_type))
1364 av_log(s->avctx, AV_LOG_DEBUG, "D");
1365 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1366 av_log(s->avctx, AV_LOG_DEBUG, "g");
1367 else if(IS_GMC(mb_type))
1368 av_log(s->avctx, AV_LOG_DEBUG, "G");
1369 else if(IS_SKIP(mb_type))
1370 av_log(s->avctx, AV_LOG_DEBUG, "S");
1371 else if(!USES_LIST(mb_type, 1))
1372 av_log(s->avctx, AV_LOG_DEBUG, ">");
1373 else if(!USES_LIST(mb_type, 0))
1374 av_log(s->avctx, AV_LOG_DEBUG, "<");
1376 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1377 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning (+ 8x8, - 16x8, | 8x16, space 16x16/intra) */
1382 av_log(s->avctx, AV_LOG_DEBUG, "+");
1383 else if(IS_16X8(mb_type))
1384 av_log(s->avctx, AV_LOG_DEBUG, "-");
1385 else if(IS_8X16(mb_type))
1386 av_log(s->avctx, AV_LOG_DEBUG, "|");
1387 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1388 av_log(s->avctx, AV_LOG_DEBUG, " ");
1390 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: '=' marks interlaced macroblocks */
1393 if(IS_INTERLACED(mb_type))
1394 av_log(s->avctx, AV_LOG_DEBUG, "=");
1396 av_log(s->avctx, AV_LOG_DEBUG, " ");
1398 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1400 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* visual overlays (MVs as arrows, QP / mb type as chroma tinting) drawn
 * directly into a private copy of the picture */
1404 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1405 const int shift= 1 + s->quarter_sample;
1409 int h_chroma_shift, v_chroma_shift, block_height;
1410 const int width = s->avctx->width;
1411 const int height= s->avctx->height;
1412 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1413 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1414 s->low_delay=0; //needed to see the vectors without trashing the buffers
1416 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* copy planes into the visualization buffer so the real frame is untouched */
1418 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1419 pict->data[i]= s->visualization_buffer[i];
1421 pict->type= FF_BUFFER_TYPE_COPY;
1424 block_height = 16>>v_chroma_shift;
1426 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1428 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1429 const int mb_index= mb_x + mb_y*s->mb_stride;
1430 if((s->avctx->debug_mv) && pict->motion_val){
/* type: 0 = P forward, 1 = B forward, 2 = B backward */
1432 for(type=0; type<3; type++){
1435 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1439 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1443 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1448 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per 8x8 block, 16x8/8x16 half, or whole 16x16 macroblock */
1451 if(IS_8X8(pict->mb_type[mb_index])){
1454 int sx= mb_x*16 + 4 + 8*(i&1);
1455 int sy= mb_y*16 + 4 + 8*(i>>1);
1456 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1457 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1458 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1459 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1461 }else if(IS_16X8(pict->mb_type[mb_index])){
1465 int sy=mb_y*16 + 4 + 8*i;
1466 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1467 int mx=(pict->motion_val[direction][xy][0]>>shift);
1468 int my=(pict->motion_val[direction][xy][1]>>shift);
1470 if(IS_INTERLACED(pict->mb_type[mb_index]))
1473 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1475 }else if(IS_8X16(pict->mb_type[mb_index])){
1478 int sx=mb_x*16 + 4 + 8*i;
1480 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1481 int mx=(pict->motion_val[direction][xy][0]>>shift);
1482 int my=(pict->motion_val[direction][xy][1]>>shift);
1484 if(IS_INTERLACED(pict->mb_type[mb_index]))
1487 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1490 int sx= mb_x*16 + 8;
1491 int sy= mb_y*16 + 8;
1492 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1493 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1494 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1495 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* tint chroma by qscale (grayscale ramp over both chroma planes) */
1499 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1500 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1502 for(y=0; y<block_height; y++){
1503 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1504 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* tint chroma by macroblock type: map each type to a hue via COLOR() */
1507 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1508 int mb_type= pict->mb_type[mb_index];
1511 #define COLOR(theta, r)\
1512 u= (int)(128 + r*cos(theta*3.141592/180));\
1513 v= (int)(128 + r*sin(theta*3.141592/180));
1517 if(IS_PCM(mb_type)){
1519 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1521 }else if(IS_INTRA4x4(mb_type)){
1523 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1525 }else if(IS_DIRECT(mb_type)){
1527 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1529 }else if(IS_GMC(mb_type)){
1531 }else if(IS_SKIP(mb_type)){
1533 }else if(!USES_LIST(mb_type, 1)){
1535 }else if(!USES_LIST(mb_type, 0)){
1538 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1542 u*= 0x0101010101010101ULL;
1543 v*= 0x0101010101010101ULL;
1544 for(y=0; y<block_height; y++){
1545 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1546 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* overlay partition boundaries by XOR-ing luma with 0x80 */
1550 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1551 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1552 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1554 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1556 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* mark sub-8x8 MV differences when sub-block motion data is available */
1558 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1559 int dm= 1 << (mv_sample_log2-2);
1561 int sx= mb_x*16 + 8*(i&1);
1562 int sy= mb_y*16 + 8*(i>>1);
1563 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1565 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1566 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1568 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1569 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1570 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1574 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset the skip counter so the next dump starts fresh */
1578 s->mbskip_table[mb_index]=0;
/**
 * Half-pel motion compensation for one block in lowres mode.
 * Splits the motion vector into an integer part (source offset, scaled by
 * the lowres shift) and a sub-pel fraction, emulates picture edges when the
 * source block sticks out, then calls the chroma MC function for the actual
 * filtering.  NOTE(review): returns the updated sy in elided lines -- the
 * int return value is not fully visible here; confirm against the full file.
 */
1584 static inline int hpel_motion_lowres(MpegEncContext *s,
1585 uint8_t *dest, uint8_t *src,
1586 int field_based, int field_select,
1587 int src_x, int src_y,
1588 int width, int height, int stride,
1589 int h_edge_pos, int v_edge_pos,
1590 int w, int h, h264_chroma_mc_func *pix_op,
1591 int motion_x, int motion_y)
1593 const int lowres= s->avctx->lowres;
1594 const int op_index= FFMIN(lowres, 2);
/* sub-pel mask: lowres halves the pel grid once per shift */
1595 const int s_mask= (2<<lowres)-1;
1599 if(s->quarter_sample){
/* split MV into fractional (sx/sy) and integer source offset */
1604 sx= motion_x & s_mask;
1605 sy= motion_y & s_mask;
1606 src_x += motion_x >> (lowres+1);
1607 src_y += motion_y >> (lowres+1);
1609 src += src_y * stride + src_x;
/* source block (plus the extra sample needed for sub-pel filtering)
 * reaches outside the picture: use the edge emulation buffer */
1611 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1612 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1613 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1614 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1615 src= s->edge_emu_buffer;
/* rescale the fraction to the 1/8-pel range expected by the MC function */
1619 sx= (sx << 2) >> lowres;
1620 sy= (sy << 2) >> lowres;
1623 pix_op[op_index](dest, src, stride, h, sx, sy);
1627 /* apply one mpeg motion vector to the three components */
/* Computes luma and chroma source positions and sub-pel fractions from one MV
 * (handling the per-format chroma MV rules for H.263, H.261, and 4:2:0/4:2:2),
 * performs edge emulation when a block extends past the picture, and runs the
 * MC filter on all three planes.  field_based/bottom_field/field_select
 * implement field-picture addressing. */
1628 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1629 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1630 int field_based, int bottom_field, int field_select,
1631 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1632 int motion_x, int motion_y, int h, int mb_y)
1634 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1635 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1636 const int lowres= s->avctx->lowres;
1637 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1638 const int block_s= 8>>lowres;
1639 const int s_mask= (2<<lowres)-1;
1640 const int h_edge_pos = s->h_edge_pos >> lowres;
1641 const int v_edge_pos = s->v_edge_pos >> lowres;
1642 linesize = s->current_picture.f.linesize[0] << field_based;
1643 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1645 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate for the vertical field offset in lowres field pictures */
1651 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
/* luma: split MV into sub-pel fraction and integer source position */
1654 sx= motion_x & s_mask;
1655 sy= motion_y & s_mask;
1656 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1657 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per output format */
1659 if (s->out_format == FMT_H263) {
1660 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1661 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1664 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1667 uvsx = (2*mx) & s_mask;
1668 uvsy = (2*my) & s_mask;
1669 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1670 uvsrc_y = mb_y*block_s + (my >> lowres);
1672 if(s->chroma_y_shift){
1677 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1678 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1680 if(s->chroma_x_shift){
1684 uvsy = motion_y & s_mask;
1686 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1689 uvsx = motion_x & s_mask;
1690 uvsy = motion_y & s_mask;
1697 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1698 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1699 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* block reaches beyond the padded picture: emulate edges for all planes */
1701 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1702 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1703 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1704 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1705 ptr_y = s->edge_emu_buffer;
1706 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1707 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1708 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1709 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1710 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1711 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1717 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1718 dest_y += s->linesize;
1719 dest_cb+= s->uvlinesize;
1720 dest_cr+= s->uvlinesize;
1724 ptr_y += s->linesize;
1725 ptr_cb+= s->uvlinesize;
1726 ptr_cr+= s->uvlinesize;
/* rescale fractions to the MC function's expected range and filter */
1729 sx= (sx << 2) >> lowres;
1730 sy= (sy << 2) >> lowres;
1731 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1733 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1734 uvsx= (uvsx << 2) >> lowres;
1735 uvsy= (uvsy << 2) >> lowres;
1736 if(h >> s->chroma_y_shift){
1737 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1738 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1741 //FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for a 4MV (four 8x8 luma vectors) macroblock in
 * lowres mode: derive one chroma MV from the four luma MVs with the H.263
 * chroma rounding, then filter both chroma planes, emulating edges if the
 * source block lies partly outside the picture.
 */
1744 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1745 uint8_t *dest_cb, uint8_t *dest_cr,
1746 uint8_t **ref_picture,
1747 h264_chroma_mc_func *pix_op,
1749 const int lowres= s->avctx->lowres;
1750 const int op_index= FFMIN(lowres, 2);
1751 const int block_s= 8>>lowres;
1752 const int s_mask= (2<<lowres)-1;
/* chroma plane edges: one extra shift for 4:2:0 subsampling */
1753 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1754 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1755 int emu=0, src_x, src_y, offset, sx, sy;
1758 if(s->quarter_sample){
1763 /* In case of 8X8, we construct a single chroma motion vector
1764 with a special rounding */
1765 mx= ff_h263_round_chroma(mx);
1766 my= ff_h263_round_chroma(my);
1770 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1771 src_y = s->mb_y*block_s + (my >> (lowres+1));
1773 offset = src_y * s->uvlinesize + src_x;
1774 ptr = ref_picture[1] + offset;
1775 if(s->flags&CODEC_FLAG_EMU_EDGE){
1776 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1777 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1778 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1779 ptr= s->edge_emu_buffer;
1783 sx= (sx << 2) >> lowres;
1784 sy= (sy << 2) >> lowres;
1785 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and fractions as Cb */
1787 ptr = ref_picture[2] + offset;
1789 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1790 ptr= s->edge_emu_buffer;
1792 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1796 * motion compensation of a single macroblock
1798 * @param dest_y luma destination pointer
1799 * @param dest_cb chroma cb/u destination pointer
1800 * @param dest_cr chroma cr/v destination pointer
1801 * @param dir direction (0->forward, 1->backward)
1802 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1803 * @param pix_op halfpel motion compensation function (average or put normally)
1804 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1806 static inline void MPV_motion_lowres(MpegEncContext *s,
1807 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1808 int dir, uint8_t **ref_picture,
1809 h264_chroma_mc_func *pix_op)
1813 const int lowres= s->avctx->lowres;
1814 const int block_s= 8>>lowres;
/* dispatch on MV type: 16x16, 4x8x8, field, 16x8, dual-prime */
1819 switch(s->mv_type) {
1821 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1823 ref_picture, pix_op,
1824 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* MV_TYPE_8X8: one half-pel MC per 8x8 luma block, chroma from averaged MV */
1830 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1831 ref_picture[0], 0, 0,
1832 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1833 s->width, s->height, s->linesize,
1834 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1835 block_s, block_s, pix_op,
1836 s->mv[dir][i][0], s->mv[dir][i][1]);
1838 mx += s->mv[dir][i][0];
1839 my += s->mv[dir][i][1];
1842 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1843 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field MVs: two field predictions in a frame picture, one in a field picture */
1846 if (s->picture_structure == PICT_FRAME) {
1848 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1849 1, 0, s->field_select[dir][0],
1850 ref_picture, pix_op,
1851 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1853 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1854 1, 1, s->field_select[dir][1],
1855 ref_picture, pix_op,
1856 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* opposite-parity field of the current frame serves as reference here */
1858 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1859 ref_picture = s->current_picture_ptr->f.data;
1862 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1863 0, 0, s->field_select[dir][0],
1864 ref_picture, pix_op,
1865 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* 16x8 MVs: two half-height predictions, each with its own field select */
1870 uint8_t ** ref2picture;
1872 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1873 ref2picture= ref_picture;
1875 ref2picture = s->current_picture_ptr->f.data;
1878 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1879 0, 0, s->field_select[dir][i],
1880 ref2picture, pix_op,
1881 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1883 dest_y += 2*block_s*s->linesize;
1884 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1885 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* dual prime: put the first prediction then average the second over it */
1889 if(s->picture_structure == PICT_FRAME){
1893 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1895 ref_picture, pix_op,
1896 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1898 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1902 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1903 0, 0, s->picture_structure != i+1,
1904 ref_picture, pix_op,
1905 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1907 // after put we make avg of the same block
1908 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1910 //opposite parity is always in the same frame if this is second field
1911 if(!s->first_field){
1912 ref_picture = s->current_picture_ptr->f.data;
1922 * find the lowest MB row referenced in the MVs
/* Used by frame-threaded decoding to know how far the reference frame must
 * be decoded before this macroblock's MC can run.  Scans the MVs of the
 * given direction, converts the largest vertical displacement to MB rows
 * (>>6: half-pel units to 16-pixel rows), and clamps to the picture.
 * Field pictures are unhandled and conservatively return the last row. */
1924 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift normalizes half-pel MVs up to quarter-pel units */
1926 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1927 int my, off, i, mvs;
1929 if (s->picture_structure != PICT_FRAME) goto unhandled;
1931 switch (s->mv_type) {
1945 for (i = 0; i < mvs; i++) {
1946 my = s->mv[dir][i][1]<<qpel_shift;
1947 my_max = FFMAX(my_max, my);
1948 my_min = FFMIN(my_min, my);
/* +63 rounds up to a whole macroblock row */
1951 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1953 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* fallback: require the whole reference frame */
1955 return s->mb_height-1;
1958 /* put block[] to dest[] */
/* Dequantize an intra block in place and write its IDCT to dest (overwrite). */
1959 static inline void put_dct(MpegEncContext *s,
1960 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1962 s->dct_unquantize_intra(s, block, i, qscale);
1963 s->dsp.idct_put (dest, line_size, block);
1966 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block onto dest; skipped when the
 * block has no coefficients (block_last_index < 0). */
1967 static inline void add_dct(MpegEncContext *s,
1968 DCTELEM *block, int i, uint8_t *dest, int line_size)
1970 if (s->block_last_index[i] >= 0) {
1971 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, then add its IDCT onto dest; skipped when the
 * block has no coefficients (block_last_index < 0). */
1975 static inline void add_dequant_dct(MpegEncContext *s,
1976 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1978 if (s->block_last_index[i] >= 0) {
1979 s->dct_unquantize_inter(s, block, i, qscale);
1981 s->dsp.idct_add (dest, line_size, block);
1986 * cleans dc, ac, coded_block for the current non intra MB
/* Resets the intra-prediction state (luma + chroma DC predictors to the
 * 1024 reset value, AC prediction buffers to 0, MSMPEG4 coded_block flags)
 * for the current macroblock and clears its mbintra_table entry. */
1988 void ff_clean_intra_table_entries(MpegEncContext *s)
1990 int wrap = s->b8_stride;
1991 int xy = s->block_index[0];
/* luma DC predictors of the four 8x8 blocks */
1994 s->dc_val[0][xy + 1 ] =
1995 s->dc_val[0][xy + wrap] =
1996 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC prediction rows (two 8x8 rows at a time) */
1998 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1999 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2000 if (s->msmpeg4_version>=3) {
2001 s->coded_block[xy ] =
2002 s->coded_block[xy + 1 ] =
2003 s->coded_block[xy + wrap] =
2004 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: indexed per macroblock, not per 8x8 block */
2007 wrap = s->mb_stride;
2008 xy = s->mb_x + s->mb_y * wrap;
2010 s->dc_val[2][xy] = 1024;
2012 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2013 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2015 s->mbintra_table[xy]= 0;
2018 /* generic function called after a macroblock has been parsed by the
2019 decoder or after it has been encoded by the encoder.
2021 Important variables used:
2022 s->mb_intra : true if intra macroblock
2023 s->mv_dir : motion vector direction
2024 s->mv_type : motion vector type
2025 s->mv : motion vector
2026 s->interlaced_dct : true if interlaced dct used (mpeg2)
2028 static av_always_inline
2029 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2030 int lowres_flag, int is_mpeg12)
2032 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2033 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2034 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2038 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2039 /* save DCT coefficients */
2041 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2042 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2044 for(j=0; j<64; j++){
2045 *dct++ = block[i][s->dsp.idct_permutation[j]];
2046 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2048 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2052 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2054 /* update DC predictors for P macroblocks */
2056 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2057 if(s->mbintra_table[mb_xy])
2058 ff_clean_intra_table_entries(s);
2062 s->last_dc[2] = 128 << s->intra_dc_precision;
2065 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2066 s->mbintra_table[mb_xy]=1;
2068 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2069 uint8_t *dest_y, *dest_cb, *dest_cr;
2070 int dct_linesize, dct_offset;
2071 op_pixels_func (*op_pix)[4];
2072 qpel_mc_func (*op_qpix)[16];
2073 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2074 const int uvlinesize = s->current_picture.f.linesize[1];
2075 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2076 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2078 /* avoid copy if macroblock skipped in last frame too */
2079 /* skip only during decoding as we might trash the buffers during encoding a bit */
2081 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2082 const int age = s->current_picture.f.age;
2086 if (s->mb_skipped) {
2088 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2090 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2091 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2093 /* if previous was skipped too, then nothing to do ! */
2094 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2097 } else if(!s->current_picture.f.reference) {
2098 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2099 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2101 *mbskip_ptr = 0; /* not skipped */
2105 dct_linesize = linesize << s->interlaced_dct;
2106 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2110 dest_cb= s->dest[1];
2111 dest_cr= s->dest[2];
2113 dest_y = s->b_scratchpad;
2114 dest_cb= s->b_scratchpad+16*linesize;
2115 dest_cr= s->b_scratchpad+32*linesize;
2119 /* motion handling */
2120 /* decoding or more than one mb_type (MC was already done otherwise) */
2123 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2124 if (s->mv_dir & MV_DIR_FORWARD) {
2125 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2127 if (s->mv_dir & MV_DIR_BACKWARD) {
2128 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2133 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2135 if (s->mv_dir & MV_DIR_FORWARD) {
2136 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2137 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2139 if (s->mv_dir & MV_DIR_BACKWARD) {
2140 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2143 op_qpix= s->me.qpel_put;
2144 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2145 op_pix = s->dsp.put_pixels_tab;
2147 op_pix = s->dsp.put_no_rnd_pixels_tab;
2149 if (s->mv_dir & MV_DIR_FORWARD) {
2150 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2151 op_pix = s->dsp.avg_pixels_tab;
2152 op_qpix= s->me.qpel_avg;
2154 if (s->mv_dir & MV_DIR_BACKWARD) {
2155 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2160 /* skip dequant / idct if we are really late ;) */
2161 if(s->avctx->skip_idct){
2162 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2163 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2164 || s->avctx->skip_idct >= AVDISCARD_ALL)
2168 /* add dct residue */
2169 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2170 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2171 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2172 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2173 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2174 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2176 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2177 if (s->chroma_y_shift){
2178 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2179 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2183 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2184 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2185 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2186 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2189 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2190 add_dct(s, block[0], 0, dest_y , dct_linesize);
2191 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2192 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2193 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2195 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2196 if(s->chroma_y_shift){//Chroma420
2197 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2198 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2201 dct_linesize = uvlinesize << s->interlaced_dct;
2202 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2204 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2205 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2206 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2207 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2208 if(!s->chroma_x_shift){//Chroma444
2209 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2210 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2211 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2212 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2217 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2218 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2221 /* dct only in intra block */
2222 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2223 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2224 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2225 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2226 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2228 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2229 if(s->chroma_y_shift){
2230 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2231 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2235 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2236 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2237 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2238 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2242 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2243 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2244 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2245 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2247 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2248 if(s->chroma_y_shift){
2249 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2250 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2253 dct_linesize = uvlinesize << s->interlaced_dct;
2254 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2256 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2257 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2258 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2259 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2260 if(!s->chroma_x_shift){//Chroma444
2261 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2262 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2263 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2264 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2272 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2273 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2274 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one decoded macroblock.
 *
 * Thin dispatcher: selects one of four specializations of
 * MPV_decode_mb_internal(), keyed on whether lowres (reduced-resolution)
 * decoding is active and whether the stream is MPEG-1/2 (FMT_MPEG1).
 * Passing these as compile-time-constant arguments lets the internal
 * function be inlined and dead branches removed per specialization.
 */
2279 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2281 if(s->out_format == FMT_MPEG1) {
/* MPEG-1/2 path: is_mpeg12 = 1 */
2282 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2283 else MPV_decode_mb_internal(s, block, 0, 1);
/* all other codecs: is_mpeg12 = 0 */
2286 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2287 else MPV_decode_mb_internal(s, block, 0, 0);
2292 * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Hand a completed horizontal band of the picture to the user's
 * draw_horiz_band callback, padding picture edges first where needed.
 *
 * @param y first row of the band
 * @param h nominal band height; clipped internally for the last band
 */
2294 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2295 const int field_pic= s->picture_structure != PICT_FRAME;
/* Replicate picture borders for reference frames (needed by
 * unrestricted motion vectors), unless a hwaccel owns the surfaces
 * or the caller requested edge emulation instead. */
2301 if (!s->avctx->hwaccel
2302 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2303 && s->unrestricted_mv
2304 && s->current_picture.f.reference
2306 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2307 int sides = 0, edge_h;
/* chroma subsampling shifts for the current pixel format */
2308 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2309 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2310 if (y==0) sides |= EDGE_TOP;
2311 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* never draw edges past the bottom of the frame */
2313 edge_h= FFMIN(h, s->v_edge_pos - y);
/* luma plane, then both chroma planes (scaled by the chroma shifts) */
2315 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize,
2316 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
2317 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2318 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2319 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2320 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clip h for the last band of the frame */
2323 h= FFMIN(h, s->avctx->height - y);
/* field pictures: wait for the second field unless the user
 * explicitly allows per-field callbacks */
2325 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2327 if (s->avctx->draw_horiz_band) {
/* choose the frame the band belongs to in output order: current
 * picture for B frames / low delay / coded order, else the last one */
2331 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2332 src= (AVFrame*)s->current_picture_ptr;
2333 else if(s->last_picture_ptr)
2334 src= (AVFrame*)s->last_picture_ptr;
2338 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band start within each plane */
2344 offset[0]= y * s->linesize;
2346 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2352 s->avctx->draw_horiz_band(s->avctx, src, offset,
2353 y, s->picture_structure, h);
/**
 * Initialize per-macroblock state for the MB at (s->mb_x, s->mb_y):
 * the 8x8 block indices (s->block_index[]) and the destination plane
 * pointers (s->dest[0..2]).
 */
2357 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2358 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2359 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the macroblock size in pixels; each lowres level halves it */
2360 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 blocks on the b8 grid (two per row of the MB) */
2362 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2363 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2364 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2365 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma blocks live after the luma area, one per MB */
2366 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2367 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2368 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* NOTE(review): destinations use (mb_x - 1) — presumably advanced
 * before the first use; confirm against callers. */
2370 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2371 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2372 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2374 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2376 if(s->picture_structure==PICT_FRAME){
/* frame picture: advance by whole MB rows */
2377 s->dest[0] += s->mb_y * linesize << mb_size;
2378 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2379 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows of the two fields interleave, hence mb_y >> 1 */
2381 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2382 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2383 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2384 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush all decoder state held in the MpegEncContext: release every
 * picture buffer we own, drop all reference-frame pointers, reset the
 * macroblock position and the bitstream parse context.
 */
2389 void ff_mpeg_flush(AVCodecContext *avctx){
2391 MpegEncContext *s = avctx->priv_data;
/* nothing to flush before the context is initialized */
2393 if(s==NULL || s->picture==NULL)
/* free every allocated picture (internal or user-supplied buffers) */
2396 for(i=0; i<s->picture_count; i++){
2397 if (s->picture[i].f.data[0] &&
2398 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2399 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2400 free_frame_buffer(s, &s->picture[i]);
/* no current/last/next references remain valid after the flush */
2402 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2404 s->mb_x= s->mb_y= 0;
/* reset the bitstream parser so stale partial data is discarded */
2407 s->parse_context.state= -1;
2408 s->parse_context.frame_start_found= 0;
2409 s->parse_context.overread= 0;
2410 s->parse_context.overread_index= 0;
2411 s->parse_context.index= 0;
2412 s->parse_context.last_index= 0;
2413 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra-block dequantization (C reference implementation).
 * block[0] (DC) is scaled by the DC scale; AC coefficients are scaled by
 * qscale and the intra quant matrix, then forced odd via (level - 1) | 1
 * (MPEG-1 oddification / mismatch control).
 */
2417 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2418 DCTELEM *block, int n, int qscale)
2420 int i, level, nCoeffs;
2421 const uint16_t *quant_matrix;
/* only coefficients up to the last nonzero one need processing */
2423 nCoeffs= s->block_last_index[n];
/* DC coefficient: luma and chroma use separate DC scales */
2426 block[0] = block[0] * s->y_dc_scale;
2428 block[0] = block[0] * s->c_dc_scale;
2429 /* XXX: only mpeg1 */
2430 quant_matrix = s->intra_matrix;
2431 for(i=1;i<=nCoeffs;i++) {
/* map scan position to the (IDCT-permuted) coefficient index */
2432 int j= s->intra_scantable.permutated[i];
/* NOTE(review): the two identical formulas below belong to the
 * negative/positive sign branches; the sign handling lines are not
 * visible in this view. */
2437 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2438 level = (level - 1) | 1;
2441 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2442 level = (level - 1) | 1;
/**
 * MPEG-1 inter-block dequantization (C reference implementation).
 * Coefficients are reconstructed as ((2*|level| + 1) * qscale * matrix) >> 4
 * and then forced odd ((level - 1) | 1) per MPEG-1 mismatch control.
 * No special DC handling: inter blocks quantize DC like any AC coeff.
 */
2449 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2450 DCTELEM *block, int n, int qscale)
2452 int i, level, nCoeffs;
2453 const uint16_t *quant_matrix;
2455 nCoeffs= s->block_last_index[n];
2457 quant_matrix = s->inter_matrix;
/* loop starts at 0: DC is treated like the AC coefficients here */
2458 for(i=0; i<=nCoeffs; i++) {
2459 int j= s->intra_scantable.permutated[i];
/* NOTE(review): two identical formulas = negative/positive sign
 * branches; the sign-handling lines are not visible in this view. */
2464 level = (((level << 1) + 1) * qscale *
2465 ((int) (quant_matrix[j]))) >> 4;
2466 level = (level - 1) | 1;
2469 level = (((level << 1) + 1) * qscale *
2470 ((int) (quant_matrix[j]))) >> 4;
2471 level = (level - 1) | 1;
/**
 * MPEG-2 intra-block dequantization (C reference implementation).
 * Like the MPEG-1 intra version but without the odd-forcing step —
 * MPEG-2 handles mismatch control differently.
 */
2478 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2479 DCTELEM *block, int n, int qscale)
2481 int i, level, nCoeffs;
2482 const uint16_t *quant_matrix;
/* with alternate scan the last-index shortcut is not usable:
 * always process all 63 AC coefficients */
2484 if(s->alternate_scan) nCoeffs= 63;
2485 else nCoeffs= s->block_last_index[n];
/* DC coefficient: luma and chroma use separate DC scales */
2488 block[0] = block[0] * s->y_dc_scale;
2490 block[0] = block[0] * s->c_dc_scale;
2491 quant_matrix = s->intra_matrix;
2492 for(i=1;i<=nCoeffs;i++) {
2493 int j= s->intra_scantable.permutated[i];
/* NOTE(review): identical formulas = negative/positive sign branches;
 * sign-handling lines are not visible in this view. */
2498 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2501 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact variant of the MPEG-2 intra dequantizer.
 * NOTE(review): the visible arithmetic matches dct_unquantize_mpeg2_intra_c;
 * the bit-exact difference (presumably the final mismatch-control step on
 * coefficient 63) lies in lines not visible in this view — confirm.
 */
2508 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2509 DCTELEM *block, int n, int qscale)
2511 int i, level, nCoeffs;
2512 const uint16_t *quant_matrix;
/* alternate scan order invalidates the last-index shortcut */
2515 if(s->alternate_scan) nCoeffs= 63;
2516 else nCoeffs= s->block_last_index[n];
/* DC coefficient: luma and chroma use separate DC scales */
2519 block[0] = block[0] * s->y_dc_scale;
2521 block[0] = block[0] * s->c_dc_scale;
2522 quant_matrix = s->intra_matrix;
2523 for(i=1;i<=nCoeffs;i++) {
2524 int j= s->intra_scantable.permutated[i];
/* NOTE(review): identical formulas = negative/positive sign branches;
 * sign-handling lines are not visible in this view. */
2529 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2532 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter-block dequantization (C reference implementation).
 * Reconstruction: ((2*|level| + 1) * qscale * matrix) >> 4, without the
 * MPEG-1 odd-forcing step; DC is processed like the AC coefficients.
 */
2541 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2542 DCTELEM *block, int n, int qscale)
2544 int i, level, nCoeffs;
2545 const uint16_t *quant_matrix;
/* alternate scan order invalidates the last-index shortcut */
2548 if(s->alternate_scan) nCoeffs= 63;
2549 else nCoeffs= s->block_last_index[n];
2551 quant_matrix = s->inter_matrix;
/* loop starts at 0: DC is treated like the AC coefficients here */
2552 for(i=0; i<=nCoeffs; i++) {
2553 int j= s->intra_scantable.permutated[i];
/* NOTE(review): identical formulas = negative/positive sign branches;
 * sign handling and MPEG-2 mismatch control (sum parity) are in lines
 * not visible in this view. */
2558 level = (((level << 1) + 1) * qscale *
2559 ((int) (quant_matrix[j]))) >> 4;
2562 level = (((level << 1) + 1) * qscale *
2563 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263-style intra dequantization (C reference implementation).
 * Reconstruction is |level| * qmul + qadd with the sign of level, where
 * qadd = (qscale - 1) | 1 is forced odd. DC uses the separate DC scales.
 * NOTE(review): qmul is initialized in lines not visible in this view
 * (presumably 2 * qscale) — confirm.
 */
2572 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2573 DCTELEM *block, int n, int qscale)
2575 int i, level, qmul, qadd;
2578 assert(s->block_last_index[n]>=0);
/* DC coefficient: luma and chroma use separate DC scales */
2584 block[0] = block[0] * s->y_dc_scale;
2586 block[0] = block[0] * s->c_dc_scale;
/* odd reconstruction offset as required by H.263-style dequant */
2587 qadd = (qscale - 1) | 1;
/* last coefficient position translated to raster order */
2594 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2596 for(i=1; i<=nCoeffs; i++) {
/* negative level: level*qmul - qadd; positive: level*qmul + qadd */
2600 level = level * qmul - qadd;
2602 level = level * qmul + qadd;
/**
 * H.263-style inter dequantization (C reference implementation).
 * Same reconstruction as the intra variant (|level| * qmul + qadd with
 * the sign of level, qadd forced odd) but with no DC special case: the
 * loop starts at coefficient 0.
 * NOTE(review): qmul is initialized in lines not visible in this view
 * (presumably 2 * qscale) — confirm.
 */
2609 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2610 DCTELEM *block, int n, int qscale)
2612 int i, level, qmul, qadd;
2615 assert(s->block_last_index[n]>=0);
/* odd reconstruction offset as required by H.263-style dequant */
2617 qadd = (qscale - 1) | 1;
/* last coefficient position translated to raster order */
2620 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2622 for(i=0; i<=nCoeffs; i++) {
/* negative level: level*qmul - qadd; positive: level*qmul + qadd */
2626 level = level * qmul - qadd;
2628 level = level * qmul + qadd;
2636 * set qscale and update qscale dependent variables.
2638 void ff_set_qscale(MpegEncContext * s, int qscale)
/* clamp qscale to the legal 1..31 range
 * NOTE(review): the lower-bound clamp lines are not visible in this view */
2642 else if (qscale > 31)
/* chroma qscale may be remapped through a per-codec table */
2646 s->chroma_qscale= s->chroma_qscale_table[qscale];
/* DC scales derive from the (chroma) qscale */
2648 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2649 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report row-level decode progress to frame-threading consumers.
 * Skipped for B frames, partitioned frames, and after a decode error.
 * NOTE(review): compares against deprecated FF_B_TYPE while the rest of
 * this file uses AV_PICTURE_TYPE_B — consider unifying; confirm the two
 * enum values alias before changing.
 */
2652 void MPV_report_decode_progress(MpegEncContext *s)
2654 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2655 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);