2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization routines for
 * one 8x8 coefficient block; ff_dct_common_init() below installs these into
 * the corresponding MpegEncContext dct_unquantize_* function pointers. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping from luma qscale to chroma qscale (chroma quantizer equals
 * the luma quantizer); installed as the default chroma_qscale_table in
 * MPV_common_defaults(). */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* Intra DC scale, indexed by qscale: a constant 8 for every qscale value.
 * Also used as entry 0 of ff_mpeg2_dc_scale_table[] below. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale 4 for every qscale; selected as entry 1 of
 * ff_mpeg2_dc_scale_table[] (presumably keyed by the MPEG-2
 * intra_dc_precision field — confirm against the callers). */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale 2 for every qscale; entry 2 of ff_mpeg2_dc_scale_table[]. */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale 1 for every qscale; entry 3 of ff_mpeg2_dc_scale_table[]. */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup: one DC-scale table per index 0..3, with divisors 8/4/2/1
 * respectively (index 0 reuses the MPEG-1 table of 8s). */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
/* Pixel-format lists exported for decoders; the initializer entries are
 * elided from this view of the file (NOTE(review): presumably 4:2:0
 * software and hwaccel formats — confirm in the full source). */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
/* Scan [p, end) for an MPEG start code (00 00 01 xx). *state carries the
 * last bytes seen across calls so a start code split between two buffers
 * is still detected. Several interior lines are elided from this view. */
133 uint32_t tmp= *state << 8;
134 *state= tmp + *(p++);
135 if(tmp == 0x100 || p==end)
/* Fast-skip: examine the trailing bytes and advance p past positions that
 * provably cannot begin a 00 00 01 sequence. */
140 if (p[-1] > 1 ) p+= 3;
141 else if(p[-2] ) p+= 2;
142 else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize the DSP context, install the C inverse-quantizers declared
 * above, then let each architecture-specific init override them and the
 * idct with optimized versions, and finally build the permuted scantables. */
158 dsputil_init(&s->dsp, s->avctx);
160 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
161 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
162 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
163 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact output requested: use the exact (slower) MPEG-2 intra variant. */
165 if(s->flags & CODEC_FLAG_BITEXACT)
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
167 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (guarding #if/#endif lines are elided in
 * this view; each call is compiled in only on its target platform). */
170 MPV_common_init_mmx(s);
172 MPV_common_init_axp(s);
174 MPV_common_init_mlib(s);
176 MPV_common_init_mmi(s);
178 MPV_common_init_arm(s);
180 MPV_common_init_altivec(s);
182 MPV_common_init_bfin(s);
185 /* load & permutate scantables
186 note: only wmv uses different ones
/* alternate_scan selects vertical scan for both inter and intra tables;
 * otherwise the standard zigzag order is used (else-branch brace elided). */
188 if(s->alternate_scan){
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
192 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
195 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
196 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
201 void ff_copy_picture(Picture *dst, Picture *src){
/* Copy src into dst (the copy statement itself is elided from this view)
 * and tag dst as a COPY-type buffer so buffer-release paths know it does
 * not own the underlying frame data. */
203 dst->f.type= FF_BUFFER_TYPE_COPY;
207 * Release a frame buffer
209 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
211 /* Windows Media Image codecs allocate internal buffers with different
212 dimensions; ignore user defined callbacks for these */
213 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
214 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
/* WMV3IMAGE/VC1IMAGE fall through to the default release (the `else` line
 * is elided from this view). */
216 avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
/* Always drop any hwaccel private data attached to the picture. */
217 av_freep(&pic->f.hwaccel_picture_private);
221 * Allocate a frame buffer
223 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Optional hwaccel private data is allocated first; on failure the error
 * is logged (the return statement after the log is elided in this view). */
227 if (s->avctx->hwaccel) {
228 assert(!pic->hwaccel_picture_private);
229 if (s->avctx->hwaccel->priv_data_size) {
230 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
231 if (!pic->f.hwaccel_picture_private) {
232 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WM Image codecs bypass user get_buffer callbacks (same reasoning as in
 * free_frame_buffer above); everyone else goes through the thread-aware
 * get_buffer. */
238 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
239 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
241 r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
/* Validate the buffer the callback handed back before using it. */
243 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
245 r, pic->f.age, pic->f.type, pic->f.data[0]);
246 av_freep(&pic->f.hwaccel_picture_private);
/* Strides must stay constant for the whole sequence once s->linesize is
 * set; a change would invalidate precomputed offsets elsewhere. */
250 if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
251 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
252 free_frame_buffer(s, pic);
/* Both chroma planes must share one stride; the code assumes uvlinesize
 * applies to U and V alike. */
256 if (pic->f.linesize[1] != pic->f.linesize[2]) {
257 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
258 free_frame_buffer(s, pic);
266 * allocates a Picture
267 * The pixels are allocated/set by calling get_buffer() if shared=0
269 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
270 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
271 const int mb_array_size= s->mb_stride*s->mb_height;
272 const int b8_array_size= s->b8_stride*s->mb_height*2;
273 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared=1: caller already owns pixel data, just tag the type;
 * shared=0: pull a fresh buffer through alloc_frame_buffer(). */
278 assert(pic->f.data[0]);
279 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
280 pic->f.type = FF_BUFFER_TYPE_SHARED;
282 assert(!pic->f.data[0]);
284 if (alloc_frame_buffer(s, pic) < 0)
287 s->linesize = pic->f.linesize[0];
288 s->uvlinesize = pic->f.linesize[1];
/* First-time per-picture side tables; qscale_table acting as the "already
 * allocated" sentinel. */
291 if (pic->f.qscale_table == NULL) {
293 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
298 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
299 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
300 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
/* Public pointers start one row plus one column inside the base
 * allocations so code may read one MB above/left without bounds checks. */
301 pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
302 pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4 granularity (subsample_log2 == 2),
 * H.263-family and encoders at 8x8 (subsample_log2 == 3); the enclosing
 * for-loop lines over i are elided from this view. */
303 if(s->out_format == FMT_H264){
305 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
306 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
307 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
309 pic->f.motion_subsample_log2 = 2;
310 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
312 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
313 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
314 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
316 pic->f.motion_subsample_log2 = 3;
318 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
319 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
321 pic->f.qstride = s->mb_stride;
322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
325 /* It might be nicer if the application would keep track of these
326 * but it would require an API change. */
327 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
328 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
329 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
330 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
334 fail: //for the FF_ALLOCZ_OR_GOTO macro
336 free_frame_buffer(s, pic);
341 * deallocates a picture
343 static void free_picture(MpegEncContext *s, Picture *pic){
/* Release the pixel buffer (unless the picture merely borrows shared
 * data), then free every per-picture side table allocated in
 * ff_alloc_picture(). */
346 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
347 free_frame_buffer(s, pic);
350 av_freep(&pic->mb_var);
351 av_freep(&pic->mc_mb_var);
352 av_freep(&pic->mb_mean);
353 av_freep(&pic->f.mbskip_table);
354 av_freep(&pic->qscale_table_base);
355 av_freep(&pic->mb_type_base);
356 av_freep(&pic->f.dct_coeff);
357 av_freep(&pic->f.pan_scan);
358 pic->f.mb_type = NULL;
/* Per-direction tables (the enclosing for-loop line is elided here). */
360 av_freep(&pic->motion_val_base[i]);
361 av_freep(&pic->f.ref_index[i]);
/* Shared pictures never owned data[]; just clear the pointers so nothing
 * dangles (loop line elided). */
364 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
367 pic->f.data[i] = NULL;
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, ME scratchpads, ME maps, optional noise
 * reduction state, DCT blocks and, for H.263-family, the AC prediction
 * values. Returns 0 on success, -1 on allocation failure (cleanup is
 * deferred to MPV_common_end()). */
373 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
374 int y_size = s->b8_stride * (2 * s->mb_height + 1);
375 int c_size = s->mb_stride * (s->mb_height + 1);
376 int yc_size = y_size + 2 * c_size;
379 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
380 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
381 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
383 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
384 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* The ME/RD/B scratchpads deliberately alias one allocation; only one is
 * live at a time. */
385 s->me.temp= s->me.scratchpad;
386 s->rd_scratchpad= s->me.scratchpad;
387 s->b_scratchpad= s->me.scratchpad;
388 s->obmc_scratchpad= s->me.scratchpad + 16;
390 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
391 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
392 if(s->avctx->noise_reduction){
393 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
396 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
397 s->block= s->blocks[0];
/* pblocks[] gives indexed access to the individual 64-coeff blocks
 * (loop line over i elided in this view). */
400 s->pblocks[i] = &s->block[i];
403 if (s->out_format == FMT_H263) {
405 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0]=luma, [1]/[2]=chroma planes, offset past the guard row/col. */
406 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
407 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
408 s->ac_val[2] = s->ac_val[1] + c_size;
413 return -1; //free() through MPV_common_end()
/* Free everything init_duplicate_context() allocated; the aliased
 * scratchpads are freed once via me.scratchpad and the remaining aliases
 * are only NULLed (some NULL-assignments are elided in this view). */
416 static void free_duplicate_context(MpegEncContext *s){
419 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
420 av_freep(&s->me.scratchpad);
424 s->obmc_scratchpad= NULL;
426 av_freep(&s->dct_error_sum);
427 av_freep(&s->me.map);
428 av_freep(&s->me.score_map);
429 av_freep(&s->blocks);
430 av_freep(&s->ac_val_base);
/* Save (or restore) the per-thread fields of a context into bak; used by
 * ff_update_duplicate_context() to keep thread-local buffers alive across
 * a wholesale memcpy of the context. Only a subset of the COPY() lines is
 * visible in this view. */
434 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
435 #define COPY(a) bak->a= src->a
436 COPY(allocated_edge_emu_buffer);
437 COPY(edge_emu_buffer);
442 COPY(obmc_scratchpad);
449 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: back up dst's private
 * buffers, bulk-copy src over dst, restore the private buffers, then
 * re-point pblocks[] into dst's own block storage. */
461 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
464 //FIXME copy only needed parts
466 backup_duplicate_context(&bak, dst);
467 memcpy(dst, src, sizeof(MpegEncContext));
468 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's blocks, not src's (loop line elided). */
470 dst->pblocks[i] = &dst->block[i];
472 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy the decoding state a future frame needs from the
 * source thread's context (s1) into the destination thread's (s). Returns
 * 0 on success. Several interior lines are elided from this view. */
475 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
477 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
479 if(dst == src || !s1->context_initialized) return 0;
481 //FIXME can parameters change on I-frames? in that case dst may need a reinit
/* First update for this thread: clone the whole context, then fix up the
 * fields that must stay thread-private (picture range, bitstream buffer). */
482 if(!s->context_initialized){
483 memcpy(s, s1, sizeof(MpegEncContext));
486 s->picture_range_start += MAX_PICTURE_COUNT;
487 s->picture_range_end += MAX_PICTURE_COUNT;
488 s->bitstream_buffer = NULL;
489 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
494 s->avctx->coded_height = s1->avctx->coded_height;
495 s->avctx->coded_width = s1->avctx->coded_width;
496 s->avctx->width = s1->avctx->width;
497 s->avctx->height = s1->avctx->height;
499 s->coded_picture_number = s1->coded_picture_number;
500 s->picture_number = s1->picture_number;
501 s->input_picture_number = s1->input_picture_number;
/* Copy the picture array plus the contiguous last/next/current Picture
 * struct range, then rebase the pointers into s's own array. */
503 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
504 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
506 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
507 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
508 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
510 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
512 //Error/bug resilience
513 s->next_p_frame_damaged = s1->next_p_frame_damaged;
514 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4: bulk-copy the contiguous field range time_increment_bits..shape. */
517 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
520 s->max_b_frames = s1->max_b_frames;
521 s->low_delay = s1->low_delay;
522 s->dropable = s1->dropable;
524 //DivX handling (doesn't work)
525 s->divx_packed = s1->divx_packed;
/* Duplicate any pending bitstream buffer, padded per lavc requirements. */
527 if(s1->bitstream_buffer){
528 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
529 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
530 s->bitstream_buffer_size = s1->bitstream_buffer_size;
531 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
532 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
535 //MPEG2/interlacing info
536 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
538 if(!s1->first_field){
539 s->last_pict_type= s1->pict_type;
540 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
542 if(s1->pict_type!=FF_B_TYPE){
543 s->last_non_b_pict_type= s1->pict_type;
551 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
552 * the changed fields will not depend upon the prior state of the MpegEncContext.
554 void MPV_common_defaults(MpegEncContext *s){
/* Default DC/chroma tables, progressive frame layout, and zeroed picture
 * counters; the picture range spans the first MAX_PICTURE_COUNT slots
 * (frame threads get shifted ranges in ff_mpeg_update_thread_context). */
556 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
557 s->chroma_qscale_table= ff_default_chroma_qscale_table;
558 s->progressive_frame= 1;
559 s->progressive_sequence= 1;
560 s->picture_structure= PICT_FRAME;
562 s->coded_picture_number = 0;
563 s->picture_number = 0;
564 s->input_picture_number = 0;
566 s->picture_in_gop_number = 0;
571 s->picture_range_start = 0;
572 s->picture_range_end = MAX_PICTURE_COUNT;
576 * sets the given MpegEncContext to defaults for decoding.
577 * the changed fields will not depend upon the prior state of the MpegEncContext.
579 void MPV_decode_defaults(MpegEncContext *s){
/* Decoding currently needs no extra defaults beyond the common ones. */
580 MPV_common_defaults(s);
584 * init common structure for both encoder and decoder.
585 * this assumes that some variables like width/height are already set
587 av_cold int MPV_common_init(MpegEncContext *s)
589 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
590 threads = (s->encoding ||
592 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
593 s->avctx->thread_count : 1;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two macroblock
 * rows; H.264 manages its own mb_height elsewhere. */
595 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
596 s->mb_height = (s->height + 31) / 32 * 2;
597 else if (s->codec_id != CODEC_ID_H264)
598 s->mb_height = (s->height + 15) / 16;
600 if(s->avctx->pix_fmt == PIX_FMT_NONE){
601 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp the slice-thread count to one thread per MB row at most. */
605 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
606 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
607 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
608 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
609 s->avctx->thread_count, max_threads);
610 threads = max_threads;
613 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
616 ff_dct_common_init(s);
618 s->flags= s->avctx->flags;
619 s->flags2= s->avctx->flags2;
/* Dimension-dependent setup: strides (+1 columns are guard columns),
 * chroma shifts, edge positions and the mb_index2xy mapping. */
621 if (s->width && s->height) {
622 s->mb_width = (s->width + 15) / 16;
623 s->mb_stride = s->mb_width + 1;
624 s->b8_stride = s->mb_width*2 + 1;
625 s->b4_stride = s->mb_width*4 + 1;
626 mb_array_size= s->mb_height * s->mb_stride;
627 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
629 /* set chroma shifts */
630 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
631 &(s->chroma_y_shift) );
633 /* set default edge pos, will be overriden in decode_header if needed */
634 s->h_edge_pos= s->mb_width*16;
635 s->v_edge_pos= s->mb_height*16;
637 s->mb_num = s->mb_width * s->mb_height;
/* block_wrap entries for indices 0..2 and 4 are elided in this view. */
642 s->block_wrap[3]= s->b8_stride;
644 s->block_wrap[5]= s->mb_stride;
646 y_size = s->b8_stride * (2 * s->mb_height + 1);
647 c_size = s->mb_stride * (s->mb_height + 1);
648 yc_size = y_size + 2 * c_size;
650 /* convert fourcc to upper case */
651 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
653 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
655 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
657 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
658 for(y=0; y<s->mb_height; y++){
659 for(x=0; x<s->mb_width; x++){
660 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
663 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
666 /* Allocate MV tables */
667 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
668 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
669 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
670 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
671 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
672 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers start one row+column inside the base allocations
 * (guard band for neighbor access). */
673 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
674 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
675 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
676 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
677 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
678 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
/* Encoder-only state; the surrounding if(s->encoding) lines are elided
 * in this view. */
680 if(s->msmpeg4_version){
681 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
683 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
685 /* Allocate MB type table */
686 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
688 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
690 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
691 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
692 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
693 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
694 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
695 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
697 if(s->avctx->noise_reduction){
698 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* One picture pool per thread for frame threading. */
703 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
704 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
705 for(i = 0; i < s->picture_count; i++) {
706 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
709 if (s->width && s->height) {
710 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
712 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
713 /* interlaced direct mode decoding tables */
/* The nested for-loops over i/j/k are elided from this view. */
718 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
719 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
721 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
722 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
723 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
725 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
728 if (s->out_format == FMT_H263) {
730 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
731 s->coded_block= s->coded_block_base + s->b8_stride + 1;
733 /* cbp, ac_pred, pred_dir */
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
735 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
738 if (s->h263_pred || s->h263_plus || !s->encoding) {
740 //MN: we need these for error resilience of intra-frames
741 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
742 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
743 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
744 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor reset value. */
745 for(i=0;i<yc_size;i++)
746 s->dc_val_base[i] = 1024;
749 /* which mb is a intra block */
750 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
751 memset(s->mbintra_table, 1, mb_array_size);
753 /* init macroblock skip table */
754 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
755 //Note the +1 is for a quicker mpeg4 slice_end detection
756 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
758 s->parse_context.state= -1;
759 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
760 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
761 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
762 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
766 s->context_initialized = 1;
767 s->thread_context[0]= s;
/* Slice threading: clone the context per thread and assign each an MB-row
 * range; otherwise init the single context's scratch buffers. */
769 if (s->width && s->height) {
770 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
771 for(i=1; i<threads; i++){
772 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
773 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
776 for(i=0; i<threads; i++){
777 if(init_duplicate_context(s->thread_context[i], s) < 0)
779 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
780 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
783 if(init_duplicate_context(s, s) < 0) goto fail;
785 s->end_mb_y = s->mb_height;
795 /* init common structure for both encoder and decoder */
796 void MPV_common_end(MpegEncContext *s)
/* Tear down everything MPV_common_init() (and init_duplicate_context())
 * allocated, in the reverse spirit: per-thread contexts first, then MV
 * tables, per-MB tables, the picture pool and visualization buffers.
 * av_freep NULLs each pointer, so a double call is harmless. */
800 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
801 for(i=0; i<s->avctx->thread_count; i++){
802 free_duplicate_context(s->thread_context[i]);
804 for(i=1; i<s->avctx->thread_count; i++){
805 av_freep(&s->thread_context[i]);
807 } else free_duplicate_context(s);
809 av_freep(&s->parse_context.buffer);
810 s->parse_context.buffer_size=0;
812 av_freep(&s->mb_type);
813 av_freep(&s->p_mv_table_base);
814 av_freep(&s->b_forw_mv_table_base);
815 av_freep(&s->b_back_mv_table_base);
816 av_freep(&s->b_bidir_forw_mv_table_base);
817 av_freep(&s->b_bidir_back_mv_table_base);
818 av_freep(&s->b_direct_mv_table_base);
/* The offset working pointers alias the freed bases; clear them too
 * (the p_mv_table clear is elided from this view). */
820 s->b_forw_mv_table= NULL;
821 s->b_back_mv_table= NULL;
822 s->b_bidir_forw_mv_table= NULL;
823 s->b_bidir_back_mv_table= NULL;
824 s->b_direct_mv_table= NULL;
/* Field MV tables (enclosing for-loops over i/j/k elided here). */
828 av_freep(&s->b_field_mv_table_base[i][j][k]);
829 s->b_field_mv_table[i][j][k]=NULL;
831 av_freep(&s->b_field_select_table[i][j]);
832 av_freep(&s->p_field_mv_table_base[i][j]);
833 s->p_field_mv_table[i][j]=NULL;
835 av_freep(&s->p_field_select_table[i]);
838 av_freep(&s->dc_val_base);
839 av_freep(&s->coded_block_base);
840 av_freep(&s->mbintra_table);
841 av_freep(&s->cbp_table);
842 av_freep(&s->pred_dir_table);
844 av_freep(&s->mbskip_table);
845 av_freep(&s->prev_pict_types);
846 av_freep(&s->bitstream_buffer);
847 s->allocated_bitstream_buffer_size=0;
849 av_freep(&s->avctx->stats_out);
850 av_freep(&s->ac_stats);
851 av_freep(&s->error_status_table);
852 av_freep(&s->mb_index2xy);
853 av_freep(&s->lambda_table);
854 av_freep(&s->q_intra_matrix);
855 av_freep(&s->q_inter_matrix);
856 av_freep(&s->q_intra_matrix16);
857 av_freep(&s->q_inter_matrix16);
858 av_freep(&s->input_picture);
859 av_freep(&s->reordered_input_picture);
860 av_freep(&s->dct_offset);
/* Copied contexts (frame threads) do not own the picture contents. */
862 if(s->picture && !s->avctx->is_copy){
863 for(i=0; i<s->picture_count; i++){
864 free_picture(s, &s->picture[i]);
867 av_freep(&s->picture);
868 s->context_initialized = 0;
871 s->current_picture_ptr= NULL;
872 s->linesize= s->uvlinesize= 0;
/* Visualization buffers (for-loop over i elided in this view). */
875 av_freep(&s->visualization_buffer[i]);
877 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
878 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RL table, once per "last" flag. With static_store
 * the results live in caller-provided static memory and the function is
 * idempotent; otherwise each table is heap-allocated. */
881 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
883 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
884 uint8_t index_run[MAX_RUN+1];
885 int last, run, level, start, end, i;
887 /* If table is static, we can quit if rl->max_level[0] is not NULL */
888 if(static_store && rl->max_level[0])
891 /* compute max_level[], max_run[] and index_run[] */
892 for(last=0;last<2;last++) {
/* start/end select the non-last or last sub-range of the RL table
 * (the assignments are elided from this view). */
901 memset(max_level, 0, MAX_RUN + 1);
902 memset(max_run, 0, MAX_LEVEL + 1);
/* rl->n is the "no entry" sentinel for index_run. */
903 memset(index_run, rl->n, MAX_RUN + 1);
904 for(i=start;i<end;i++) {
905 run = rl->table_run[i];
906 level = rl->table_level[i];
907 if (index_run[run] == rl->n)
909 if (level > max_level[run])
910 max_level[run] = level;
911 if (run > max_run[level])
912 max_run[level] = run;
/* Publish each scratch table into static_store (carved into three
 * consecutive regions) or into fresh heap allocations. */
915 rl->max_level[last] = static_store[last];
917 rl->max_level[last] = av_malloc(MAX_RUN + 1);
918 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
920 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
922 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
923 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
925 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
927 rl->index_run[last] = av_malloc(MAX_RUN + 1);
928 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute rl_vlc[q]: for every VLC table entry and quantizer q, the
 * decoded run/level/len so the decoder can skip the table_run/table_level
 * indirection at runtime. Loop headers over q and qmul/qadd setup are
 * elided from this view. */
932 void init_vlc_rl(RLTable *rl)
944 for(i=0; i<rl->vlc.table_size; i++){
945 int code= rl->vlc.table[i][0];
946 int len = rl->vlc.table[i][1];
949 if(len==0){ // illegal code
952 }else if(len<0){ //more bits needed
956 if(code==rl->n){ //esc
/* Normal code: run is stored off-by-one; codes past rl->last carry the
 * "last coefficient" marker via the +192 offset. */
960 run= rl->table_run [code] + 1;
961 level= rl->table_level[code] * qmul + qadd;
962 if(code >= rl->last) run+=192;
965 rl->rl_vlc[q][i].len= len;
966 rl->rl_vlc[q][i].level= level;
967 rl->rl_vlc[q][i].run= run;
/* Free the frame buffers of all pictures that are neither reference
 * frames nor owned by another thread; the current picture is kept unless
 * remove_current is set. */
972 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
976 /* release non reference frames */
977 for(i=0; i<s->picture_count; i++){
978 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
979 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
980 && (remove_current || &s->picture[i] != s->current_picture_ptr)
981 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
982 free_frame_buffer(s, &s->picture[i]);
/* Return the index of a free slot in s->picture[] within this thread's
 * picture range. shared=1 wants a completely untyped slot; otherwise
 * previously-typed empty slots are preferred, falling back to any empty
 * slot. Running out of slots is an internal error (see the comment below
 * for why it aborts rather than returning -1). */
987 int ff_find_unused_picture(MpegEncContext *s, int shared){
991 for(i=s->picture_range_start; i<s->picture_range_end; i++){
992 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
996 for(i=s->picture_range_start; i<s->picture_range_end; i++){
997 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1000 for(i=s->picture_range_start; i<s->picture_range_end; i++){
1001 if (s->picture[i].f.data[0] == NULL)
1006 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
1007 /* We could return -1, but the codec would crash trying to draw into a
1008 * non-existing frame anyway. This is safer than waiting for a random crash.
1009 * Also the return of this is never useful, an encoder must only allocate
1010 * as much as allowed in the specification. This has no relationship to how
1011 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1012 * enough for such valid streams).
1013 * Plus, a decoder has to check stream validity and remove frames if too
1014 * many reference frames are around. Waiting for "OOM" is not correct at
1015 * all. Similarly, missing reference frames have to be replaced by
1016 * interpolated/MC frames, anything else is a bug in the codec ...
1022 static void update_noise_reduction(MpegEncContext *s){
/* Refresh the per-coefficient DCT offsets used for encoder noise reduction,
 * separately for the intra and inter statistics.  Once the sample count
 * exceeds 2^16 both the count and the error sums are halved — presumably so
 * the running averages track recent statistics and stay bounded. */
1025 for(intra=0; intra<2; intra++){
1026 if(s->dct_count[intra] > (1<<16)){
1027 for(i=0; i<64; i++){
1028 s->dct_error_sum[intra][i] >>=1;
1030 s->dct_count[intra] >>= 1;
1033 for(i=0; i<64; i++){
1034 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1040 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1042 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Per-frame setup run after the header has been parsed and before the frame
 * is coded/decoded: recycle old reference frames, pick or allocate the
 * current picture, rotate last/next picture pointers, fabricate dummy
 * reference frames for streams that start on a non-keyframe or a field,
 * adjust plane pointers for field pictures, and select the dequantizer.
 * NOTE(review): numerous original lines (returns, braces, loop headers) are
 * elided in this excerpt. */
1048 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1050 /* mark&release old frames */
1051 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1052 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1053 if (s->last_picture_ptr->owner2 == s)
1054 free_frame_buffer(s, s->last_picture_ptr);
1056 /* release forgotten pictures */
1057 /* if(mpeg124/h263) */
1059 for(i=0; i<s->picture_count; i++){
1060 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1061 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1062 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1063 free_frame_buffer(s, &s->picture[i]);
1071 ff_release_unused_pictures(s, 1);
1073 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1074 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1076 i= ff_find_unused_picture(s, 0);
1077 pic= &s->picture[i];
/* reference flag: H.264 stores the picture structure, other codecs mark
 * non-B frames with 3 — presumably "both fields reference"; confirm. */
1080 pic->f.reference = 0;
1082 if (s->codec_id == CODEC_ID_H264)
1083 pic->f.reference = s->picture_structure;
1084 else if (s->pict_type != AV_PICTURE_TYPE_B)
1085 pic->f.reference = 3;
1088 pic->f.coded_picture_number = s->coded_picture_number++;
1090 if(ff_alloc_picture(s, pic, 0) < 0)
1093 s->current_picture_ptr= pic;
1094 //FIXME use only the vars from current_pic
1095 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1096 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1097 if(s->picture_structure != PICT_FRAME)
1098 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1100 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1101 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1104 s->current_picture_ptr->f.pict_type = s->pict_type;
1105 // if(s->flags && CODEC_FLAG_QSCALE)
1106 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1107 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1109 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1111 if (s->pict_type != AV_PICTURE_TYPE_B) {
1112 s->last_picture_ptr= s->next_picture_ptr;
1114 s->next_picture_ptr= s->current_picture_ptr;
1116 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1117 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1118 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1119 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1120 s->pict_type, s->dropable);*/
/* Ensure the reference pictures exist: streams may legally begin on a
 * P/B frame or mid-field, so allocate dummy frames where needed and mark
 * them fully decoded for frame-threading consumers. */
1122 if(s->codec_id != CODEC_ID_H264){
1123 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1124 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1125 if (s->pict_type != AV_PICTURE_TYPE_I)
1126 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1127 else if (s->picture_structure != PICT_FRAME)
1128 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1130 /* Allocate a dummy frame */
1131 i= ff_find_unused_picture(s, 0);
1132 s->last_picture_ptr= &s->picture[i];
1133 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1135 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1136 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1138 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1139 /* Allocate a dummy frame */
1140 i= ff_find_unused_picture(s, 0);
1141 s->next_picture_ptr= &s->picture[i];
1142 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1144 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1145 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1149 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1150 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1152 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* Field pictures: point data at the selected field and double the
 * linesizes so the MC code addresses a single field. */
1154 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1157 if(s->picture_structure == PICT_BOTTOM_FIELD){
1158 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1160 s->current_picture.f.linesize[i] *= 2;
1161 s->last_picture.f.linesize[i] *= 2;
1162 s->next_picture.f.linesize[i] *= 2;
1166 s->error_recognition= avctx->error_recognition;
1168 /* set dequantizer, we can't do it during init as it might change for mpeg4
1169 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1170 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1171 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1172 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1173 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1174 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1175 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1177 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1178 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1181 if(s->dct_error_sum){
1182 assert(s->avctx->noise_reduction && s->encoding);
1184 update_noise_reduction(s);
1187 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1188 return ff_xvmc_field_start(s, avctx);
1193 /* generic function for encode/decode called after a frame has been coded/decoded */
1194 void MPV_frame_end(MpegEncContext *s)
/* Post-frame bookkeeping after a frame has been fully coded/decoded:
 * pad the picture edges (skipped for hwaccel/XvMC/VDPAU or EMU_EDGE),
 * remember last picture type and lambda, sync the current_picture copy
 * back into s->picture[], free non-reference buffers, clear the local
 * picture copies, and report the frame complete to decode threads. */
1197 /* redraw edges for the frame if decoding didn't complete */
1198 //just to make sure that all data is rendered.
1199 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1200 ff_xvmc_field_end(s);
1201 }else if((s->error_count || s->encoding)
1202 && !s->avctx->hwaccel
1203 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1204 && s->unrestricted_mv
1205 && s->current_picture.f.reference
1207 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1208 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1209 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1210 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1211 s->h_edge_pos , s->v_edge_pos,
1212 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1213 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1214 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1215 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1216 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1217 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1218 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1223 s->last_pict_type = s->pict_type;
1224 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1225 if(s->pict_type!=AV_PICTURE_TYPE_B){
1226 s->last_non_b_pict_type= s->pict_type;
1229 /* copy back current_picture variables */
1230 for(i=0; i<MAX_PICTURE_COUNT; i++){
1231 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1232 s->picture[i]= s->current_picture;
1236 assert(i<MAX_PICTURE_COUNT);
1240 /* release non-reference frames */
1241 for(i=0; i<s->picture_count; i++){
1242 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1243 free_frame_buffer(s, &s->picture[i]);
1247 // clear copies, to avoid confusion
1249 memset(&s->last_picture, 0, sizeof(Picture));
1250 memset(&s->next_picture, 0, sizeof(Picture));
1251 memset(&s->current_picture, 0, sizeof(Picture));
1253 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* Let frame-threading consumers know the whole frame is decoded. */
1255 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1256 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1261 * draws a line from (ex, ey) -> (sx, sy).
1262 * @param w width of the image
1263 * @param h height of the image
1264 * @param stride stride/linesize of the image
1265 * @param color color of the line
1267 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Draw a line into a grayscale plane using 16.16 fixed-point stepping
 * along the longer axis; intensity is split between the two pixels
 * adjacent to the ideal position.  Endpoints are clipped to the image.
 * NOTE(review): the swap conditions and the guard on the division by ex
 * are on lines elided from this excerpt — confirm against full source. */
1270 sx= av_clip(sx, 0, w-1);
1271 sy= av_clip(sy, 0, h-1);
1272 ex= av_clip(ex, 0, w-1);
1273 ey= av_clip(ey, 0, h-1);
1275 buf[sy*stride + sx]+= color;
1277 if(FFABS(ex - sx) > FFABS(ey - sy)){
1279 FFSWAP(int, sx, ex);
1280 FFSWAP(int, sy, ey);
1282 buf+= sx + sy*stride;
1284 f= ((ey-sy)<<16)/ex;
1285 for(x= 0; x <= ex; x++){
1288 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1289 buf[(y+1)*stride + x]+= (color* fr )>>16;
1293 FFSWAP(int, sx, ex);
1294 FFSWAP(int, sy, ey);
1296 buf+= sx + sy*stride;
1298 if(ey) f= ((ex-sx)<<16)/ey;
1300 for(y= 0; y <= ey; y++){
1303 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1304 buf[y*stride + x+1]+= (color* fr )>>16;
1310 * draws an arrow from (ex, ey) -> (sx, sy).
1311 * @param w width of the image
1312 * @param h height of the image
1313 * @param stride stride/linesize of the image
1314 * @param color color of the arrow
1316 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Draw an arrow: the shaft via draw_line plus, for vectors longer than
 * 3 pixels, two short head strokes at (sx,sy) whose direction is derived
 * with integer fixed-point scaling (ff_sqrt for the length, ROUNDED_DIV
 * for the rotation components). */
1319 sx= av_clip(sx, -100, w+100);
1320 sy= av_clip(sy, -100, h+100);
1321 ex= av_clip(ex, -100, w+100);
1322 ey= av_clip(ey, -100, h+100);
1327 if(dx*dx + dy*dy > 3*3){
1330 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1332 //FIXME subpixel accuracy
1333 rx= ROUNDED_DIV(rx*3<<4, length);
1334 ry= ROUNDED_DIV(ry*3<<4, length);
1336 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1337 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1339 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1343 * prints debugging info for the given picture.
1345 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
/* Print and/or visualize per-macroblock debug info for a frame, driven by
 * avctx->debug and avctx->debug_mv:
 *  - textual dumps of skip counts (FF_DEBUG_SKIP), qscale (FF_DEBUG_QP)
 *    and macroblock type glyphs (FF_DEBUG_MB_TYPE);
 *  - on-picture rendering of motion vectors, QP shading and MB-type
 *    chroma shading, drawn onto a copy in s->visualization_buffer.
 * No-op for hwaccel frames or frames without mb_type data. */
1347 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1349 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1352 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1353 switch (pict->pict_type) {
1354 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1355 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1356 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1357 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1358 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1359 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1361 for(y=0; y<s->mb_height; y++){
1362 for(x=0; x<s->mb_width; x++){
1363 if(s->avctx->debug&FF_DEBUG_SKIP){
1364 int count= s->mbskip_table[x + y*s->mb_stride];
1365 if(count>9) count=9;
1366 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1368 if(s->avctx->debug&FF_DEBUG_QP){
1369 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1371 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1372 int mb_type= pict->mb_type[x + y*s->mb_stride];
1373 //Type & MV direction
1375 av_log(s->avctx, AV_LOG_DEBUG, "P");
1376 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1377 av_log(s->avctx, AV_LOG_DEBUG, "A");
1378 else if(IS_INTRA4x4(mb_type))
1379 av_log(s->avctx, AV_LOG_DEBUG, "i");
1380 else if(IS_INTRA16x16(mb_type))
1381 av_log(s->avctx, AV_LOG_DEBUG, "I");
1382 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1383 av_log(s->avctx, AV_LOG_DEBUG, "d");
1384 else if(IS_DIRECT(mb_type))
1385 av_log(s->avctx, AV_LOG_DEBUG, "D");
1386 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1387 av_log(s->avctx, AV_LOG_DEBUG, "g");
1388 else if(IS_GMC(mb_type))
1389 av_log(s->avctx, AV_LOG_DEBUG, "G");
1390 else if(IS_SKIP(mb_type))
1391 av_log(s->avctx, AV_LOG_DEBUG, "S");
1392 else if(!USES_LIST(mb_type, 1))
1393 av_log(s->avctx, AV_LOG_DEBUG, ">");
1394 else if(!USES_LIST(mb_type, 0))
1395 av_log(s->avctx, AV_LOG_DEBUG, "<");
1397 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1398 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second glyph: partitioning (+,-,|,space) */
1403 av_log(s->avctx, AV_LOG_DEBUG, "+");
1404 else if(IS_16X8(mb_type))
1405 av_log(s->avctx, AV_LOG_DEBUG, "-");
1406 else if(IS_8X16(mb_type))
1407 av_log(s->avctx, AV_LOG_DEBUG, "|");
1408 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1409 av_log(s->avctx, AV_LOG_DEBUG, " ");
1411 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third glyph: '=' marks interlaced MBs */
1414 if(IS_INTERLACED(mb_type))
1415 av_log(s->avctx, AV_LOG_DEBUG, "=");
1417 av_log(s->avctx, AV_LOG_DEBUG, " ");
1419 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1421 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* Visual debug: draw motion vectors / QP / MB types onto a writable copy
 * of the picture so the original buffers are not trashed. */
1425 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1426 const int shift= 1 + s->quarter_sample;
1430 int h_chroma_shift, v_chroma_shift, block_height;
1431 const int width = s->avctx->width;
1432 const int height= s->avctx->height;
1433 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1434 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1435 s->low_delay=0; //needed to see the vectors without trashing the buffers
1437 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1439 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1440 pict->data[i]= s->visualization_buffer[i];
1442 pict->type= FF_BUFFER_TYPE_COPY;
1444 block_height = 16>>v_chroma_shift;
1446 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1448 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1449 const int mb_index= mb_x + mb_y*s->mb_stride;
1450 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0/1/2 = P forward / B forward / B backward vector sets */
1452 for(type=0; type<3; type++){
1455 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1459 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1463 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1468 if(!USES_LIST(pict->mb_type[mb_index], direction))
1471 if(IS_8X8(pict->mb_type[mb_index])){
1474 int sx= mb_x*16 + 4 + 8*(i&1);
1475 int sy= mb_y*16 + 4 + 8*(i>>1);
1476 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1477 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1478 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1479 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1481 }else if(IS_16X8(pict->mb_type[mb_index])){
1485 int sy=mb_y*16 + 4 + 8*i;
1486 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1487 int mx=(pict->motion_val[direction][xy][0]>>shift);
1488 int my=(pict->motion_val[direction][xy][1]>>shift);
1490 if(IS_INTERLACED(pict->mb_type[mb_index]))
1493 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1495 }else if(IS_8X16(pict->mb_type[mb_index])){
1498 int sx=mb_x*16 + 4 + 8*i;
1500 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1501 int mx=(pict->motion_val[direction][xy][0]>>shift);
1502 int my=(pict->motion_val[direction][xy][1]>>shift);
1504 if(IS_INTERLACED(pict->mb_type[mb_index]))
1507 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1510 int sx= mb_x*16 + 8;
1511 int sy= mb_y*16 + 8;
1512 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1513 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1514 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1515 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1519 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1520 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1522 for(y=0; y<block_height; y++){
1523 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1524 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1527 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1528 int mb_type= pict->mb_type[mb_index];
/* COLOR maps an angle (degrees) and radius onto U/V chroma values so each
 * MB type gets a distinct hue. */
1531 #define COLOR(theta, r)\
1532 u= (int)(128 + r*cos(theta*3.141592/180));\
1533 v= (int)(128 + r*sin(theta*3.141592/180));
1537 if(IS_PCM(mb_type)){
1539 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1541 }else if(IS_INTRA4x4(mb_type)){
1543 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1545 }else if(IS_DIRECT(mb_type)){
1547 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1549 }else if(IS_GMC(mb_type)){
1551 }else if(IS_SKIP(mb_type)){
1553 }else if(!USES_LIST(mb_type, 1)){
1555 }else if(!USES_LIST(mb_type, 0)){
1558 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1562 u*= 0x0101010101010101ULL;
1563 v*= 0x0101010101010101ULL;
1564 for(y=0; y<block_height; y++){
1565 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1566 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR-overlay partition boundaries and per-8x8 MV splits onto luma */
1570 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1571 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1572 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1574 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1576 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1578 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1579 int dm= 1 << (mv_sample_log2-2);
1581 int sx= mb_x*16 + 8*(i&1);
1582 int sy= mb_y*16 + 8*(i>>1);
1583 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1585 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1586 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1588 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1589 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1590 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1594 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1598 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation of one w x h block from a single plane at
 * reduced ("lowres") resolution.  The sub-pel fraction (sx,sy) is taken
 * from the low bits of the motion vector and rescaled to the 1/8-pel range
 * the chroma MC routines expect; an edge-emulated source buffer is used
 * when the read would fall outside the padded picture. */
1604 static inline int hpel_motion_lowres(MpegEncContext *s,
1605 uint8_t *dest, uint8_t *src,
1606 int field_based, int field_select,
1607 int src_x, int src_y,
1608 int width, int height, int stride,
1609 int h_edge_pos, int v_edge_pos,
1610 int w, int h, h264_chroma_mc_func *pix_op,
1611 int motion_x, int motion_y)
1613 const int lowres= s->avctx->lowres;
1614 const int op_index= FFMIN(lowres, 2);
1615 const int s_mask= (2<<lowres)-1;
/* qpel vectors are reduced here — the exact adjustment is on lines elided
 * from this excerpt. */
1619 if(s->quarter_sample){
1624 sx= motion_x & s_mask;
1625 sy= motion_y & s_mask;
1626 src_x += motion_x >> (lowres+1);
1627 src_y += motion_y >> (lowres+1);
1629 src += src_y * stride + src_x;
1631 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1632 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1633 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1634 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1635 src= s->edge_emu_buffer;
1639 sx= (sx << 2) >> lowres;
1640 sy= (sy << 2) >> lowres;
1643 pix_op[op_index](dest, src, stride, h, sx, sy);
1647 /* apply one mpeg motion vector to the three components */
/* Motion-compensate one macroblock's luma and both chroma planes at
 * reduced ("lowres") resolution for a single MPEG-style motion vector.
 * Handles field-based addressing (doubled linesize, bottom-field offset),
 * per-format chroma vector derivation (H.263 rounding, full-pel H.261,
 * plain halving otherwise), and emulated-edge sources when the reference
 * block overlaps the picture border. */
1648 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1649 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1650 int field_based, int bottom_field, int field_select,
1651 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1652 int motion_x, int motion_y, int h, int mb_y)
1654 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1655 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1656 const int lowres= s->avctx->lowres;
1657 const int op_index= FFMIN(lowres, 2);
1658 const int block_s= 8>>lowres;
1659 const int s_mask= (2<<lowres)-1;
1660 const int h_edge_pos = s->h_edge_pos >> lowres;
1661 const int v_edge_pos = s->v_edge_pos >> lowres;
1662 linesize = s->current_picture.f.linesize[0] << field_based;
1663 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1665 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1671 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1674 sx= motion_x & s_mask;
1675 sy= motion_y & s_mask;
1676 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1677 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1679 if (s->out_format == FMT_H263) {
1680 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1681 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1684 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1687 uvsx = (2*mx) & s_mask;
1688 uvsy = (2*my) & s_mask;
1689 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1690 uvsrc_y = mb_y*block_s + (my >> lowres);
1696 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1697 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1700 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1701 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1702 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* fall back to the edge-emulation buffer when any plane read would leave
 * the padded picture area */
1704 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1705 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1706 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1707 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1708 ptr_y = s->edge_emu_buffer;
1709 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1710 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1711 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1712 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1713 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1714 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1720 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1721 dest_y += s->linesize;
1722 dest_cb+= s->uvlinesize;
1723 dest_cr+= s->uvlinesize;
1727 ptr_y += s->linesize;
1728 ptr_cb+= s->uvlinesize;
1729 ptr_cr+= s->uvlinesize;
1732 sx= (sx << 2) >> lowres;
1733 sy= (sy << 2) >> lowres;
1734 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1736 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1737 uvsx= (uvsx << 2) >> lowres;
1738 uvsy= (uvsy << 2) >> lowres;
1739 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1740 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1742 //FIXME h261 lowres loop filter
/* Chroma motion compensation for 8x8 (4MV) macroblocks at lowres: the four
 * luma vectors (accumulated by the caller into mx/my) are rounded into one
 * chroma vector with ff_h263_round_chroma, then Cb and Cr are filtered
 * with the same source offset. */
1745 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1746 uint8_t *dest_cb, uint8_t *dest_cr,
1747 uint8_t **ref_picture,
1748 h264_chroma_mc_func *pix_op,
1750 const int lowres= s->avctx->lowres;
1751 const int op_index= FFMIN(lowres, 2);
1752 const int block_s= 8>>lowres;
1753 const int s_mask= (2<<lowres)-1;
1754 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1755 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1756 int emu=0, src_x, src_y, offset, sx, sy;
1759 if(s->quarter_sample){
1764 /* In case of 8X8, we construct a single chroma motion vector
1765 with a special rounding */
1766 mx= ff_h263_round_chroma(mx);
1767 my= ff_h263_round_chroma(my);
1771 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1772 src_y = s->mb_y*block_s + (my >> (lowres+1));
1774 offset = src_y * s->uvlinesize + src_x;
1775 ptr = ref_picture[1] + offset;
1776 if(s->flags&CODEC_FLAG_EMU_EDGE){
1777 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1778 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1779 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1780 ptr= s->edge_emu_buffer;
1784 sx= (sx << 2) >> lowres;
1785 sy= (sy << 2) >> lowres;
1786 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and, when needed, the same edge emulation. */
1788 ptr = ref_picture[2] + offset;
1790 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1791 ptr= s->edge_emu_buffer;
1793 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1797 * motion compensation of a single macroblock
1799 * @param dest_y luma destination pointer
1800 * @param dest_cb chroma cb/u destination pointer
1801 * @param dest_cr chroma cr/v destination pointer
1802 * @param dir direction (0->forward, 1->backward)
1803 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1804 * @param pix_op halfpel motion compensation function (average or put normally)
1805 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatch lowres motion compensation for the current macroblock according
 * to s->mv_type: a single whole-block vector, four 8x8 luma blocks with one
 * combined chroma vector, field-based prediction, 16x8 halves, and a final
 * case that averages a second prediction via avg_h264_chroma_pixels_tab.
 * NOTE(review): the case labels of the switch are on lines elided from this
 * excerpt — the per-case descriptions above should be confirmed against the
 * full source. */
1807 static inline void MPV_motion_lowres(MpegEncContext *s,
1808 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1809 int dir, uint8_t **ref_picture,
1810 h264_chroma_mc_func *pix_op)
1814 const int lowres= s->avctx->lowres;
1815 const int block_s= 8>>lowres;
1820 switch(s->mv_type) {
1822 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1824 ref_picture, pix_op,
1825 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
1831 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1832 ref_picture[0], 0, 0,
1833 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1834 s->width, s->height, s->linesize,
1835 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1836 block_s, block_s, pix_op,
1837 s->mv[dir][i][0], s->mv[dir][i][1]);
1839 mx += s->mv[dir][i][0];
1840 my += s->mv[dir][i][1];
1843 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1844 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
1847 if (s->picture_structure == PICT_FRAME) {
/* frame picture: predict top and bottom field separately */
1849 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1850 1, 0, s->field_select[dir][0],
1851 ref_picture, pix_op,
1852 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1854 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1855 1, 1, s->field_select[dir][1],
1856 ref_picture, pix_op,
1857 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
1859 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1860 ref_picture = s->current_picture_ptr->f.data;
1863 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1864 0, 0, s->field_select[dir][0],
1865 ref_picture, pix_op,
1866 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1871 uint8_t ** ref2picture;
/* same-parity references may live in the current picture for the second
 * field of non-B pictures */
1873 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1874 ref2picture= ref_picture;
1876 ref2picture = s->current_picture_ptr->f.data;
1879 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1880 0, 0, s->field_select[dir][i],
1881 ref2picture, pix_op,
1882 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1884 dest_y += 2*block_s*s->linesize;
1885 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1886 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1890 if(s->picture_structure == PICT_FRAME){
1894 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1896 ref_picture, pix_op,
1897 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1899 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1903 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1904 0, 0, s->picture_structure != i+1,
1905 ref_picture, pix_op,
1906 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1908 // after put we make avg of the same block
1909 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1911 //opposite parity is always in the same frame if this is second field
1912 if(!s->first_field){
1913 ref_picture = s->current_picture_ptr->f.data;
1923 * find the lowest MB row referenced in the MVs
1925 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* Scan the motion vectors of direction 'dir' and return the bottommost MB
 * row they reference, clamped to the picture; used by frame threading to
 * know how much of the reference frame must already be decoded.  Field
 * pictures take the unhandled path and conservatively return the last row.
 * NOTE(review): the switch cases setting 'mvs'/'off' adjustments are on
 * lines elided from this excerpt. */
1927 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1928 int my, off, i, mvs;
1930 if (s->picture_structure != PICT_FRAME) goto unhandled;
1932 switch (s->mv_type) {
1946 for (i = 0; i < mvs; i++) {
1947 my = s->mv[dir][i][1]<<qpel_shift;
1948 my_max = FFMAX(my_max, my);
1949 my_min = FFMIN(my_min, my);
1952 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1954 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1956 return s->mb_height-1;
1959 /* put block[] to dest[] */
/* Dequantize an intra block and overwrite dest with its IDCT result. */
1960 static inline void put_dct(MpegEncContext *s,
1961 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1963 s->dct_unquantize_intra(s, block, i, qscale);
1964 s->dsp.idct_put (dest, line_size, block);
1967 /* add block[] to dest[] */
/* IDCT-add a block to dest, but only if it is coded (last index >= 0). */
1968 static inline void add_dct(MpegEncContext *s,
1969 DCTELEM *block, int i, uint8_t *dest, int line_size)
1971 if (s->block_last_index[i] >= 0) {
1972 s->dsp.idct_add (dest, line_size, block);
/* Inter-dequantize a coded block, then IDCT-add it onto dest. */
1976 static inline void add_dequant_dct(MpegEncContext *s,
1977 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1979 if (s->block_last_index[i] >= 0) {
1980 s->dct_unquantize_inter(s, block, i, qscale);
1982 s->dsp.idct_add (dest, line_size, block);
1987 * cleans dc, ac, coded_block for the current non intra MB
1989 void ff_clean_intra_table_entries(MpegEncContext *s)
/* Reset intra-prediction state around the current (non-intra) macroblock:
 * luma DC predictors back to the 1024 midpoint, AC prediction values
 * zeroed, coded-block flags cleared (MSMPEG4 v3+), then the same for the
 * chroma DC/AC state, and finally the mbintra flag itself. */
1991 int wrap = s->b8_stride;
1992 int xy = s->block_index[0];
1995 s->dc_val[0][xy + 1 ] =
1996 s->dc_val[0][xy + wrap] =
1997 s->dc_val[0][xy + 1 + wrap] = 1024;
1999 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2000 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2001 if (s->msmpeg4_version>=3) {
2002 s->coded_block[xy ] =
2003 s->coded_block[xy + 1 ] =
2004 s->coded_block[xy + wrap] =
2005 s->coded_block[xy + 1 + wrap] = 0;
/* chroma uses the MB-granularity tables */
2008 wrap = s->mb_stride;
2009 xy = s->mb_x + s->mb_y * wrap;
2011 s->dc_val[2][xy] = 1024;
2013 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2014 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2016 s->mbintra_table[xy]= 0;
2019 /* generic function called after a macroblock has been parsed by the
2020 decoder or after it has been encoded by the encoder.
2022 Important variables used:
2023 s->mb_intra : true if intra macroblock
2024 s->mv_dir : motion vector direction
2025 s->mv_type : motion vector type
2026 s->mv : motion vector
2027 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock into the current picture: motion
 * compensation (if inter) followed by dequant/IDCT of the residual or
 * intra blocks.  Shared by encoder reconstruction and decoder.
 * lowres_flag / is_mpeg12 are passed as literal constants by the
 * wrapper below, so with av_always_inline each combination compiles to
 * a specialized body.
 * NOTE(review): the embedded original line numbers in this excerpt are
 * non-contiguous — lines (braces, else-branches) have been elided.
 * Comments below describe only what the visible lines demonstrably do. */
2029 static av_always_inline
2030 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2031 int lowres_flag, int is_mpeg12)
2033 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware acceleration consumes the pblocks itself. */
2034 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2035 ff_xvmc_decode_mb(s);//xvmc uses pblocks
/* Debug aid: copy this MB's (up to) 6*64 coefficients, un-permuted via
 * idct_permutation, into dct_coeff and log them. */
2039 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2040 /* save DCT coefficients */
2042 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2043 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2045 for(j=0; j<64; j++){
2046 *dct++ = block[i][s->dsp.idct_permutation[j]];
2047 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2049 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2053 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2055 /* update DC predictors for P macroblocks */
2057 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2058 if(s->mbintra_table[mb_xy])
2059 ff_clean_intra_table_entries(s);
/* Reset DC predictor to the mid value for the current DC precision. */
2063 s->last_dc[2] = 128 << s->intra_dc_precision;
2066 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2067 s->mbintra_table[mb_xy]=1;
/* Pixel reconstruction can be skipped only when encoding non-reference
 * content without RD macroblock decision and PSNR reporting is off. */
2069 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2070 uint8_t *dest_y, *dest_cb, *dest_cr;
2071 int dct_linesize, dct_offset;
2072 op_pixels_func (*op_pix)[4];
2073 qpel_mc_func (*op_qpix)[16];
2074 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2075 const int uvlinesize = s->current_picture.f.linesize[1];
2076 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2077 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2079 /* avoid copy if macroblock skipped in last frame too */
2080 /* skip only during decoding as we might trash the buffers during encoding a bit */
/* mbskip_table counts (saturated at 99) how long each MB has been
 * skipped; compared against the picture buffer's age below. */
2082 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2083 const int age = s->current_picture.f.age;
2087 if (s->mb_skipped) {
2089 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2091 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2092 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2094 /* if previous was skipped too, then nothing to do ! */
2095 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2098 } else if(!s->current_picture.f.reference) {
2099 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2100 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2102 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: rows of one field → double the stride, second block
 * starts one (field) line down instead of block_size lines down. */
2106 dct_linesize = linesize << s->interlaced_dct;
2107 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2111 dest_cb= s->dest[1];
2112 dest_cr= s->dest[2];
/* Non-readable destination: reconstruct into the scratchpad and copy
 * out at the end (see put_pixels_tab calls below). */
2114 dest_y = s->b_scratchpad;
2115 dest_cb= s->b_scratchpad+16*linesize;
2116 dest_cr= s->b_scratchpad+32*linesize;
2120 /* motion handling */
2121 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: wait until the reference rows we need have been
 * decoded by the other thread before doing MC from them. */
2124 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2125 if (s->mv_dir & MV_DIR_FORWARD) {
2126 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2128 if (s->mv_dir & MV_DIR_BACKWARD) {
2129 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* Lowres path uses the h264 chroma MC helpers; forward prediction is
 * "put", a following backward prediction averages on top ("avg"). */
2134 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2136 if (s->mv_dir & MV_DIR_FORWARD) {
2137 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2138 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2140 if (s->mv_dir & MV_DIR_BACKWARD) {
2141 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* Full-res path: choose rounding vs no-rounding put, same put→avg
 * promotion for bidirectional prediction. */
2144 op_qpix= s->me.qpel_put;
2145 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2146 op_pix = s->dsp.put_pixels_tab;
2148 op_pix = s->dsp.put_no_rnd_pixels_tab;
2150 if (s->mv_dir & MV_DIR_FORWARD) {
2151 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2152 op_pix = s->dsp.avg_pixels_tab;
2153 op_qpix= s->me.qpel_avg;
2155 if (s->mv_dir & MV_DIR_BACKWARD) {
2156 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2161 /* skip dequant / idct if we are really late ;) */
2162 if(s->avctx->skip_idct){
2163 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2164 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2165 || s->avctx->skip_idct >= AVDISCARD_ALL)
2169 /* add dct residue */
/* Codecs whose coefficients are not already dequantized at parse time
 * go through add_dequant_dct (dequant + IDCT + add); others use
 * add_dct; WMV2 has its own MB-add entry point. */
2170 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2171 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2172 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2173 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2174 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2175 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2177 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2178 if (s->chroma_y_shift){
2179 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2180 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2184 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2185 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2186 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2187 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2190 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2191 add_dct(s, block[0], 0, dest_y , dct_linesize);
2192 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2193 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2194 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2196 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2197 if(s->chroma_y_shift){//Chroma420
2198 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2199 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2202 dct_linesize = uvlinesize << s->interlaced_dct;
2203 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2205 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2206 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2207 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2208 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2209 if(!s->chroma_x_shift){//Chroma444
2210 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2211 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2212 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2213 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2218 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2219 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2222 /* dct only in intra block */
/* Intra path: put (overwrite) instead of add; MPEG-1/2 decode uses the
 * raw idct_put since dequant already happened at parse time. */
2223 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2224 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2225 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2226 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2227 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2229 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2230 if(s->chroma_y_shift){
2231 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2232 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2236 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2237 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2238 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2239 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2243 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2244 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2245 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2246 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2248 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2249 if(s->chroma_y_shift){
2250 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2251 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2254 dct_linesize = uvlinesize << s->interlaced_dct;
2255 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2257 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2258 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2259 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2260 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2261 if(!s->chroma_x_shift){//Chroma444
2262 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2263 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2264 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2265 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Scratchpad case: copy the reconstructed MB into the real picture. */
2273 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2274 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2275 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the always-inlined worker with
 * constant (lowres_flag, is_mpeg12) arguments so each of the four
 * combinations is compiled as a specialized function. */
2280 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2282 if(s->out_format == FMT_MPEG1) {
2283 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2284 else MPV_decode_mb_internal(s, block, 0, 1);
2287 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2288 else MPV_decode_mb_internal(s, block, 0, 0);
2293 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Finish a horizontal band [y, y+h): replicate picture edges for
 * unrestricted motion vectors (software decode of reference frames
 * only), then hand the band to the user's draw_horiz_band callback.
 * NOTE(review): lines are elided in this excerpt (embedded original
 * line numbers are non-contiguous). */
2295 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2296 const int field_pic= s->picture_structure != PICT_FRAME;
2302 if (!s->avctx->hwaccel
2303 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2304 && s->unrestricted_mv
2305 && s->current_picture.f.reference
2307 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2308 int sides = 0, edge_h;
/* Chroma subsampling shifts taken from the pixel format descriptor. */
2309 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2310 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2311 if (y==0) sides |= EDGE_TOP;
2312 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2314 edge_h= FFMIN(h, s->v_edge_pos - y);
2316 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize,
2317 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
2318 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2319 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2320 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2321 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* Clip band height against the display height for the last row. */
2324 h= FFMIN(h, s->avctx->height - y);
2326 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2328 if (s->avctx->draw_horiz_band) {
/* B-frames (and low-delay/coded-order) are drawn from the current
 * picture; otherwise the band comes from the last reference. */
2332 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2333 src= (AVFrame*)s->current_picture_ptr;
2334 else if(s->last_picture_ptr)
2335 src= (AVFrame*)s->last_picture_ptr;
2339 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2345 offset[0]= y * s->linesize;
2347 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2353 s->avctx->draw_horiz_band(s->avctx, src, offset,
2354 y, s->picture_structure, h);
/* Initialize per-macroblock-row state: the six block_index[] entries
 * (luma at b8 resolution, chroma after the luma plane of the index
 * array) and the dest[] pointers into the current picture planes.
 * Indices are set one MB to the left (mb_x - 1); presumably advanced
 * per-MB elsewhere — elided from this excerpt. */
2358 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2359 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2360 const int uvlinesize = s->current_picture.f.linesize[1];
2361 const int mb_size= 4 - s->avctx->lowres;
2363 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2364 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2365 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2366 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2367 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2368 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2369 //block_index is not used by mpeg2, so it is not affected by chroma_format
2371 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2372 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2373 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2375 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2377 if(s->picture_structure==PICT_FRAME){
2378 s->dest[0] += s->mb_y * linesize << mb_size;
2379 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2380 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: only every second MB row belongs to this field. */
2382 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2383 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2384 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2385 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush decoder state (e.g. on seek): release all internally/user
 * allocated picture buffers, drop the last/next/current references and
 * reset the bitstream parse context.  Safe to call with a partially
 * initialized context (NULL checks below). */
2390 void ff_mpeg_flush(AVCodecContext *avctx){
2392 MpegEncContext *s = avctx->priv_data;
2394 if(s==NULL || s->picture==NULL)
2397 for(i=0; i<s->picture_count; i++){
2398 if (s->picture[i].f.data[0] &&
2399 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2400 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2401 free_frame_buffer(s, &s->picture[i]);
2403 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2405 s->mb_x= s->mb_y= 0;
/* state == -1 means "no start code found yet". */
2408 s->parse_context.state= -1;
2409 s->parse_context.frame_start_found= 0;
2410 s->parse_context.overread= 0;
2411 s->parse_context.overread_index= 0;
2412 s->parse_context.index= 0;
2413 s->parse_context.last_index= 0;
2414 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization (C reference): DC (block[0]) is scaled
 * by the luma/chroma DC scale, AC levels by qscale * intra_matrix
 * with a >> 3, then forced odd via (level - 1) | 1 (mismatch control).
 * NOTE(review): lines elided in this excerpt (non-contiguous embedded
 * line numbers); the sign handling branches are only partially visible. */
2418 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2419 DCTELEM *block, int n, int qscale)
2421 int i, level, nCoeffs;
2422 const uint16_t *quant_matrix;
2424 nCoeffs= s->block_last_index[n];
2427 block[0] = block[0] * s->y_dc_scale;
2429 block[0] = block[0] * s->c_dc_scale;
2430 /* XXX: only mpeg1 */
2431 quant_matrix = s->intra_matrix;
2432 for(i=1;i<=nCoeffs;i++) {
2433 int j= s->intra_scantable.permutated[i];
2438 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2439 level = (level - 1) | 1;
2442 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2443 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: level = ((2*|level| + 1) * qscale *
 * inter_matrix) >> 4, forced odd via (level - 1) | 1; loop starts at
 * i=0 since inter blocks have no separately coded DC.
 * NOTE(review): lines elided in this excerpt. */
2450 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2451 DCTELEM *block, int n, int qscale)
2453 int i, level, nCoeffs;
2454 const uint16_t *quant_matrix;
2456 nCoeffs= s->block_last_index[n];
2458 quant_matrix = s->inter_matrix;
2459 for(i=0; i<=nCoeffs; i++) {
2460 int j= s->intra_scantable.permutated[i];
2465 level = (((level << 1) + 1) * qscale *
2466 ((int) (quant_matrix[j]))) >> 4;
2467 level = (level - 1) | 1;
2470 level = (((level << 1) + 1) * qscale *
2471 ((int) (quant_matrix[j]))) >> 4;
2472 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 variant but WITHOUT the
 * odd-forcing step (MPEG-2 uses a different mismatch control), and
 * nCoeffs is forced to 63 when alternate_scan is in use.
 * NOTE(review): lines elided in this excerpt. */
2479 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2480 DCTELEM *block, int n, int qscale)
2482 int i, level, nCoeffs;
2483 const uint16_t *quant_matrix;
2485 if(s->alternate_scan) nCoeffs= 63;
2486 else nCoeffs= s->block_last_index[n];
2489 block[0] = block[0] * s->y_dc_scale;
2491 block[0] = block[0] * s->c_dc_scale;
2492 quant_matrix = s->intra_matrix;
2493 for(i=1;i<=nCoeffs;i++) {
2494 int j= s->intra_scantable.permutated[i];
2499 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2502 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization; the visible lines
 * match dct_unquantize_mpeg2_intra_c (the distinguishing code —
 * presumably the spec's sum/parity mismatch accumulation — is elided
 * from this excerpt). */
2509 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2510 DCTELEM *block, int n, int qscale)
2512 int i, level, nCoeffs;
2513 const uint16_t *quant_matrix;
2516 if(s->alternate_scan) nCoeffs= 63;
2517 else nCoeffs= s->block_last_index[n];
2520 block[0] = block[0] * s->y_dc_scale;
2522 block[0] = block[0] * s->c_dc_scale;
2523 quant_matrix = s->intra_matrix;
2524 for(i=1;i<=nCoeffs;i++) {
2525 int j= s->intra_scantable.permutated[i];
2530 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2533 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*|level| + 1) * qscale *
 * inter_matrix) >> 4, no odd-forcing (unlike MPEG-1); nCoeffs forced
 * to 63 under alternate_scan.  NOTE(review): lines elided. */
2542 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2543 DCTELEM *block, int n, int qscale)
2545 int i, level, nCoeffs;
2546 const uint16_t *quant_matrix;
2549 if(s->alternate_scan) nCoeffs= 63;
2550 else nCoeffs= s->block_last_index[n];
2552 quant_matrix = s->inter_matrix;
2553 for(i=0; i<=nCoeffs; i++) {
2554 int j= s->intra_scantable.permutated[i];
2559 level = (((level << 1) + 1) * qscale *
2560 ((int) (quant_matrix[j]))) >> 4;
2563 level = (((level << 1) + 1) * qscale *
2564 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: uniform quantizer, level =
 * level*qmul ± qadd depending on sign, with qadd = (qscale - 1) | 1;
 * DC is scaled by the y/c DC scale.  nCoeffs comes from the raster_end
 * table so the loop covers full raster rows up to the last coefficient.
 * NOTE(review): lines elided (qmul assignment not visible here). */
2573 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2574 DCTELEM *block, int n, int qscale)
2576 int i, level, qmul, qadd;
2579 assert(s->block_last_index[n]>=0);
2585 block[0] = block[0] * s->y_dc_scale;
2587 block[0] = block[0] * s->c_dc_scale;
2588 qadd = (qscale - 1) | 1;
2595 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2597 for(i=1; i<=nCoeffs; i++) {
2601 level = level * qmul - qadd;
2603 level = level * qmul + qadd;
/* H.263-style inter dequantization: same uniform level*qmul ± qadd as
 * the intra variant, but without DC special-casing (loop from i=0).
 * NOTE(review): lines elided (qmul assignment not visible here). */
2610 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2611 DCTELEM *block, int n, int qscale)
2613 int i, level, qmul, qadd;
2616 assert(s->block_last_index[n]>=0);
2618 qadd = (qscale - 1) | 1;
2621 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2623 for(i=0; i<=nCoeffs; i++) {
2627 level = level * qmul - qadd;
2629 level = level * qmul + qadd;
2637 * set qscale and update qscale dependent variables.
/* Set s->qscale and the values derived from it: chroma_qscale via the
 * codec's chroma table and the luma/chroma DC scales.  The visible
 * "qscale > 31" check suggests clamping to [1,31] — the clamp
 * assignments themselves are elided from this excerpt. */
2639 void ff_set_qscale(MpegEncContext * s, int qscale)
2643 else if (qscale > 31)
2647 s->chroma_qscale= s->chroma_qscale_table[qscale];
2649 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2650 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2653 void MPV_report_decode_progress(MpegEncContext *s)
2655 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2656 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);