2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantisation routines for the
 * supported bitstream flavours (MPEG-1, MPEG-2, H.263).  They are installed
 * as function pointers on the MpegEncContext by ff_dct_common_init() below,
 * and may later be replaced by arch-optimised versions. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
/* bit-exact MPEG-2 intra variant, selected when CODEC_FLAG_BITEXACT is set */
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping from luma qscale (0..31) to chroma qscale; used as the
 * default when a codec does not provide its own chroma qscale table. */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* DC coefficient scale for MPEG-1: the divisor is a constant 8 regardless of
 * qscale, hence a table filled with 8 for all 128 indices. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4: selected via ff_mpeg2_dc_scale_table[]
 * below (index 1, i.e. intra_dc_precision == 1). */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2: selected via ff_mpeg2_dc_scale_table[]
 * below (index 2, i.e. intra_dc_precision == 2). */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 (no DC scaling): selected via
 * ff_mpeg2_dc_scale_table[] below (index 3, i.e. intra_dc_precision == 3). */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* MPEG-2 DC scale tables indexed by intra_dc_precision (0..3): the divisors
 * are 8, 4, 2, 1 respectively (precision 0 reuses the MPEG-1 table). */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
/* Pixel formats offered for plain software 4:2:0 decoding.
 * NOTE(review): initialiser contents are not visible in this chunk; the list
 * is presumably terminated by PIX_FMT_NONE — confirm against the full file. */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
/* 4:2:0 list including hardware-accelerated (hwaccel) pixel formats. */
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (byte sequence 00 00 01 xx).
 * *state carries the last four bytes seen as a rolling 32-bit value so a
 * start code split across successive calls is still found.  Returns a
 * pointer just past the start code, or end if none was found.
 */
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
/* shift history left one byte and append the next input byte */
133 uint32_t tmp= *state << 8;
134 *state= tmp + *(p++);
/* tmp == 0x100 means the previous three bytes were 00 00 01 */
135 if(tmp == 0x100 || p==end)
/* fast path: skip ahead based on the bytes just read; a byte > 1 can never
 * be part of a 00 00 01 prefix, so up to 3 positions can be skipped */
140 if (p[-1] > 1 ) p+= 3;
141 else if(p[-2] ) p+= 2;
142 else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
/* Installs the C reference dequantisers (with a bit-exact MPEG-2 intra
 * variant under CODEC_FLAG_BITEXACT), lets the per-architecture init hooks
 * override them, then builds the permuted scan tables matching the IDCT's
 * coefficient permutation. */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158 dsputil_init(&s->dsp, s->avctx);
160 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
161 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
162 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
163 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
165 if(s->flags & CODEC_FLAG_BITEXACT)
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
167 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* arch-specific overrides; each call is presumably guarded by HAVE_/ARCH_
 * preprocessor conditionals not visible in this chunk — TODO confirm */
170 MPV_common_init_mmx(s);
172 MPV_common_init_axp(s);
174 MPV_common_init_mlib(s);
176 MPV_common_init_mmi(s);
178 MPV_common_init_arm(s);
180 MPV_common_init_altivec(s);
182 MPV_common_init_bfin(s);
185 /* load & permutate scantables
186 note: only wmv uses different ones
/* interlaced content scans coefficients vertically first */
188 if(s->alternate_scan){
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
192 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
195 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
196 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the copy's buffer type as COPY so the original's
 * buffers are not freed twice.  NOTE(review): the actual copy statement is
 * not visible in this chunk — presumably a struct assignment precedes the
 * type override; confirm against the full file. */
201 void ff_copy_picture(Picture *dst, Picture *src){
203 dst->f.type= FF_BUFFER_TYPE_COPY;
207 * Release a frame buffer
/* Releases the buffer through the threading layer for normal codecs, or the
 * default release callback for WMV3IMAGE/VC1IMAGE (which allocate internal
 * buffers with different dimensions), then frees the hwaccel private data. */
209 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
211 /* Windows Media Image codecs allocate internal buffers with different
212 dimensions; ignore user defined callbacks for these */
213 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
214 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
216 avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
217 av_freep(&pic->f.hwaccel_picture_private);
221 * Allocate a frame buffer
/* Allocates the pixel buffer for pic via the threading layer (or the default
 * get_buffer for the WM image codecs), allocating hwaccel private data first
 * if needed.  Validates the result: data present, and strides consistent
 * with the context and between the two chroma planes.  Returns 0 on success,
 * negative on failure (any partially allocated state is freed). */
223 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
227 if (s->avctx->hwaccel) {
228 assert(!pic->f.hwaccel_picture_private);
229 if (s->avctx->hwaccel->priv_data_size) {
230 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
231 if (!pic->f.hwaccel_picture_private) {
232 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WM image codecs bypass user callbacks; see free_frame_buffer() */
238 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
239 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
241 r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
243 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
245 r, pic->f.age, pic->f.type, pic->f.data[0]);
246 av_freep(&pic->f.hwaccel_picture_private);
/* the context's strides are fixed after the first allocation; a mismatch
 * means the user's get_buffer changed them mid-stream */
250 if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
251 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
252 free_frame_buffer(s, pic);
256 if (pic->f.linesize[1] != pic->f.linesize[2]) {
257 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
258 free_frame_buffer(s, pic);
266 * allocates a Picture
267 * The pixels are allocated/set by calling get_buffer() if shared=0
/* Allocates the per-picture side tables (qscale, mb_type, motion vectors,
 * reference indices, optional encoder statistics and debug tables) in
 * addition to the pixel buffer.  With shared=1 the caller owns the pixels
 * and only the side tables are allocated here.  Returns 0 on success,
 * negative on failure; on failure everything allocated so far is freed. */
269 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
270 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
271 const int mb_array_size= s->mb_stride*s->mb_height;
272 const int b8_array_size= s->b8_stride*s->mb_height*2;
273 const int b4_array_size= s->b4_stride*s->mb_height*4;
278 assert(pic->f.data[0]);
279 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
280 pic->f.type = FF_BUFFER_TYPE_SHARED;
282 assert(!pic->f.data[0]);
284 if (alloc_frame_buffer(s, pic) < 0)
/* remember the strides of the first allocation; later allocations are
 * validated against them in alloc_frame_buffer() */
287 s->linesize = pic->f.linesize[0];
288 s->uvlinesize = pic->f.linesize[1];
/* side tables are only allocated once per Picture slot and reused */
291 if (pic->f.qscale_table == NULL) {
293 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
298 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
299 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
300 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
/* offset past the guard row/column so index -1 accesses stay in bounds */
301 pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
302 pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
/* H.264 stores motion at 4x4 granularity (subsample_log2 == 2),
 * other codecs at 8x8 (subsample_log2 == 3) */
303 if(s->out_format == FMT_H264){
305 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
306 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
307 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
309 pic->f.motion_subsample_log2 = 2;
310 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
312 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
313 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
314 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
316 pic->f.motion_subsample_log2 = 3;
318 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
319 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
321 pic->f.qstride = s->mb_stride;
322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
325 /* It might be nicer if the application would keep track of these
326 * but it would require an API change. */
327 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
328 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
329 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
330 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
334 fail: //for the FF_ALLOCZ_OR_GOTO macro
336 free_frame_buffer(s, pic);
341 * deallocates a picture
/* Frees the pixel buffer (unless it is a caller-owned SHARED buffer) and all
 * the per-picture side tables allocated by ff_alloc_picture(); for SHARED
 * pictures only the data pointers are cleared, not freed. */
343 static void free_picture(MpegEncContext *s, Picture *pic){
346 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
347 free_frame_buffer(s, pic);
350 av_freep(&pic->mb_var);
351 av_freep(&pic->mc_mb_var);
352 av_freep(&pic->mb_mean);
353 av_freep(&pic->f.mbskip_table);
354 av_freep(&pic->qscale_table_base);
355 av_freep(&pic->mb_type_base);
356 av_freep(&pic->f.dct_coeff);
357 av_freep(&pic->f.pan_scan);
358 pic->f.mb_type = NULL;
360 av_freep(&pic->motion_val_base[i]);
361 av_freep(&pic->f.ref_index[i]);
/* shared buffers are owned by the caller: drop the references only */
364 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
367 pic->f.data[i] = NULL;
/* Allocates the per-thread scratch buffers (edge emulation, motion
 * estimation scratchpads and maps, DCT blocks, and for H.263-family codecs
 * the AC prediction values) of a duplicated slice-thread context.
 * Returns 0 on success, -1 on allocation failure (freed via
 * MPV_common_end(), see the comment at the return). */
373 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
374 int y_size = s->b8_stride * (2 * s->mb_height + 1);
375 int c_size = s->mb_stride * (s->mb_height + 1);
376 int yc_size = y_size + 2 * c_size;
379 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
380 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
381 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21*2;
383 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
384 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* the scratchpads deliberately alias one allocation — they are never
 * needed at the same time */
385 s->me.temp= s->me.scratchpad;
386 s->rd_scratchpad= s->me.scratchpad;
387 s->b_scratchpad= s->me.scratchpad;
388 s->obmc_scratchpad= s->me.scratchpad + 16;
390 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
391 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
392 if(s->avctx->noise_reduction){
393 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
396 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
397 s->block= s->blocks[0];
400 s->pblocks[i] = &s->block[i];
403 if (s->out_format == FMT_H263) {
405 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
/* +stride+1 offsets skip the guard row/column used by prediction */
406 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
407 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
408 s->ac_val[2] = s->ac_val[1] + c_size;
413 return -1; //free() through MPV_common_end()
/* Frees everything allocated by init_duplicate_context(); aliasing pointers
 * (edge_emu_buffer, obmc_scratchpad) are only NULLed, not freed, because
 * they point into the allocations freed here. */
416 static void free_duplicate_context(MpegEncContext *s){
419 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
420 av_freep(&s->me.scratchpad);
424 s->obmc_scratchpad= NULL;
426 av_freep(&s->dct_error_sum);
427 av_freep(&s->me.map);
428 av_freep(&s->me.score_map);
429 av_freep(&s->blocks);
430 av_freep(&s->ac_val_base);
434 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
435 #define COPY(a) bak->a= src->a
436 COPY(allocated_edge_emu_buffer);
437 COPY(edge_emu_buffer);
442 COPY(obmc_scratchpad);
449 COPY(me.map_generation);
/* Refreshes a slice-thread context from the master: saves dst's per-thread
 * state, bulk-copies src over dst, restores the saved per-thread state, and
 * re-points pblocks at dst's own block array (they would otherwise still
 * reference src's blocks after the memcpy). */
461 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
464 //FIXME copy only needed parts
466 backup_duplicate_context(&bak, dst);
467 memcpy(dst, src, sizeof(MpegEncContext));
468 backup_duplicate_context(dst, &bak);
470 dst->pblocks[i] = &dst->block[i];
472 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy the decoding state a future frame needs from the
 * source thread's context (s1) into the destination thread's context (s).
 * First call bootstraps dst with a full struct copy plus per-thread
 * allocations; subsequent calls copy only the inter-frame state (picture
 * pointers, picture-type history, bitstream buffer, interlacing info). */
475 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
477 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
479 if(dst == src || !s1->context_initialized) return 0;
481 //FIXME can parameters change on I-frames? in that case dst may need a reinit
482 if(!s->context_initialized){
483 memcpy(s, s1, sizeof(MpegEncContext));
/* each thread gets its own disjoint slice of the shared picture pool */
486 s->picture_range_start += MAX_PICTURE_COUNT;
487 s->picture_range_end += MAX_PICTURE_COUNT;
/* the bitstream buffer pointer was copied from s1; drop it so this thread
 * allocates its own instead of double-freeing s1's */
488 s->bitstream_buffer = NULL;
489 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
494 s->avctx->coded_height = s1->avctx->coded_height;
495 s->avctx->coded_width = s1->avctx->coded_width;
496 s->avctx->width = s1->avctx->width;
497 s->avctx->height = s1->avctx->height;
499 s->coded_picture_number = s1->coded_picture_number;
500 s->picture_number = s1->picture_number;
501 s->input_picture_number = s1->input_picture_number;
503 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* copy the contiguous range of Picture members up to last_picture_ptr */
504 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
/* pointers into s1's picture array must be rebased into s's array */
506 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
507 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
508 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
510 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
512 //Error/bug resilience
513 s->next_p_frame_damaged = s1->next_p_frame_damaged;
514 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4: copy the member range [time_increment_bits, shape) wholesale */
517 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
520 s->max_b_frames = s1->max_b_frames;
521 s->low_delay = s1->low_delay;
522 s->dropable = s1->dropable;
524 //DivX handling (doesn't work)
525 s->divx_packed = s1->divx_packed;
527 if(s1->bitstream_buffer){
528 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
529 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
530 s->bitstream_buffer_size = s1->bitstream_buffer_size;
531 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
532 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
535 //MPEG2/interlacing info
536 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
538 if(!s1->first_field){
539 s->last_pict_type= s1->pict_type;
540 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
542 if (s1->pict_type != AV_PICTURE_TYPE_B) {
543 s->last_non_b_pict_type= s1->pict_type;
551 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
552 * the changed fields will not depend upon the prior state of the MpegEncContext.
554 void MPV_common_defaults(MpegEncContext *s){
556 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
557 s->chroma_qscale_table= ff_default_chroma_qscale_table;
/* assume progressive full-frame coding until headers say otherwise */
558 s->progressive_frame= 1;
559 s->progressive_sequence= 1;
560 s->picture_structure= PICT_FRAME;
562 s->coded_picture_number = 0;
563 s->picture_number = 0;
564 s->input_picture_number = 0;
566 s->picture_in_gop_number = 0;
/* default picture-pool slice; shifted per-thread in frame threading */
571 s->picture_range_start = 0;
572 s->picture_range_end = MAX_PICTURE_COUNT;
576 * sets the given MpegEncContext to defaults for decoding.
577 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* Currently decoding needs no extra defaults beyond the common ones. */
579 void MPV_decode_defaults(MpegEncContext *s){
580 MPV_common_defaults(s);
584 * init common structure for both encoder and decoder.
585 * this assumes that some variables like width/height are already set
/* Computes macroblock geometry from width/height, then allocates every
 * shared table of the context: MV tables, MB-type/qscale/error tables,
 * quantisation matrices, the picture pool, H.263/MPEG-4 prediction tables,
 * and finally the per-thread duplicate contexts for slice threading.
 * Returns 0 on success; on allocation failure jumps to the (not visible
 * here) fail label, freed via MPV_common_end(). */
587 av_cold int MPV_common_init(MpegEncContext *s)
589 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
590 threads = (s->encoding ||
592 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
593 s->avctx->thread_count : 1;
/* interlaced MPEG-2 rounds mb_height to a multiple of two field MB rows */
595 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
596 s->mb_height = (s->height + 31) / 32 * 2;
597 else if (s->codec_id != CODEC_ID_H264)
598 s->mb_height = (s->height + 15) / 16;
600 if(s->avctx->pix_fmt == PIX_FMT_NONE){
601 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* slice threading cannot use more threads than MB rows */
605 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
606 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
607 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
608 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
609 s->avctx->thread_count, max_threads);
610 threads = max_threads;
613 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
616 ff_dct_common_init(s);
618 s->flags= s->avctx->flags;
619 s->flags2= s->avctx->flags2;
/* strides are one unit wider than the picture for guard columns */
621 s->mb_width = (s->width + 15) / 16;
622 s->mb_stride = s->mb_width + 1;
623 s->b8_stride = s->mb_width*2 + 1;
624 s->b4_stride = s->mb_width*4 + 1;
625 mb_array_size= s->mb_height * s->mb_stride;
626 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
628 /* set chroma shifts */
629 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
630 &(s->chroma_y_shift) );
632 /* set default edge pos, will be overriden in decode_header if needed */
633 s->h_edge_pos= s->mb_width*16;
634 s->v_edge_pos= s->mb_height*16;
636 s->mb_num = s->mb_width * s->mb_height;
641 s->block_wrap[3]= s->b8_stride;
643 s->block_wrap[5]= s->mb_stride;
645 y_size = s->b8_stride * (2 * s->mb_height + 1);
646 c_size = s->mb_stride * (s->mb_height + 1);
647 yc_size = y_size + 2 * c_size;
649 /* convert fourcc to upper case */
650 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
652 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
654 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
656 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
657 for(y=0; y<s->mb_height; y++){
658 for(x=0; x<s->mb_width; x++){
659 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
662 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
665 /* Allocate MV tables */
666 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
667 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
668 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
669 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
670 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
671 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* working pointers skip the guard row/column of each base table */
672 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
673 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
674 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
675 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
676 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
677 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
679 if(s->msmpeg4_version){
680 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
682 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
684 /* Allocate MB type table */
685 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
687 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
/* 64 coefficients x 32 qscale values per matrix */
689 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
690 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix , 64*32 * sizeof(int), fail)
691 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
692 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
693 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
694 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
695 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
696 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
698 if(s->avctx->noise_reduction){
699 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* picture pool sized for all threads; each thread uses its own range */
703 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
704 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
705 for(i = 0; i < s->picture_count; i++) {
706 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
709 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
711 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
712 /* interlaced direct mode decoding tables */
717 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
718 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
720 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
721 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
722 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
724 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
727 if (s->out_format == FMT_H263) {
729 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
730 s->coded_block= s->coded_block_base + s->b8_stride + 1;
732 /* cbp, ac_pred, pred_dir */
733 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
737 if (s->h263_pred || s->h263_plus || !s->encoding) {
739 //MN: we need these for error resilience of intra-frames
740 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
741 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
742 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
743 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 (= 128 << 3) is the neutral DC predictor value */
744 for(i=0;i<yc_size;i++)
745 s->dc_val_base[i] = 1024;
748 /* which mb is a intra block */
749 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
750 memset(s->mbintra_table, 1, mb_array_size);
752 /* init macroblock skip table */
753 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
754 //Note the +1 is for a quicker mpeg4 slice_end detection
755 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
757 s->parse_context.state= -1;
758 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
759 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
760 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
761 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
764 s->context_initialized = 1;
765 s->thread_context[0]= s;
/* slice threading: thread 0 is the master context itself, the rest are
 * memcpy'd clones each given its own scratch buffers and MB-row range */
767 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
768 for(i=1; i<threads; i++){
769 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
770 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
773 for(i=0; i<threads; i++){
774 if(init_duplicate_context(s->thread_context[i], s) < 0)
776 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
777 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
780 if(init_duplicate_context(s, s) < 0) goto fail;
782 s->end_mb_y = s->mb_height;
792 /* init common structure for both encoder and decoder */
/* Tears down everything MPV_common_init() allocated: the per-thread
 * duplicate contexts, every MV/MB/quantisation table, the picture pool and
 * its side tables, and the visualization/bitstream buffers.  Safe to call
 * on a partially initialised context (av_freep tolerates NULL). */
793 void MPV_common_end(MpegEncContext *s)
797 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
798 for(i=0; i<s->avctx->thread_count; i++){
799 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only indices >= 1 were allocated */
801 for(i=1; i<s->avctx->thread_count; i++){
802 av_freep(&s->thread_context[i]);
804 } else free_duplicate_context(s);
806 av_freep(&s->parse_context.buffer);
807 s->parse_context.buffer_size=0;
809 av_freep(&s->mb_type);
810 av_freep(&s->p_mv_table_base);
811 av_freep(&s->b_forw_mv_table_base);
812 av_freep(&s->b_back_mv_table_base);
813 av_freep(&s->b_bidir_forw_mv_table_base);
814 av_freep(&s->b_bidir_back_mv_table_base);
815 av_freep(&s->b_direct_mv_table_base);
817 s->b_forw_mv_table= NULL;
818 s->b_back_mv_table= NULL;
819 s->b_bidir_forw_mv_table= NULL;
820 s->b_bidir_back_mv_table= NULL;
821 s->b_direct_mv_table= NULL;
825 av_freep(&s->b_field_mv_table_base[i][j][k]);
826 s->b_field_mv_table[i][j][k]=NULL;
828 av_freep(&s->b_field_select_table[i][j]);
829 av_freep(&s->p_field_mv_table_base[i][j]);
830 s->p_field_mv_table[i][j]=NULL;
832 av_freep(&s->p_field_select_table[i]);
835 av_freep(&s->dc_val_base);
836 av_freep(&s->coded_block_base);
837 av_freep(&s->mbintra_table);
838 av_freep(&s->cbp_table);
839 av_freep(&s->pred_dir_table);
841 av_freep(&s->mbskip_table);
842 av_freep(&s->prev_pict_types);
843 av_freep(&s->bitstream_buffer);
844 s->allocated_bitstream_buffer_size=0;
846 av_freep(&s->avctx->stats_out);
847 av_freep(&s->ac_stats);
848 av_freep(&s->error_status_table);
849 av_freep(&s->mb_index2xy);
850 av_freep(&s->lambda_table);
/* the chroma matrices may alias the luma ones; only free if distinct */
851 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
852 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
853 s->q_chroma_intra_matrix= NULL;
854 s->q_chroma_intra_matrix16= NULL;
855 av_freep(&s->q_intra_matrix);
856 av_freep(&s->q_inter_matrix);
857 av_freep(&s->q_intra_matrix16);
858 av_freep(&s->q_inter_matrix16);
859 av_freep(&s->input_picture);
860 av_freep(&s->reordered_input_picture);
861 av_freep(&s->dct_offset);
/* is_copy contexts share the picture pool with their source thread */
863 if(s->picture && !s->avctx->is_copy){
864 for(i=0; i<s->picture_count; i++){
865 free_picture(s, &s->picture[i]);
868 av_freep(&s->picture);
869 s->context_initialized = 0;
872 s->current_picture_ptr= NULL;
873 s->linesize= s->uvlinesize= 0;
876 av_freep(&s->visualization_buffer[i]);
878 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
879 avcodec_default_free_buffers(s->avctx);
/* Builds the derived run/level lookup arrays (max_level[], max_run[],
 * index_run[]) of an RLTable for both the not-last (0) and last (1) coded
 * coefficient halves of the table.  With static_store the arrays live in
 * caller-provided static memory and the work is done only once; otherwise
 * they are heap-allocated per table. */
882 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
884 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
885 uint8_t index_run[MAX_RUN+1];
886 int last, run, level, start, end, i;
888 /* If table is static, we can quit if rl->max_level[0] is not NULL */
889 if(static_store && rl->max_level[0])
892 /* compute max_level[], max_run[] and index_run[] */
893 for(last=0;last<2;last++) {
/* rl->n marks "no entry" in index_run */
902 memset(max_level, 0, MAX_RUN + 1);
903 memset(max_run, 0, MAX_LEVEL + 1);
904 memset(index_run, rl->n, MAX_RUN + 1);
905 for(i=start;i<end;i++) {
906 run = rl->table_run[i];
907 level = rl->table_level[i];
/* remember the first table index for each run value */
908 if (index_run[run] == rl->n)
910 if (level > max_level[run])
911 max_level[run] = level;
912 if (run > max_run[level])
913 max_run[level] = run;
/* static path: carve the three arrays out of static_store[last];
 * heap path: av_malloc a copy of each */
916 rl->max_level[last] = static_store[last];
918 rl->max_level[last] = av_malloc(MAX_RUN + 1);
919 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
921 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
923 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
924 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
926 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
928 rl->index_run[last] = av_malloc(MAX_RUN + 1);
929 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precomputes per-qscale RL_VLC tables from the generic VLC table: each
 * entry gets its decoded run/level baked in (level scaled by qmul/qadd),
 * so the decoder can skip the run/level lookup per coefficient. */
933 void init_vlc_rl(RLTable *rl)
945 for(i=0; i<rl->vlc.table_size; i++){
946 int code= rl->vlc.table[i][0];
947 int len = rl->vlc.table[i][1];
950 if(len==0){ // illegal code
953 }else if(len<0){ //more bits needed
957 if(code==rl->n){ //esc
961 run= rl->table_run [code] + 1;
962 level= rl->table_level[code] * qmul + qadd;
/* +192 encodes the "last coefficient" flag in the run value */
963 if(code >= rl->last) run+=192;
966 rl->rl_vlc[q][i].len= len;
967 rl->rl_vlc[q][i].level= level;
968 rl->rl_vlc[q][i].run= run;
/* Frees every picture in the pool that is no longer referenced, belongs to
 * this thread (owner2), and — unless remove_current is set — is not the
 * picture currently being decoded. */
973 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
977 /* release non reference frames */
978 for(i=0; i<s->picture_count; i++){
979 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
980 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
981 && (remove_current || &s->picture[i] != s->current_picture_ptr)
982 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
983 free_frame_buffer(s, &s->picture[i]);
/* Finds a free slot in this thread's range of the picture pool and returns
 * its index.  Preference order: for shared pictures a completely virgin
 * slot (type == 0); otherwise first a previously-used slot (type != 0),
 * then any slot without pixel data.  Aborts via av_log(FATAL) on overflow —
 * see the rationale in the comment below. */
988 int ff_find_unused_picture(MpegEncContext *s, int shared){
992 for(i=s->picture_range_start; i<s->picture_range_end; i++){
993 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
997 for(i=s->picture_range_start; i<s->picture_range_end; i++){
998 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1001 for(i=s->picture_range_start; i<s->picture_range_end; i++){
1002 if (s->picture[i].f.data[0] == NULL)
1007 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
1008 /* We could return -1, but the codec would crash trying to draw into a
1009 * non-existing frame anyway. This is safer than waiting for a random crash.
1010 * Also the return of this is never useful, an encoder must only allocate
1011 * as much as allowed in the specification. This has no relationship to how
1012 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1013 * enough for such valid streams).
1014 * Plus, a decoder has to check stream validity and remove frames if too
1015 * many reference frames are around. Waiting for "OOM" is not correct at
1016 * all. Similarly, missing reference frames have to be replaced by
1017 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the per-coefficient noise-reduction offsets (dct_offset) from the
 * accumulated DCT error statistics; statistics are halved once dct_count
 * exceeds 2^16 so they act as an exponentially decaying average. */
1023 static void update_noise_reduction(MpegEncContext *s){
1026 for(intra=0; intra<2; intra++){
1027 if(s->dct_count[intra] > (1<<16)){
1028 for(i=0; i<64; i++){
1029 s->dct_error_sum[intra][i] >>=1;
1031 s->dct_count[intra] >>= 1;
1034 for(i=0; i<64; i++){
/* +sum/2 and +1 give rounded division and avoid divide-by-zero */
1035 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1041 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
/* Per-frame setup run after the header is parsed and before coding/decoding:
 * releases stale reference frames, picks/allocates the current Picture,
 * rotates last/next picture pointers, allocates dummy frames for missing
 * references, adjusts data/linesize for field pictures, and selects the
 * dequantizer functions (which can change per-frame for mpeg4/mpeg2).
 * NOTE(review): excerpt is line-sampled; many braces, else-branches and
 * declarations (i, pic) are missing from view. */
1043 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1049 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1051 /* mark&release old frames */
1052 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1053 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1054 if (s->last_picture_ptr->owner2 == s)
1055 free_frame_buffer(s, s->last_picture_ptr);
1057 /* release forgotten pictures */
1058 /* if(mpeg124/h263) */
1060 for(i=0; i<s->picture_count; i++){
1061 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1062 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1063 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1064 free_frame_buffer(s, &s->picture[i]);
1072 ff_release_unused_pictures(s, 1);
1074 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1075 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1077 i= ff_find_unused_picture(s, 0);
1078 pic= &s->picture[i];
1081 pic->f.reference = 0;
1083 if (s->codec_id == CODEC_ID_H264)
1084 pic->f.reference = s->picture_structure;
1085 else if (s->pict_type != AV_PICTURE_TYPE_B)
/* 3 == PICT_FRAME: both fields are reference for non-B, non-H264 frames */
1086 pic->f.reference = 3;
1089 pic->f.coded_picture_number = s->coded_picture_number++;
1091 if(ff_alloc_picture(s, pic, 0) < 0)
1094 s->current_picture_ptr= pic;
1095 //FIXME use only the vars from current_pic
1096 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1097 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1098 if(s->picture_structure != PICT_FRAME)
1099 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1101 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1102 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1105 s->current_picture_ptr->f.pict_type = s->pict_type;
1106 // if(s->flags && CODEC_FLAG_QSCALE)
1107 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1108 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1110 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* rotate reference pointers: the previous "next" becomes "last" */
1112 if (s->pict_type != AV_PICTURE_TYPE_B) {
1113 s->last_picture_ptr= s->next_picture_ptr;
1115 s->next_picture_ptr= s->current_picture_ptr;
1117 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1118 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1119 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1120 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1121 s->pict_type, s->dropable);*/
1123 if(s->codec_id != CODEC_ID_H264){
1124 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1125 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1126 if (s->pict_type != AV_PICTURE_TYPE_I)
1127 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1128 else if (s->picture_structure != PICT_FRAME)
1129 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1131 /* Allocate a dummy frame */
1132 i= ff_find_unused_picture(s, 0);
1133 s->last_picture_ptr= &s->picture[i];
1134 s->last_picture_ptr->f.key_frame = 0;
1135 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* mark both fields complete so frame-threaded consumers never wait on it */
1137 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1138 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1140 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1141 /* Allocate a dummy frame */
1142 i= ff_find_unused_picture(s, 0);
1143 s->next_picture_ptr= &s->picture[i];
1144 s->next_picture_ptr->f.key_frame = 0;
1145 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1147 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1148 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1152 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1153 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1155 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* field pictures: point at the right field and double the stride so the
 * plane behaves like a half-height frame */
1157 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1160 if(s->picture_structure == PICT_BOTTOM_FIELD){
1161 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1163 s->current_picture.f.linesize[i] *= 2;
1164 s->last_picture.f.linesize[i] *= 2;
1165 s->next_picture.f.linesize[i] *= 2;
1169 s->error_recognition= avctx->error_recognition;
1171 /* set dequantizer, we can't do it during init as it might change for mpeg4
1172 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1173 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1174 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1175 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1176 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1177 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1178 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1180 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1181 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1184 if(s->dct_error_sum){
1185 assert(s->avctx->noise_reduction && s->encoding);
1187 update_noise_reduction(s);
1190 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1191 return ff_xvmc_field_start(s, avctx);
1196 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Per-frame teardown: draws edge padding around reference frames (needed by
 * unrestricted MV prediction), records last-frame statistics, copies the
 * current Picture back into s->picture[], frees non-reference frames and
 * reports decode completion to frame threads.
 * NOTE(review): excerpt is line-sampled; several braces/conditions are
 * missing from view. */
1197 void MPV_frame_end(MpegEncContext *s)
1200 /* redraw edges for the frame if decoding didn't complete */
1201 //just to make sure that all data is rendered.
1202 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1203 ff_xvmc_field_end(s);
1204 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1205 && !s->avctx->hwaccel
1206 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1207 && s->unrestricted_mv
1208 && s->current_picture.f.reference
1210 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1211 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1212 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1213 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1214 s->h_edge_pos , s->v_edge_pos,
1215 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1216 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1217 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1218 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1219 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1220 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1221 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1226 s->last_pict_type = s->pict_type;
1227 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1228 if(s->pict_type!=AV_PICTURE_TYPE_B){
1229 s->last_non_b_pict_type= s->pict_type;
1232 /* copy back current_picture variables */
1233 for(i=0; i<MAX_PICTURE_COUNT; i++){
1234 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1235 s->picture[i]= s->current_picture;
1239 assert(i<MAX_PICTURE_COUNT);
1243 /* release non-reference frames */
1244 for(i=0; i<s->picture_count; i++){
1245 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1246 free_frame_buffer(s, &s->picture[i]);
1250 // clear copies, to avoid confusion
1252 memset(&s->last_picture, 0, sizeof(Picture));
1253 memset(&s->next_picture, 0, sizeof(Picture));
1254 memset(&s->current_picture, 0, sizeof(Picture));
1256 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1258 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
/* tell waiting frame threads the whole frame (up to last MB row) is done */
1259 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1264 * draws a line from (ex, ey) -> (sx, sy).
1265 * @param w width of the image
1266 * @param h height of the image
1267 * @param stride stride/linesize of the image
1268 * @param color color of the line
/* Draw an anti-aliased line into an 8-bit plane by *adding* `color` weighted
 * by subpixel coverage (16.16 fixed point). Endpoints are clipped to the
 * image; the steeper axis is chosen as the iteration axis.
 * NOTE(review): excerpt is line-sampled; declarations of f/x/y/fr and some
 * braces are missing from view. */
1270 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1273 sx= av_clip(sx, 0, w-1);
1274 sy= av_clip(sy, 0, h-1);
1275 ex= av_clip(ex, 0, w-1);
1276 ey= av_clip(ey, 0, h-1);
1278 buf[sy*stride + sx]+= color;
/* mostly-horizontal case: step in x, distribute color across two rows */
1280 if(FFABS(ex - sx) > FFABS(ey - sy)){
1282 FFSWAP(int, sx, ex);
1283 FFSWAP(int, sy, ey);
1285 buf+= sx + sy*stride;
1287 f= ((ey-sy)<<16)/ex;
1288 for(x= 0; x <= ex; x++){
1291 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1292 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* mostly-vertical case: step in y, distribute color across two columns */
1296 FFSWAP(int, sx, ex);
1297 FFSWAP(int, sy, ey);
1299 buf+= sx + sy*stride;
1301 if(ey) f= ((ex-sx)<<16)/ey;
1303 for(y= 0; y <= ey; y++){
1306 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1307 buf[y*stride + x+1]+= (color* fr )>>16;
1313 * draws an arrow from (ex, ey) -> (sx, sy).
1314 * @param w width of the image
1315 * @param h height of the image
1316 * @param stride stride/linesize of the image
1317 * @param color color of the arrow
/* Draw an arrow (shaft plus two head strokes) used by the motion-vector
 * visualization; the head is only drawn for vectors longer than 3 pixels.
 * NOTE(review): excerpt is line-sampled; declarations of dx/dy/rx/ry and
 * some braces are missing from view. */
1319 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clip generously (+-100) so off-screen vectors still draw their visible part */
1322 sx= av_clip(sx, -100, w+100);
1323 sy= av_clip(sy, -100, h+100);
1324 ex= av_clip(ex, -100, w+100);
1325 ey= av_clip(ey, -100, h+100);
1330 if(dx*dx + dy*dy > 3*3){
1333 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1335 //FIXME subpixel accuracy
1336 rx= ROUNDED_DIV(rx*3<<4, length);
1337 ry= ROUNDED_DIV(ry*3<<4, length);
1339 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1340 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1342 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1346 * prints debugging info for the given picture.
/* Dump per-macroblock debug information for a decoded frame. Two parts:
 * (1) textual skip/QP/MB-type maps via av_log, gated by FF_DEBUG_* flags;
 * (2) visual overlays (motion vectors as arrows, QP/MB type as chroma
 * colouring, partition boundaries as XOR patterns) drawn into a copy of the
 * picture in s->visualization_buffer.
 * NOTE(review): excerpt is line-sampled; many braces, else-branches and
 * variable declarations are missing from view. */
1348 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1350 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1352 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1355 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1356 av_get_picture_type_char(pict->pict_type));
1357 for(y=0; y<s->mb_height; y++){
1358 for(x=0; x<s->mb_width; x++){
1359 if(s->avctx->debug&FF_DEBUG_SKIP){
1360 int count= s->mbskip_table[x + y*s->mb_stride];
1361 if(count>9) count=9;
1362 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1364 if(s->avctx->debug&FF_DEBUG_QP){
1365 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1367 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1368 int mb_type= pict->mb_type[x + y*s->mb_stride];
1369 //Type & MV direction
1371 av_log(s->avctx, AV_LOG_DEBUG, "P");
1372 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1373 av_log(s->avctx, AV_LOG_DEBUG, "A");
1374 else if(IS_INTRA4x4(mb_type))
1375 av_log(s->avctx, AV_LOG_DEBUG, "i");
1376 else if(IS_INTRA16x16(mb_type))
1377 av_log(s->avctx, AV_LOG_DEBUG, "I");
1378 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1379 av_log(s->avctx, AV_LOG_DEBUG, "d");
1380 else if(IS_DIRECT(mb_type))
1381 av_log(s->avctx, AV_LOG_DEBUG, "D");
1382 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1383 av_log(s->avctx, AV_LOG_DEBUG, "g");
1384 else if(IS_GMC(mb_type))
1385 av_log(s->avctx, AV_LOG_DEBUG, "G");
1386 else if(IS_SKIP(mb_type))
1387 av_log(s->avctx, AV_LOG_DEBUG, "S");
1388 else if(!USES_LIST(mb_type, 1))
1389 av_log(s->avctx, AV_LOG_DEBUG, ">");
1390 else if(!USES_LIST(mb_type, 0))
1391 av_log(s->avctx, AV_LOG_DEBUG, "<");
1393 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1394 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partition segmentation of the MB */
1399 av_log(s->avctx, AV_LOG_DEBUG, "+");
1400 else if(IS_16X8(mb_type))
1401 av_log(s->avctx, AV_LOG_DEBUG, "-");
1402 else if(IS_8X16(mb_type))
1403 av_log(s->avctx, AV_LOG_DEBUG, "|");
1404 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1405 av_log(s->avctx, AV_LOG_DEBUG, " ");
1407 av_log(s->avctx, AV_LOG_DEBUG, "?");
1410 if(IS_INTERLACED(mb_type))
1411 av_log(s->avctx, AV_LOG_DEBUG, "=");
1413 av_log(s->avctx, AV_LOG_DEBUG, " ");
1415 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1417 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* part 2: visual overlays drawn into a private copy of the frame */
1421 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1422 const int shift= 1 + s->quarter_sample;
1426 int h_chroma_shift, v_chroma_shift, block_height;
1427 const int width = s->avctx->width;
1428 const int height= s->avctx->height;
1429 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1430 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1431 s->low_delay=0; //needed to see the vectors without trashing the buffers
1433 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1435 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1436 pict->data[i]= s->visualization_buffer[i];
1438 pict->type= FF_BUFFER_TYPE_COPY;
1441 block_height = 16>>v_chroma_shift;
1443 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1445 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1446 const int mb_index= mb_x + mb_y*s->mb_stride;
1447 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0/1/2 = P-forward, B-forward, B-backward vector overlays */
1449 for(type=0; type<3; type++){
1452 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1456 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1460 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1465 if(!USES_LIST(pict->mb_type[mb_index], direction))
1468 if(IS_8X8(pict->mb_type[mb_index])){
1471 int sx= mb_x*16 + 4 + 8*(i&1);
1472 int sy= mb_y*16 + 4 + 8*(i>>1);
1473 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1474 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1475 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1476 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1478 }else if(IS_16X8(pict->mb_type[mb_index])){
1482 int sy=mb_y*16 + 4 + 8*i;
1483 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1484 int mx=(pict->motion_val[direction][xy][0]>>shift);
1485 int my=(pict->motion_val[direction][xy][1]>>shift);
1487 if(IS_INTERLACED(pict->mb_type[mb_index]))
1490 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1492 }else if(IS_8X16(pict->mb_type[mb_index])){
1495 int sx=mb_x*16 + 4 + 8*i;
1497 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1498 int mx=(pict->motion_val[direction][xy][0]>>shift);
1499 int my=(pict->motion_val[direction][xy][1]>>shift);
1501 if(IS_INTERLACED(pict->mb_type[mb_index]))
1504 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1507 int sx= mb_x*16 + 8;
1508 int sy= mb_y*16 + 8;
1509 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1510 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1511 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1512 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1516 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
/* paint both chroma planes with a grey level proportional to QP */
1517 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1519 for(y=0; y<block_height; y++){
1520 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1521 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1524 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1525 int mb_type= pict->mb_type[mb_index];
1528 #define COLOR(theta, r)\
1529 u= (int)(128 + r*cos(theta*3.141592/180));\
1530 v= (int)(128 + r*sin(theta*3.141592/180));
1534 if(IS_PCM(mb_type)){
1536 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1538 }else if(IS_INTRA4x4(mb_type)){
1540 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1542 }else if(IS_DIRECT(mb_type)){
1544 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1546 }else if(IS_GMC(mb_type)){
1548 }else if(IS_SKIP(mb_type)){
1550 }else if(!USES_LIST(mb_type, 1)){
1552 }else if(!USES_LIST(mb_type, 0)){
1555 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1559 u*= 0x0101010101010101ULL;
1560 v*= 0x0101010101010101ULL;
1561 for(y=0; y<block_height; y++){
1562 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1563 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR-highlight partition boundaries on the luma plane */
1567 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1568 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1569 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1571 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1573 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1575 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1576 int dm= 1 << (mv_sample_log2-2);
1578 int sx= mb_x*16 + 8*(i&1);
1579 int sy= mb_y*16 + 8*(i>>1);
1580 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1582 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1583 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1585 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1586 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1587 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1591 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1595 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode.
 * The subpixel fraction is kept in s_mask bits and rescaled for the chroma
 * MC function; edge emulation is used when the source block would read
 * outside [0, h_edge_pos) x [0, v_edge_pos).
 * NOTE(review): excerpt is line-sampled; the quarter_sample handling, emu
 * flag and return statement are missing from view. */
static inline int hpel_motion_lowres(MpegEncContext *s,
uint8_t *dest, uint8_t *src,
int field_based, int field_select,
int src_x, int src_y,
int width, int height, int stride,
int h_edge_pos, int v_edge_pos,
int w, int h, h264_chroma_mc_func *pix_op,
int motion_x, int motion_y)
1610 const int lowres= s->avctx->lowres;
1611 const int op_index= FFMIN(lowres, 2);
1612 const int s_mask= (2<<lowres)-1;
1616 if(s->quarter_sample){
1621 sx= motion_x & s_mask;
1622 sy= motion_y & s_mask;
1623 src_x += motion_x >> (lowres+1);
1624 src_y += motion_y >> (lowres+1);
1626 src += src_y * stride + src_x;
1628 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1629 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1630 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1631 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1632 src= s->edge_emu_buffer;
/* rescale subpel fraction to the 1/8-pel units the chroma MC op expects */
1636 sx= (sx << 2) >> lowres;
1637 sy= (sy << 2) >> lowres;
1640 pix_op[op_index](dest, src, stride, h, sx, sy);
1644 /* apply one mpeg motion vector to the three components */
/* Apply one MPEG motion vector to all three planes (Y, Cb, Cr) in lowres
 * mode, handling frame/field pictures, per-format chroma MV derivation
 * (H.263 rounding, H.261 full-pel, MPEG-style halving) and edge emulation.
 * NOTE(review): excerpt is line-sampled; several branches, emu handling and
 * closing braces are missing from view. */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int field_based, int bottom_field, int field_select,
uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
int motion_x, int motion_y, int h, int mb_y)
1651 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1652 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1653 const int lowres= s->avctx->lowres;
1654 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1655 const int block_s= 8>>lowres;
1656 const int s_mask= (2<<lowres)-1;
1657 const int h_edge_pos = s->h_edge_pos >> lowres;
1658 const int v_edge_pos = s->v_edge_pos >> lowres;
1659 linesize = s->current_picture.f.linesize[0] << field_based;
1660 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1662 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1668 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1671 sx= motion_x & s_mask;
1672 sy= motion_y & s_mask;
1673 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1674 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per output format */
1676 if (s->out_format == FMT_H263) {
1677 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1678 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1681 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1684 uvsx = (2*mx) & s_mask;
1685 uvsy = (2*my) & s_mask;
1686 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1687 uvsrc_y = mb_y*block_s + (my >> lowres);
1689 if(s->chroma_y_shift){
1694 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1695 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1697 if(s->chroma_x_shift){
1701 uvsy = motion_y & s_mask;
1703 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1706 uvsx = motion_x & s_mask;
1707 uvsy = motion_y & s_mask;
1714 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1715 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1716 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1718 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1719 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1720 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1721 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1722 ptr_y = s->edge_emu_buffer;
1723 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1724 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1725 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1726 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1727 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1728 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1734 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1735 dest_y += s->linesize;
1736 dest_cb+= s->uvlinesize;
1737 dest_cr+= s->uvlinesize;
1741 ptr_y += s->linesize;
1742 ptr_cb+= s->uvlinesize;
1743 ptr_cr+= s->uvlinesize;
1746 sx= (sx << 2) >> lowres;
1747 sy= (sy << 2) >> lowres;
1748 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1750 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1751 uvsx= (uvsx << 2) >> lowres;
1752 uvsy= (uvsy << 2) >> lowres;
1753 if(h >> s->chroma_y_shift){
1754 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1755 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1758 //FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (four luma vectors per MB) in lowres
 * mode: the four luma MVs are combined into a single rounded chroma vector
 * (ff_h263_round_chroma) and applied to both chroma planes.
 * NOTE(review): excerpt is line-sampled; the mx/my parameters, emu flag use
 * for the second plane and closing braces are missing from view. */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
uint8_t *dest_cb, uint8_t *dest_cr,
uint8_t **ref_picture,
h264_chroma_mc_func *pix_op,
1766 const int lowres= s->avctx->lowres;
1767 const int op_index= FFMIN(lowres, 2);
1768 const int block_s= 8>>lowres;
1769 const int s_mask= (2<<lowres)-1;
1770 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1771 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1772 int emu=0, src_x, src_y, offset, sx, sy;
1775 if(s->quarter_sample){
1780 /* In case of 8X8, we construct a single chroma motion vector
1781 with a special rounding */
1782 mx= ff_h263_round_chroma(mx);
1783 my= ff_h263_round_chroma(my);
1787 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1788 src_y = s->mb_y*block_s + (my >> (lowres+1));
1790 offset = src_y * s->uvlinesize + src_x;
1791 ptr = ref_picture[1] + offset;
1792 if(s->flags&CODEC_FLAG_EMU_EDGE){
1793 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1794 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1795 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1796 ptr= s->edge_emu_buffer;
1800 sx= (sx << 2) >> lowres;
1801 sy= (sy << 2) >> lowres;
1802 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* same offset and subpel fraction reused for the Cr plane */
1804 ptr = ref_picture[2] + offset;
1806 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1807 ptr= s->edge_emu_buffer;
1809 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1813 * motion compensation of a single macroblock
1815 * @param dest_y luma destination pointer
1816 * @param dest_cb chroma cb/u destination pointer
1817 * @param dest_cr chroma cr/v destination pointer
1818 * @param dir direction (0->forward, 1->backward)
1819 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1820 * @param pix_op halfpel motion compensation function (average or put normally)
1821 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatch motion compensation for one macroblock in lowres mode according
 * to s->mv_type (16x16, 8x8/4MV, field, 16x8, dual-prime), delegating to
 * mpeg_motion_lowres / hpel_motion_lowres / chroma_4mv_motion_lowres.
 * NOTE(review): excerpt is line-sampled; the case labels, loop headers and
 * declarations (mb_x, mb_y, i, j, mx, my) are missing from view. */
static inline void MPV_motion_lowres(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int dir, uint8_t **ref_picture,
h264_chroma_mc_func *pix_op)
1830 const int lowres= s->avctx->lowres;
1831 const int block_s= 8>>lowres;
1836 switch(s->mv_type) {
1838 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1840 ref_picture, pix_op,
1841 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 8x8 / 4MV: one luma half-pel MC per 8x8 block, chroma handled once below */
1847 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1848 ref_picture[0], 0, 0,
1849 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1850 s->width, s->height, s->linesize,
1851 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1852 block_s, block_s, pix_op,
1853 s->mv[dir][i][0], s->mv[dir][i][1]);
1855 mx += s->mv[dir][i][0];
1856 my += s->mv[dir][i][1];
1859 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1860 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
1863 if (s->picture_structure == PICT_FRAME) {
1865 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1866 1, 0, s->field_select[dir][0],
1867 ref_picture, pix_op,
1868 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1870 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1871 1, 1, s->field_select[dir][1],
1872 ref_picture, pix_op,
1873 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* opposite-parity field of a field picture lives in the current frame */
1875 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1876 ref_picture = s->current_picture_ptr->f.data;
1879 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1880 0, 0, s->field_select[dir][0],
1881 ref_picture, pix_op,
1882 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1887 uint8_t ** ref2picture;
1889 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1890 ref2picture= ref_picture;
1892 ref2picture = s->current_picture_ptr->f.data;
1895 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1896 0, 0, s->field_select[dir][i],
1897 ref2picture, pix_op,
1898 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1900 dest_y += 2*block_s*s->linesize;
1901 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1902 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1906 if(s->picture_structure == PICT_FRAME){
1910 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1912 ref_picture, pix_op,
1913 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1915 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1919 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1920 0, 0, s->picture_structure != i+1,
1921 ref_picture, pix_op,
1922 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1924 // after put we make avg of the same block
1925 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1927 //opposite parity is always in the same frame if this is second field
1928 if(!s->first_field){
1929 ref_picture = s->current_picture_ptr->f.data;
1939 * find the lowest MB row referenced in the MVs
/* Return the lowest macroblock row of the reference frame touched by the
 * current MB's motion vectors in direction `dir` (used by frame threading
 * to know how far the reference must be decoded). Falls back to the last
 * row for field pictures and unhandled mv types.
 * NOTE(review): excerpt is line-sampled; the switch cases assigning
 * off/mvs and the `unhandled:` label are missing from view. */
int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1943 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1944 int my, off, i, mvs;
1946 if (s->picture_structure != PICT_FRAME) goto unhandled;
1948 switch (s->mv_type) {
1962 for (i = 0; i < mvs; i++) {
/* normalize all MVs to quarter-pel before taking min/max */
1963 my = s->mv[dir][i][1]<<qpel_shift;
1964 my_max = FFMAX(my_max, my);
1965 my_min = FFMIN(my_min, my);
/* +63 >> 6: round the quarter-pel extent up to whole 16-pixel MB rows */
1968 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1970 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1972 return s->mb_height-1;
1975 /* put block[] to dest[] */
/* Dequantize an intra block in place and write (overwrite) the IDCT result
 * to dest. */
static inline void put_dct(MpegEncContext *s,
DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1979 s->dct_unquantize_intra(s, block, i, qscale);
1980 s->dsp.idct_put (dest, line_size, block);
1983 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block has no coded coefficients and is skipped. */
static inline void add_dct(MpegEncContext *s,
DCTELEM *block, int i, uint8_t *dest, int line_size)
1987 if (s->block_last_index[i] >= 0) {
1988 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
static inline void add_dequant_dct(MpegEncContext *s,
DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1995 if (s->block_last_index[i] >= 0) {
1996 s->dct_unquantize_inter(s, block, i, qscale);
1998 s->dsp.idct_add (dest, line_size, block);
2003 * cleans dc, ac, coded_block for the current non intra MB
/* Reset DC predictors, AC prediction values and coded-block flags for the
 * current macroblock after it was coded as non-intra, so later intra MBs
 * start from neutral predictors (1024 for DC, zeros for AC).
 * NOTE(review): excerpt is line-sampled; some assignments (e.g. dc_val[0][xy],
 * dc_val[1]) and closing braces are missing from view. */
void ff_clean_intra_table_entries(MpegEncContext *s)
2007 int wrap = s->b8_stride;
2008 int xy = s->block_index[0];
2011 s->dc_val[0][xy + 1 ] =
2012 s->dc_val[0][xy + wrap] =
2013 s->dc_val[0][xy + 1 + wrap] = 1024;
2015 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2016 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2017 if (s->msmpeg4_version>=3) {
2018 s->coded_block[xy ] =
2019 s->coded_block[xy + 1 ] =
2020 s->coded_block[xy + wrap] =
2021 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors are indexed per-MB, not per-8x8-block */
2024 wrap = s->mb_stride;
2025 xy = s->mb_x + s->mb_y * wrap;
2027 s->dc_val[2][xy] = 1024;
2029 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2030 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2032 s->mbintra_table[xy]= 0;
2035 /* generic function called after a macroblock has been parsed by the
2036 decoder or after it has been encoded by the encoder.
2038 Important variables used:
2039 s->mb_intra : true if intra macroblock
2040 s->mv_dir : motion vector direction
2041 s->mv_type : motion vector type
2042 s->mv : motion vector
2043 s->interlaced_dct : true if interlaced dct used (mpeg2)
2045 static av_always_inline
2046 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2047 int lowres_flag, int is_mpeg12)
2049 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2050 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2051 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2055 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2056 /* save DCT coefficients */
2058 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2059 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2061 for(j=0; j<64; j++){
2062 *dct++ = block[i][s->dsp.idct_permutation[j]];
2063 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2065 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2069 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2071 /* update DC predictors for P macroblocks */
2073 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2074 if(s->mbintra_table[mb_xy])
2075 ff_clean_intra_table_entries(s);
2079 s->last_dc[2] = 128 << s->intra_dc_precision;
2082 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2083 s->mbintra_table[mb_xy]=1;
2085 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2086 uint8_t *dest_y, *dest_cb, *dest_cr;
2087 int dct_linesize, dct_offset;
2088 op_pixels_func (*op_pix)[4];
2089 qpel_mc_func (*op_qpix)[16];
2090 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2091 const int uvlinesize = s->current_picture.f.linesize[1];
2092 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2093 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2095 /* avoid copy if macroblock skipped in last frame too */
2096 /* skip only during decoding as we might trash the buffers during encoding a bit */
2098 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2099 const int age = s->current_picture.f.age;
2103 if (s->mb_skipped) {
2105 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2107 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2108 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2110 /* if previous was skipped too, then nothing to do ! */
2111 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2114 } else if(!s->current_picture.f.reference) {
2115 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2116 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2118 *mbskip_ptr = 0; /* not skipped */
2122 dct_linesize = linesize << s->interlaced_dct;
2123 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2127 dest_cb= s->dest[1];
2128 dest_cr= s->dest[2];
2130 dest_y = s->b_scratchpad;
2131 dest_cb= s->b_scratchpad+16*linesize;
2132 dest_cr= s->b_scratchpad+32*linesize;
2136 /* motion handling */
2137 /* decoding or more than one mb_type (MC was already done otherwise) */
2140 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2141 if (s->mv_dir & MV_DIR_FORWARD) {
2142 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2144 if (s->mv_dir & MV_DIR_BACKWARD) {
2145 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2150 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2152 if (s->mv_dir & MV_DIR_FORWARD) {
2153 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2154 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2156 if (s->mv_dir & MV_DIR_BACKWARD) {
2157 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2160 op_qpix= s->me.qpel_put;
2161 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2162 op_pix = s->dsp.put_pixels_tab;
2164 op_pix = s->dsp.put_no_rnd_pixels_tab;
2166 if (s->mv_dir & MV_DIR_FORWARD) {
2167 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2168 op_pix = s->dsp.avg_pixels_tab;
2169 op_qpix= s->me.qpel_avg;
2171 if (s->mv_dir & MV_DIR_BACKWARD) {
2172 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2177 /* skip dequant / idct if we are really late ;) */
2178 if(s->avctx->skip_idct){
2179 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2180 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2181 || s->avctx->skip_idct >= AVDISCARD_ALL)
2185 /* add dct residue */
2186 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2187 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2188 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2189 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2190 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2191 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2193 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2194 if (s->chroma_y_shift){
2195 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2196 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2200 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2201 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2202 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2203 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2206 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2207 add_dct(s, block[0], 0, dest_y , dct_linesize);
2208 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2209 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2210 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2212 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2213 if(s->chroma_y_shift){//Chroma420
2214 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2215 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2218 dct_linesize = uvlinesize << s->interlaced_dct;
2219 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2221 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2222 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2223 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2224 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2225 if(!s->chroma_x_shift){//Chroma444
2226 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2227 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2228 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2229 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2234 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2235 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2238 /* dct only in intra block */
2239 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2240 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2241 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2242 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2243 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2245 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2246 if(s->chroma_y_shift){
2247 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2248 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2252 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2253 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2254 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2255 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2259 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2260 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2261 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2262 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2264 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2265 if(s->chroma_y_shift){
2266 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2267 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2270 dct_linesize = uvlinesize << s->interlaced_dct;
2271 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2273 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2274 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2275 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2276 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2277 if(!s->chroma_x_shift){//Chroma444
2278 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2279 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2280 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2281 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2289 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2290 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2291 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the compile-time-specialized variants of
 * MPV_decode_mb_internal (lowres on/off x MPEG-1/2 vs other codecs), so the
 * hot inner function can constant-fold the flags. */
2296 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2298 if(s->out_format == FMT_MPEG1) {
2299 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2300 else MPV_decode_mb_internal(s, block, 0, 1);
2303 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2304 else MPV_decode_mb_internal(s, block, 0, 0);
/* Draw the edge-extension border around a just-decoded band of rows and, if
 * the user installed a draw_horiz_band callback, hand the band to it. */
2309 * @param h is the normal height, this will be reduced automatically if needed for the last row
2311 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2312 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge extension only for software decoding of reference frames with
 * unrestricted MVs, and only when edges are not emulated on the fly. */
2318 if (!s->avctx->hwaccel
2319 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2320 && s->unrestricted_mv
2321 && s->current_picture.f.reference
2323 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2324 int sides = 0, edge_h;
/* chroma subsampling shifts from the pixel format descriptor */
2325 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2326 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2327 if (y==0) sides |= EDGE_TOP;
2328 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clip the band so it never extends past the coded picture bottom */
2330 edge_h= FFMIN(h, s->v_edge_pos - y);
2332 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize,
2333 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
2334 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2335 s->h_edge_pos>>hshift, edge_h>>vshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2336 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2337 s->h_edge_pos>>hshift, edge_h>>vshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clip against the display height for the last band */
2340 h= FFMIN(h, s->avctx->height - y);
2342 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2344 if (s->avctx->draw_horiz_band) {
/* pick the frame in display order: current for B/low-delay/coded-order,
 * otherwise the previous reference frame */
2348 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2349 src= (AVFrame*)s->current_picture_ptr;
2350 else if(s->last_picture_ptr)
2351 src= (AVFrame*)s->last_picture_ptr;
2355 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band start within each plane
 * (offset[1] is set on a line elided from this excerpt) */
2361 offset[0]= y * s->linesize;
2363 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2369 s->avctx->draw_horiz_band(s->avctx, src, offset,
2370 y, s->picture_structure, h);
/* Set up the per-MB block indices (into the dc_val/ac_val/motion arrays) and
 * the three destination plane pointers for the macroblock at (mb_x, mb_y).
 * Indices point one MB to the left; they are advanced as MBs are processed. */
2374 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2375 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2376 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the MB size in pixels; lowres shrinks the macroblock */
2377 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 blocks (2x2 grid) at b8 resolution ... */
2379 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2380 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2381 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2382 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* ... and the two chroma blocks in the area past the luma plane */
2383 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2384 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2385 //block_index is not used by mpeg2, so it is not affected by chroma_format
2387 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2388 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2389 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* advance dest to the current MB row, except in the B-frame +
 * draw_horiz_band frame-picture case */
2391 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2393 if(s->picture_structure==PICT_FRAME){
2394 s->dest[0] += s->mb_y * linesize << mb_size;
2395 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2396 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only every second MB row belongs to this field */
2398 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2399 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2400 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2401 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush the decoder state (e.g. on seek): release all internally/user owned
 * picture buffers, drop the reference-picture pointers and reset the
 * bitstream parse context so decoding can restart cleanly. */
2406 void ff_mpeg_flush(AVCodecContext *avctx){
2408 MpegEncContext *s = avctx->priv_data;
/* nothing to flush if the context was never (fully) initialized */
2410 if(s==NULL || s->picture==NULL)
2413 for(i=0; i<s->picture_count; i++){
2414 if (s->picture[i].f.data[0] &&
2415 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2416 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2417 free_frame_buffer(s, &s->picture[i]);
2419 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2421 s->mb_x= s->mb_y= 0;
/* reset the start-code parser so stale overread data is discarded */
2424 s->parse_context.state= -1;
2425 s->parse_context.frame_start_found= 0;
2426 s->parse_context.overread= 0;
2427 s->parse_context.overread_index= 0;
2428 s->parse_context.index= 0;
2429 s->parse_context.last_index= 0;
2430 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantizer (C reference): DC is scaled by the luma/chroma DC
 * scale, AC coefficients by qscale and the intra matrix with >>3, and each
 * nonzero level is forced odd via (level-1)|1 (oddification). */
2434 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2435 DCTELEM *block, int n, int qscale)
2437 int i, level, nCoeffs;
2438 const uint16_t *quant_matrix;
/* only coefficients up to the last coded index need processing */
2440 nCoeffs= s->block_last_index[n];
/* n<4 are luma blocks, n>=4 chroma — different DC scale tables */
2443 block[0] = block[0] * s->y_dc_scale;
2445 block[0] = block[0] * s->c_dc_scale;
2446 /* XXX: only mpeg1 */
2447 quant_matrix = s->intra_matrix;
2448 for(i=1;i<=nCoeffs;i++) {
/* walk in scan order, indexing the block in permuted order */
2449 int j= s->intra_scantable.permutated[i];
/* negative branch (surrounding sign handling elided in this excerpt) */
2454 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2455 level = (level - 1) | 1;
/* positive branch */
2458 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2459 level = (level - 1) | 1;
/* MPEG-1 inter dequantizer (C reference): uses the inter matrix and the
 * ((2*level+1) * qscale * matrix) >> 4 reconstruction, forcing each nonzero
 * level odd via (level-1)|1. Starts at i=0 — inter blocks have no special
 * DC handling. */
2466 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2467 DCTELEM *block, int n, int qscale)
2469 int i, level, nCoeffs;
2470 const uint16_t *quant_matrix;
2472 nCoeffs= s->block_last_index[n];
2474 quant_matrix = s->inter_matrix;
2475 for(i=0; i<=nCoeffs; i++) {
2476 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines elided in this excerpt) */
2481 level = (((level << 1) + 1) * qscale *
2482 ((int) (quant_matrix[j]))) >> 4;
2483 level = (level - 1) | 1;
/* positive branch */
2486 level = (((level << 1) + 1) * qscale *
2487 ((int) (quant_matrix[j]))) >> 4;
2488 level = (level - 1) | 1;
/* MPEG-2 intra dequantizer (C reference). Unlike the MPEG-1 variant there is
 * no per-coefficient oddification. With alternate_scan all 64 coefficients
 * are processed regardless of the last coded index. */
2495 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2496 DCTELEM *block, int n, int qscale)
2498 int i, level, nCoeffs;
2499 const uint16_t *quant_matrix;
2501 if(s->alternate_scan) nCoeffs= 63;
2502 else nCoeffs= s->block_last_index[n];
/* DC: luma blocks (n<4) vs chroma blocks use different scale tables */
2505 block[0] = block[0] * s->y_dc_scale;
2507 block[0] = block[0] * s->c_dc_scale;
2508 quant_matrix = s->intra_matrix;
2509 for(i=1;i<=nCoeffs;i++) {
2510 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines elided in this excerpt) */
2515 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive branch */
2518 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra dequantizer; same reconstruction as
 * dct_unquantize_mpeg2_intra_c (the exactness bookkeeping, e.g. the mismatch
 * sum, is on lines elided from this excerpt — confirm against full source). */
2525 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2526 DCTELEM *block, int n, int qscale)
2528 int i, level, nCoeffs;
2529 const uint16_t *quant_matrix;
2532 if(s->alternate_scan) nCoeffs= 63;
2533 else nCoeffs= s->block_last_index[n];
/* DC scaling: luma vs chroma tables */
2536 block[0] = block[0] * s->y_dc_scale;
2538 block[0] = block[0] * s->c_dc_scale;
2539 quant_matrix = s->intra_matrix;
2540 for(i=1;i<=nCoeffs;i++) {
2541 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines elided in this excerpt) */
2546 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive branch */
2549 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantizer (C reference): ((2*level+1) * qscale * matrix)>>4
 * using the inter matrix; no oddification (MPEG-2 uses a different mismatch
 * scheme than MPEG-1). */
2558 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2559 DCTELEM *block, int n, int qscale)
2561 int i, level, nCoeffs;
2562 const uint16_t *quant_matrix;
2565 if(s->alternate_scan) nCoeffs= 63;
2566 else nCoeffs= s->block_last_index[n];
2568 quant_matrix = s->inter_matrix;
2569 for(i=0; i<=nCoeffs; i++) {
2570 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines elided in this excerpt) */
2575 level = (((level << 1) + 1) * qscale *
2576 ((int) (quant_matrix[j]))) >> 4;
/* positive branch */
2579 level = (((level << 1) + 1) * qscale *
2580 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantizer (C reference): DC scaled by the luma/chroma
 * DC scale, AC via level*qmul +/- qadd where qadd = (qscale-1)|1 (the
 * definition of qmul is on a line elided from this excerpt). */
2589 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2590 DCTELEM *block, int n, int qscale)
2592 int i, level, qmul, qadd;
2595 assert(s->block_last_index[n]>=0);
/* DC scaling: luma (n<4) vs chroma tables */
2601 block[0] = block[0] * s->y_dc_scale;
2603 block[0] = block[0] * s->c_dc_scale;
/* qadd is forced odd */
2604 qadd = (qscale - 1) | 1;
/* raster_end maps the last scan-order index to the last raster position */
2611 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2613 for(i=1; i<=nCoeffs; i++) {
/* negative levels move toward zero minus qadd, positive plus qadd */
2617 level = level * qmul - qadd;
2619 level = level * qmul + qadd;
/* H.263-style inter dequantizer (C reference): like the intra variant but
 * with no DC special case — all coefficients from i=0 use level*qmul +/- qadd
 * (the definition of qmul is on a line elided from this excerpt). */
2626 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2627 DCTELEM *block, int n, int qscale)
2629 int i, level, qmul, qadd;
2632 assert(s->block_last_index[n]>=0);
/* qadd is forced odd */
2634 qadd = (qscale - 1) | 1;
2637 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2639 for(i=0; i<=nCoeffs; i++) {
/* negative levels get -qadd, positive +qadd */
2643 level = level * qmul - qadd;
2645 level = level * qmul + qadd;
/* Clamp qscale to the valid range (upper bound 31 visible here; the lower
 * clamp is on a line elided from this excerpt) and refresh the derived
 * chroma qscale and DC scale factors that depend on it. */
2653 * set qscale and update qscale dependent variables.
2655 void ff_set_qscale(MpegEncContext * s, int qscale)
2659 else if (qscale > 31)
/* chroma may use a different (remapped) quantizer than luma */
2663 s->chroma_qscale= s->chroma_qscale_table[qscale];
2665 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2666 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Frame-threading: report to waiting threads that rows up to mb_y of the
 * current picture are decoded. Suppressed for B pictures, partitioned frames
 * and after an error, where row-completion cannot be guaranteed. */
2669 void MPV_report_decode_progress(MpegEncContext *s)
2671 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2672 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);