2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the portable C dequantization routines.
 * ff_dct_common_init() installs these as the default function pointers;
 * architecture-specific MPV_common_init_*() calls may replace them later. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
/* bitexact variant is selected when CODEC_FLAG_BITEXACT is set (see ff_dct_common_init) */
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity mapping.
 * Installed as s->chroma_qscale_table by MPV_common_defaults(). */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale (128 entries).
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, constant 4 (presumably the 9-bit intra-DC-precision
 * entry of ff_mpeg2_dc_scale_table[] — TODO confirm against the spec). */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, constant 2 (third entry of ff_mpeg2_dc_scale_table[]). */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, constant 1 (last entry of ff_mpeg2_dc_scale_table[]). */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale lookup, one 128-entry table per index value; index is
 * presumably the MPEG-2 intra_dc_precision field (0..3) — TODO confirm. */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
/* Pixel-format lists offered to get_format(); software 4:2:0 list first,
 * then the variant including hardware-accelerated formats. */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p,end) for an MPEG start code (0x000001xx).
 * @param state carries the last 4 bytes seen, so a start code split across
 *              two consecutive buffers is still found on the next call.
 * @return pointer just past the start code, or end if none was found.
 */
126 const uint8_t *avpriv_mpv_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
134 uint32_t tmp= *state << 8;
135 *state= tmp + *(p++);
136 if(tmp == 0x100 || p==end)
/* fast skip: rule out positions that provably cannot complete 00 00 01 */
141 if (p[-1] > 1 ) p+= 3;
142 else if(p[-2] ) p+= 2;
143 else if(p[-3]|(p[-1]-1)) p++;
156 /* init common dct for both encoder and decoder */
157 av_cold int ff_dct_common_init(MpegEncContext *s)
159 dsputil_init(&s->dsp, s->avctx);
/* install portable C dequantizers as defaults ... */
161 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
162 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
163 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
164 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
165 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
166 if(s->flags & CODEC_FLAG_BITEXACT)
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
168 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* ... then let architecture-specific init override them where available */
171 MPV_common_init_mmx(s);
173 MPV_common_init_axp(s);
175 MPV_common_init_mlib(s);
177 MPV_common_init_mmi(s);
179 MPV_common_init_arm(s);
181 MPV_common_init_altivec(s);
183 MPV_common_init_bfin(s);
186 /* load & permutate scantables
187 note: only wmv uses different ones
189 if(s->alternate_scan){
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
196 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
197 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy src into dst and mark dst as a non-owning copy so its buffers
 * are not released twice. */
202 void ff_copy_picture(Picture *dst, Picture *src){
204 dst->f.type= FF_BUFFER_TYPE_COPY;
208 * Release a frame buffer
/* Releases pic's buffer via the threading-aware path (or the default
 * release for WMV3IMAGE/VC1IMAGE, whose internal buffers bypass user
 * callbacks), then frees any hwaccel private data attached to it. */
210 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
212 /* Windows Media Image codecs allocate internal buffers with different
213 dimensions; ignore user defined callbacks for these */
214 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
215 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
217 avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
218 av_freep(&pic->f.hwaccel_picture_private);
222 * Allocate a frame buffer
/* Allocates pic's data via get_buffer (threading-aware, with the same
 * WMV3IMAGE/VC1IMAGE exception as free_frame_buffer) plus hwaccel private
 * data, and validates that the returned strides are usable.
 * Returns 0 on success, negative on failure. */
224 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
228 if (s->avctx->hwaccel) {
229 assert(!pic->f.hwaccel_picture_private);
230 if (s->avctx->hwaccel->priv_data_size) {
231 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
232 if (!pic->f.hwaccel_picture_private) {
233 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
239 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
240 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
242 r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
244 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
246 r, pic->f.age, pic->f.type, pic->f.data[0]);
247 av_freep(&pic->f.hwaccel_picture_private);
/* strides must not change once s->linesize is established, and chroma
 * planes must share one stride — reject and release otherwise */
251 if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
252 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
253 free_frame_buffer(s, pic);
257 if (pic->f.linesize[1] != pic->f.linesize[2]) {
258 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
259 free_frame_buffer(s, pic);
267 * allocates a Picture
268 * The pixels are allocated/set by calling get_buffer() if shared=0
/* On success returns 0; on any allocation failure jumps to fail:, releases
 * the frame buffer and returns an error. Side effects: updates s->linesize /
 * s->uvlinesize and s->prev_pict_types. */
270 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
271 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
272 const int mb_array_size= s->mb_stride*s->mb_height;
273 const int b8_array_size= s->b8_stride*s->mb_height*2;
274 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared: caller owns the pixels, just tag the picture accordingly */
279 assert(pic->f.data[0]);
280 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
281 pic->f.type = FF_BUFFER_TYPE_SHARED;
283 assert(!pic->f.data[0]);
285 if (alloc_frame_buffer(s, pic) < 0)
288 s->linesize = pic->f.linesize[0];
289 s->uvlinesize = pic->f.linesize[1];
/* first use of this Picture: allocate the per-MB side tables */
292 if (pic->f.qscale_table == NULL) {
294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
296 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
299 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
300 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
301 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
/* +2*stride+1: skip the guard row/column so [-1] accesses stay in bounds */
302 pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
303 pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
304 if(s->out_format == FMT_H264){
306 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
307 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
308 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
310 pic->f.motion_subsample_log2 = 2;
311 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
313 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
314 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
315 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
317 pic->f.motion_subsample_log2 = 3;
319 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
320 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
322 pic->f.qstride = s->mb_stride;
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
326 /* It might be nicer if the application would keep track of these
327 * but it would require an API change. */
328 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
329 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
330 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
331 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
335 fail: //for the FF_ALLOCZ_OR_GOTO macro
337 free_frame_buffer(s, pic);
342 * deallocates a picture
/* Releases the frame buffer (unless the pixels are application-owned, i.e.
 * FF_BUFFER_TYPE_SHARED) and frees all per-MB side tables allocated by
 * ff_alloc_picture(). Safe on partially-initialized pictures: av_freep()
 * ignores NULL. */
344 static void free_picture(MpegEncContext *s, Picture *pic){
347 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
348 free_frame_buffer(s, pic);
351 av_freep(&pic->mb_var);
352 av_freep(&pic->mc_mb_var);
353 av_freep(&pic->mb_mean);
354 av_freep(&pic->f.mbskip_table);
355 av_freep(&pic->qscale_table_base);
356 av_freep(&pic->mb_type_base);
357 av_freep(&pic->f.dct_coeff);
358 av_freep(&pic->f.pan_scan);
359 pic->f.mb_type = NULL;
361 av_freep(&pic->motion_val_base[i]);
362 av_freep(&pic->f.ref_index[i]);
/* shared pixels belong to the caller: just forget the pointers */
365 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
368 pic->f.data[i] = NULL;
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, ME scratchpads, ME maps, optional noise
 * reduction accumulators, DCT blocks and (for FMT_H263) AC prediction
 * values. Returns 0 on success, -1 on allocation failure; partially
 * allocated buffers are released later through MPV_common_end(). */
374 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
375 int y_size = s->b8_stride * (2 * s->mb_height + 1);
376 int c_size = s->mb_stride * (s->mb_height + 1);
377 int yc_size = y_size + 2 * c_size;
380 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
381 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
383 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
384 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* the scratchpads intentionally alias one allocation; they are never live
 * at the same time */
385 s->me.temp= s->me.scratchpad;
386 s->rd_scratchpad= s->me.scratchpad;
387 s->b_scratchpad= s->me.scratchpad;
388 s->obmc_scratchpad= s->me.scratchpad + 16;
390 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
391 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
392 if(s->avctx->noise_reduction){
393 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
396 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
397 s->block= s->blocks[0];
400 s->pblocks[i] = &s->block[i];
403 if (s->out_format == FMT_H263) {
405 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
406 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
407 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
408 s->ac_val[2] = s->ac_val[1] + c_size;
413 return -1; //free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(). Note that
 * obmc_scratchpad aliases me.scratchpad, so it is only NULLed, not freed. */
416 static void free_duplicate_context(MpegEncContext *s){
419 av_freep(&s->edge_emu_buffer);
420 av_freep(&s->me.scratchpad);
424 s->obmc_scratchpad= NULL;
426 av_freep(&s->dct_error_sum);
427 av_freep(&s->me.map);
428 av_freep(&s->me.score_map);
429 av_freep(&s->blocks);
430 av_freep(&s->ac_val_base);
/* Copy the per-thread (duplicate-context) fields from src into bak; used by
 * ff_update_duplicate_context() to preserve them across a struct memcpy. */
434 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
435 #define COPY(a) bak->a= src->a
436 COPY(edge_emu_buffer);
441 COPY(obmc_scratchpad);
448 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole struct,
 * then restore dst's own per-thread buffers (saved in bak) and re-point
 * pblocks[] into dst's own block array. */
460 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
463 //FIXME copy only needed parts
465 backup_duplicate_context(&bak, dst);
466 memcpy(dst, src, sizeof(MpegEncContext));
467 backup_duplicate_context(dst, &bak);
469 dst->pblocks[i] = &dst->block[i];
471 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder state from the source thread's
 * context (s1) into the destination thread's context (s). Copies picture
 * state, bitstream buffer, error-resilience and interlacing fields.
 * Returns 0 (also when dst==src or s1 is not yet initialized). */
474 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
476 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
478 if(dst == src || !s1->context_initialized) return 0;
480 //FIXME can parameters change on I-frames? in that case dst may need a reinit
481 if(!s->context_initialized){
/* first call: clone the whole context, then give this thread its own
 * picture range and bitstream buffer */
482 memcpy(s, s1, sizeof(MpegEncContext));
485 s->picture_range_start += MAX_PICTURE_COUNT;
486 s->picture_range_end += MAX_PICTURE_COUNT;
487 s->bitstream_buffer = NULL;
488 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
493 s->avctx->coded_height = s1->avctx->coded_height;
494 s->avctx->coded_width = s1->avctx->coded_width;
495 s->avctx->width = s1->avctx->width;
496 s->avctx->height = s1->avctx->height;
498 s->coded_picture_number = s1->coded_picture_number;
499 s->picture_number = s1->picture_number;
500 s->input_picture_number = s1->input_picture_number;
502 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
503 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
/* pointers into s1->picture[] must be re-based onto s->picture[] */
505 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
506 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
507 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
509 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
511 //Error/bug resilience
512 s->next_p_frame_damaged = s1->next_p_frame_damaged;
513 s->workaround_bugs = s1->workaround_bugs;
514 s->padding_bug_score = s1->padding_bug_score;
/* MPEG-4 timing: bulk-copy the field range [time_increment_bits, shape) */
517 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
520 s->max_b_frames = s1->max_b_frames;
521 s->low_delay = s1->low_delay;
522 s->dropable = s1->dropable;
524 //DivX handling (doesn't work)
525 s->divx_packed = s1->divx_packed;
527 if(s1->bitstream_buffer){
528 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
529 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
530 s->bitstream_buffer_size = s1->bitstream_buffer_size;
531 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
532 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
535 //MPEG2/interlacing info
536 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
538 if(!s1->first_field){
539 s->last_pict_type= s1->pict_type;
540 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
542 if (s1->pict_type != AV_PICTURE_TYPE_B) {
543 s->last_non_b_pict_type= s1->pict_type;
551 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
552 * the changed fields will not depend upon the prior state of the MpegEncContext.
554 void MPV_common_defaults(MpegEncContext *s){
556 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
557 s->chroma_qscale_table= ff_default_chroma_qscale_table;
558 s->progressive_frame= 1;
559 s->progressive_sequence= 1;
560 s->picture_structure= PICT_FRAME;
562 s->coded_picture_number = 0;
563 s->picture_number = 0;
564 s->input_picture_number = 0;
566 s->picture_in_gop_number = 0;
/* default picture range; frame threads get shifted ranges (see
 * ff_mpeg_update_thread_context) */
571 s->picture_range_start = 0;
572 s->picture_range_end = MAX_PICTURE_COUNT;
576 * sets the given MpegEncContext to defaults for decoding.
577 * the changed fields will not depend upon the prior state of the MpegEncContext.
579 void MPV_decode_defaults(MpegEncContext *s){
580 MPV_common_defaults(s);
584 * init common structure for both encoder and decoder.
585 * this assumes that some variables like width/height are already set
587 av_cold int MPV_common_init(MpegEncContext *s)
589 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
590 threads = (s->encoding ||
592 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
593 s->avctx->thread_count : 1;
/* MPEG-2 interlaced streams need mb_height rounded to 32-line macroblock
 * pairs; H.264 manages its own mb_height */
595 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
596 s->mb_height = (s->height + 31) / 32 * 2;
597 else if (s->codec_id != CODEC_ID_H264)
598 s->mb_height = (s->height + 15) / 16;
600 if(s->avctx->pix_fmt == PIX_FMT_NONE){
601 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* clamp slice-thread count: at most MAX_THREADS and one slice per MB row */
605 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
606 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
607 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
608 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
609 s->avctx->thread_count, max_threads);
610 threads = max_threads;
613 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
616 ff_dct_common_init(s);
618 s->flags= s->avctx->flags;
619 s->flags2= s->avctx->flags2;
/* strides in macroblock (16px), 8x8-block and 4x4-block units; +1 for the
 * guard column used by prediction */
621 s->mb_width = (s->width + 15) / 16;
622 s->mb_stride = s->mb_width + 1;
623 s->b8_stride = s->mb_width*2 + 1;
624 s->b4_stride = s->mb_width*4 + 1;
625 mb_array_size= s->mb_height * s->mb_stride;
626 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
628 /* set chroma shifts */
629 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
630 &(s->chroma_y_shift) );
632 /* set default edge pos, will be overridden in decode_header if needed */
633 s->h_edge_pos= s->mb_width*16;
634 s->v_edge_pos= s->mb_height*16;
636 s->mb_num = s->mb_width * s->mb_height;
641 s->block_wrap[3]= s->b8_stride;
643 s->block_wrap[5]= s->mb_stride;
645 y_size = s->b8_stride * (2 * s->mb_height + 1);
646 c_size = s->mb_stride * (s->mb_height + 1);
647 yc_size = y_size + 2 * c_size;
649 /* convert fourcc to upper case */
650 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
651 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
653 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
655 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error resilience code looks cleaner with this
656 for(y=0; y<s->mb_height; y++){
657 for(x=0; x<s->mb_width; x++){
658 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
661 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
664 /* Allocate MV tables */
665 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
666 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
667 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
668 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
669 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
670 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* working pointers skip the first row+column guard band */
671 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
672 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
673 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
674 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
675 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
676 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
678 if(s->msmpeg4_version){
679 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
681 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
683 /* Allocate MB type table */
684 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
686 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
688 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
689 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix , 64*32 * sizeof(int), fail)
690 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
691 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
692 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
693 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
694 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
695 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
697 if(s->avctx->noise_reduction){
698 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* one full picture pool per thread (frame threading) */
702 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
703 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
704 for(i = 0; i < s->picture_count; i++) {
705 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
708 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
710 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
711 /* interlaced direct mode decoding tables */
716 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
717 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
719 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
720 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
721 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
723 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
726 if (s->out_format == FMT_H263) {
728 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
729 s->coded_block= s->coded_block_base + s->b8_stride + 1;
731 /* cbp, ac_pred, pred_dir */
732 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
733 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
736 if (s->h263_pred || s->h263_plus || !s->encoding) {
738 //MN: we need these for error resilience of intra-frames
739 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
740 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
741 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
742 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = 128 * 8: the neutral DC predictor value */
743 for(i=0;i<yc_size;i++)
744 s->dc_val_base[i] = 1024;
747 /* which mb is a intra block */
748 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
749 memset(s->mbintra_table, 1, mb_array_size);
751 /* init macroblock skip table */
752 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
753 //Note the +1 is for a quicker mpeg4 slice_end detection
754 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
756 s->parse_context.state= -1;
757 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
758 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
759 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
760 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
763 s->context_initialized = 1;
764 s->thread_context[0]= s;
766 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
/* clone one context per slice thread and assign each a MB-row range */
767 for(i=1; i<threads; i++){
768 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
769 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
772 for(i=0; i<threads; i++){
773 if(init_duplicate_context(s->thread_context[i], s) < 0)
775 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
776 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
779 if(init_duplicate_context(s, s) < 0) goto fail;
781 s->end_mb_y = s->mb_height;
791 /* init common structure for both encoder and decoder */
/* Tear down everything allocated by MPV_common_init(): thread contexts,
 * MV/type/qscale tables, picture pool and misc buffers. Mirror of
 * MPV_common_init(); safe on a partially initialized context because
 * av_freep() ignores NULL. */
792 void MPV_common_end(MpegEncContext *s)
796 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
797 for(i=0; i<s->avctx->thread_count; i++){
798 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only indices >=1 were malloc'ed */
800 for(i=1; i<s->avctx->thread_count; i++){
801 av_freep(&s->thread_context[i]);
803 } else free_duplicate_context(s);
805 av_freep(&s->parse_context.buffer);
806 s->parse_context.buffer_size=0;
808 av_freep(&s->mb_type);
809 av_freep(&s->p_mv_table_base);
810 av_freep(&s->b_forw_mv_table_base);
811 av_freep(&s->b_back_mv_table_base);
812 av_freep(&s->b_bidir_forw_mv_table_base);
813 av_freep(&s->b_bidir_back_mv_table_base);
814 av_freep(&s->b_direct_mv_table_base);
/* the non-base pointers are offsets into the freed bases: just NULL them */
816 s->b_forw_mv_table= NULL;
817 s->b_back_mv_table= NULL;
818 s->b_bidir_forw_mv_table= NULL;
819 s->b_bidir_back_mv_table= NULL;
820 s->b_direct_mv_table= NULL;
824 av_freep(&s->b_field_mv_table_base[i][j][k]);
825 s->b_field_mv_table[i][j][k]=NULL;
827 av_freep(&s->b_field_select_table[i][j]);
828 av_freep(&s->p_field_mv_table_base[i][j]);
829 s->p_field_mv_table[i][j]=NULL;
831 av_freep(&s->p_field_select_table[i]);
834 av_freep(&s->dc_val_base);
835 av_freep(&s->coded_block_base);
836 av_freep(&s->mbintra_table);
837 av_freep(&s->cbp_table);
838 av_freep(&s->pred_dir_table);
840 av_freep(&s->mbskip_table);
841 av_freep(&s->prev_pict_types);
842 av_freep(&s->bitstream_buffer);
843 s->allocated_bitstream_buffer_size=0;
845 av_freep(&s->avctx->stats_out);
846 av_freep(&s->ac_stats);
847 av_freep(&s->error_status_table);
848 av_freep(&s->mb_index2xy);
849 av_freep(&s->lambda_table);
/* chroma matrices may alias the luma ones; free only when distinct */
850 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
851 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
852 s->q_chroma_intra_matrix= NULL;
853 s->q_chroma_intra_matrix16= NULL;
854 av_freep(&s->q_intra_matrix);
855 av_freep(&s->q_inter_matrix);
856 av_freep(&s->q_intra_matrix16);
857 av_freep(&s->q_inter_matrix16);
858 av_freep(&s->input_picture);
859 av_freep(&s->reordered_input_picture);
860 av_freep(&s->dct_offset);
862 if(s->picture && !s->avctx->internal->is_copy){
863 for(i=0; i<s->picture_count; i++){
864 free_picture(s, &s->picture[i]);
867 av_freep(&s->picture);
868 s->context_initialized = 0;
871 s->current_picture_ptr= NULL;
872 s->linesize= s->uvlinesize= 0;
875 av_freep(&s->visualization_buffer[i]);
877 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
878 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) of an RLTable, once per 'last' flag. If static_store is
 * non-NULL the tables live there (and a second call is a no-op); otherwise
 * they are av_malloc'ed. */
881 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
883 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
884 uint8_t index_run[MAX_RUN+1];
885 int last, run, level, start, end, i;
887 /* If table is static, we can quit if rl->max_level[0] is not NULL */
888 if(static_store && rl->max_level[0])
891 /* compute max_level[], max_run[] and index_run[] */
892 for(last=0;last<2;last++) {
/* index_run entries default to rl->n, meaning "no code for this run" */
901 memset(max_level, 0, MAX_RUN + 1);
902 memset(max_run, 0, MAX_LEVEL + 1);
903 memset(index_run, rl->n, MAX_RUN + 1);
904 for(i=start;i<end;i++) {
905 run = rl->table_run[i];
906 level = rl->table_level[i];
907 if (index_run[run] == rl->n)
909 if (level > max_level[run])
910 max_level[run] = level;
911 if (run > max_run[level])
912 max_run[level] = run;
/* static_store layout: max_level | max_run | index_run, back to back */
915 rl->max_level[last] = static_store[last];
917 rl->max_level[last] = av_malloc(MAX_RUN + 1);
918 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
920 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
922 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
923 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
925 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
927 rl->index_run[last] = av_malloc(MAX_RUN + 1);
928 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RLTable's VLC into per-qscale rl_vlc[] arrays so the
 * decoder can read run/level/len directly instead of decoding a VLC and
 * then dequantizing. */
932 void init_vlc_rl(RLTable *rl)
944 for(i=0; i<rl->vlc.table_size; i++){
945 int code= rl->vlc.table[i][0];
946 int len = rl->vlc.table[i][1];
949 if(len==0){ // illegal code
952 }else if(len<0){ //more bits needed
956 if(code==rl->n){ //esc
/* regular code: store dequantized level and run; runs after the 'last'
 * boundary are offset by 192 to encode the last flag in-band */
960 run= rl->table_run [code] + 1;
961 level= rl->table_level[code] * qmul + qadd;
962 if(code >= rl->last) run+=192;
965 rl->rl_vlc[q][i].len= len;
966 rl->rl_vlc[q][i].level= level;
967 rl->rl_vlc[q][i].run= run;
/* Free every picture in the pool that is not a reference frame and is
 * owned by this context; the current picture is kept unless
 * remove_current is set. */
972 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
976 /* release non reference frames */
977 for(i=0; i<s->picture_count; i++){
978 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
979 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
980 && (remove_current || &s->picture[i] != s->current_picture_ptr)
981 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
982 free_frame_buffer(s, &s->picture[i]);
/* Find a free slot in this context's picture range and return its index.
 * For shared pictures only completely untyped slots qualify; otherwise
 * slots that kept their side tables (type != 0) are preferred so the
 * allocations can be reused. Aborts on pool exhaustion (see comment). */
987 int ff_find_unused_picture(MpegEncContext *s, int shared){
991 for(i=s->picture_range_start; i<s->picture_range_end; i++){
992 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
996 for(i=s->picture_range_start; i<s->picture_range_end; i++){
997 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1000 for(i=s->picture_range_start; i<s->picture_range_end; i++){
1001 if (s->picture[i].f.data[0] == NULL)
1006 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
1007 /* We could return -1, but the codec would crash trying to draw into a
1008 * non-existing frame anyway. This is safer than waiting for a random crash.
1009 * Also the return of this is never useful, an encoder must only allocate
1010 * as much as allowed in the specification. This has no relationship to how
1011 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1012 * enough for such valid streams).
1013 * Plus, a decoder has to check stream validity and remove frames if too
1014 * many reference frames are around. Waiting for "OOM" is not correct at
1015 * all. Similarly, missing reference frames have to be replaced by
1016 * interpolated/MC frames, anything else is a bug in the codec ...
/**
 * Refresh the per-coefficient noise-reduction offsets for both intra and
 * inter blocks. When the accumulated sample count exceeds 2^16, the error
 * sums and count are halved (exponential decay) to keep the statistics
 * adaptive; the offsets are then recomputed from the decayed sums.
 */
1022 static void update_noise_reduction(MpegEncContext *s){
1025 for(intra=0; intra<2; intra++){
1026 if(s->dct_count[intra] > (1<<16)){
1027 for(i=0; i<64; i++){
1028 s->dct_error_sum[intra][i] >>=1;
1030 s->dct_count[intra] >>= 1;
/* offset_i = noise_reduction * count / error_sum_i, rounded; the +1 in the
 * divisor avoids division by zero for coefficients with no error yet */
1033 for(i=0; i<64; i++){
1034 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
/**
 * Per-frame setup run after the header is parsed/written and before any
 * macroblock is coded or decoded: releases stale reference pictures,
 * selects and allocates the current picture, allocates dummy reference
 * frames when the stream starts on a non-keyframe, fixes up field-picture
 * line offsets, and (re)selects the dequantizers.
 * Returns a negative value on allocation failure (error paths are in
 * elided lines), and delegates to ff_xvmc_field_start() under XvMC.
 */
1040 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1042 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1048 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
/* Drop the previous last_picture once it can no longer be referenced
 * (not needed for B-frame prediction and distinct from next_picture). */
1050 /* mark&release old frames */
1051 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1052 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1053 if (s->last_picture_ptr->owner2 == s)
1054 free_frame_buffer(s, s->last_picture_ptr);
1056 /* release forgotten pictures */
1057 /* if(mpeg124/h263) */
1059 for(i=0; i<s->picture_count; i++){
1060 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
/* a still-referenced picture that is neither next nor current should not
 * exist here except with frame threading; warn otherwise */
1061 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1062 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1063 free_frame_buffer(s, &s->picture[i]);
1071 ff_release_unused_pictures(s, 1);
/* Pick the Picture that will hold the frame being coded/decoded. */
1073 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1074 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1076 i= ff_find_unused_picture(s, 0);
1077 pic= &s->picture[i];
1080 pic->f.reference = 0;
1082 if (s->codec_id == CODEC_ID_H264)
1083 pic->f.reference = s->picture_structure;
1084 else if (s->pict_type != AV_PICTURE_TYPE_B)
/* 3 == both fields are references (PICT_FRAME reference) */
1085 pic->f.reference = 3;
1088 pic->f.coded_picture_number = s->coded_picture_number++;
1090 if(ff_alloc_picture(s, pic, 0) < 0)
1093 s->current_picture_ptr= pic;
1094 //FIXME use only the vars from current_pic
1095 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1096 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
/* for MPEG-1/2 field pictures, derive top_field_first from which field
 * arrives first in coding order */
1097 if(s->picture_structure != PICT_FRAME)
1098 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1100 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1101 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1104 s->current_picture_ptr->f.pict_type = s->pict_type;
1105 // if(s->flags && CODEC_FLAG_QSCALE)
1106 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1107 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1109 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* advance the reference chain: for non-B frames the old "next" becomes
 * "last" and the new frame becomes "next" */
1111 if (s->pict_type != AV_PICTURE_TYPE_B) {
1112 s->last_picture_ptr= s->next_picture_ptr;
1114 s->next_picture_ptr= s->current_picture_ptr;
1116 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1117 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1118 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1119 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1120 s->pict_type, s->dropable);*/
/* If references are missing (stream starts on a non-keyframe or on a
 * field picture), allocate grey dummy frames so MC has valid data. */
1122 if(s->codec_id != CODEC_ID_H264){
1123 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1124 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1125 if (s->pict_type != AV_PICTURE_TYPE_I)
1126 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1127 else if (s->picture_structure != PICT_FRAME)
1128 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1130 /* Allocate a dummy frame */
1131 i= ff_find_unused_picture(s, 0);
1132 s->last_picture_ptr= &s->picture[i];
1133 s->last_picture_ptr->f.key_frame = 0;
1134 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* FLV1/H263 concealment expects luma 16 (black), not uninitialized data */
1137 if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
1138 for(i=0; i<s->height; i++)
1139 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, s->width);
/* mark both fields of the dummy frame fully decoded for frame threading */
1142 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1143 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1145 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1146 /* Allocate a dummy frame */
1147 i= ff_find_unused_picture(s, 0);
1148 s->next_picture_ptr= &s->picture[i];
1149 s->next_picture_ptr->f.key_frame = 0;
1150 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1152 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1153 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1157 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1158 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1160 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* field pictures: address the chosen field by offsetting data[] and
 * doubling the linesizes so one "line" skips the other field */
1162 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1165 if(s->picture_structure == PICT_BOTTOM_FIELD){
1166 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1168 s->current_picture.f.linesize[i] *= 2;
1169 s->last_picture.f.linesize[i] *= 2;
1170 s->next_picture.f.linesize[i] *= 2;
1174 s->error_recognition= avctx->error_recognition;
1176 /* set dequantizer, we can't do it during init as it might change for mpeg4
1177 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1178 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1179 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1180 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1181 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1182 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1183 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1185 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1186 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* dct_error_sum only exists when the encoder enabled noise reduction */
1189 if(s->dct_error_sum){
1190 assert(s->avctx->noise_reduction && s->encoding);
1192 update_noise_reduction(s);
1195 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1196 return ff_xvmc_field_start(s, avctx);
/**
 * Per-frame teardown after a frame was fully coded/decoded: draws the
 * edge-emulation borders around reference frames, updates last-frame
 * statistics, copies the current picture back into the picture array,
 * releases non-reference frames, and publishes the coded frame / thread
 * progress.
 */
1201 /* generic function for encode/decode called after a frame has been coded/decoded */
1202 void MPV_frame_end(MpegEncContext *s)
1205 /* redraw edges for the frame if decoding didn't complete */
1206 //just to make sure that all data is rendered.
1207 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1208 ff_xvmc_field_end(s);
/* pad the edges only for software-decoded reference frames that use
 * unrestricted MV and are not in edge-emulation mode */
1209 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1210 && !s->avctx->hwaccel
1211 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1212 && s->unrestricted_mv
1213 && s->current_picture.f.reference
1215 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1216 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1217 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1218 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1219 s->h_edge_pos , s->v_edge_pos,
1220 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1221 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1222 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1223 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1224 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1225 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1226 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1231 s->last_pict_type = s->pict_type;
1232 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1233 if(s->pict_type!=AV_PICTURE_TYPE_B){
1234 s->last_non_b_pict_type= s->pict_type;
1237 /* copy back current_picture variables */
1238 for(i=0; i<MAX_PICTURE_COUNT; i++){
1239 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1240 s->picture[i]= s->current_picture;
1244 assert(i<MAX_PICTURE_COUNT);
1248 /* release non-reference frames */
1249 for(i=0; i<s->picture_count; i++){
1250 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1251 free_frame_buffer(s, &s->picture[i]);
1255 // clear copies, to avoid confusion
1257 memset(&s->last_picture, 0, sizeof(Picture));
1258 memset(&s->next_picture, 0, sizeof(Picture));
1259 memset(&s->current_picture, 0, sizeof(Picture));
1261 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* signal decode completion of the whole frame for frame threading */
1263 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1264 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
/**
 * Draw an anti-aliased line between (sx,sy) and (ex,ey) into a grayscale
 * plane by adding 'color' weighted by sub-pixel coverage (16.16 fixed
 * point). Endpoints are clipped to the image; the code branches on the
 * dominant axis and swaps endpoints so it always iterates forward.
 */
1269 * draws an line from (ex, ey) -> (sx, sy).
1270 * @param w width of the image
1271 * @param h height of the image
1272 * @param stride stride/linesize of the image
1273 * @param color color of the arrow
1275 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1278 sx= av_clip(sx, 0, w-1);
1279 sy= av_clip(sy, 0, h-1);
1280 ex= av_clip(ex, 0, w-1);
1281 ey= av_clip(ey, 0, h-1);
1283 buf[sy*stride + sx]+= color;
/* x-major: step along x, distribute color between the two nearest rows */
1285 if(FFABS(ex - sx) > FFABS(ey - sy)){
1287 FFSWAP(int, sx, ex);
1288 FFSWAP(int, sy, ey);
1290 buf+= sx + sy*stride;
1292 f= ((ey-sy)<<16)/ex;
1293 for(x= 0; x <= ex; x++){
1296 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1297 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* y-major: step along y, distribute color between the two nearest columns */
1301 FFSWAP(int, sx, ex);
1302 FFSWAP(int, sy, ey);
1304 buf+= sx + sy*stride;
1306 if(ey) f= ((ex-sx)<<16)/ey;
1308 for(y= 0; y <= ey; y++){
1311 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1312 buf[y*stride + x+1]+= (color* fr )>>16;
/**
 * Draw a motion-vector arrow from (ex,ey) to (sx,sy): the shaft plus, for
 * vectors longer than 3 pixels, two short head strokes at the start point.
 * Endpoints are clipped loosely (±100 px outside the image) so arrows for
 * off-screen vectors still render partially; draw_line() does final
 * clipping.
 */
1317 * draws an arrow from (ex, ey) -> (sx, sy).
1319 * @param w width of the image
1320 * @param h height of the image
1321 * @param stride stride/linesize of the image
1322 * @param color color of the arrow
1324 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1327 sx= av_clip(sx, -100, w+100);
1328 sy= av_clip(sy, -100, h+100);
1329 ex= av_clip(ex, -100, w+100);
1330 ey= av_clip(ey, -100, h+100);
1335 if(dx*dx + dy*dy > 3*3){
/* normalize (rx,ry) to a fixed head length (4.4 fixed point via <<8 sqrt) */
1338 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1340 //FIXME subpixel accuracy
1341 rx= ROUNDED_DIV(rx*3<<4, length);
1342 ry= ROUNDED_DIV(ry*3<<4, length);
/* the two arrow-head strokes are the normalized vector and its normal */
1344 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1345 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1347 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/**
 * Emit debug information for a decoded picture, depending on avctx->debug
 * flags: a textual per-macroblock map (skip count, QP, MB type) on the
 * log, and/or visual overlays (motion vectors as arrows, QP and MB type
 * as chroma colouring, partition/interlacing markers) painted into a copy
 * of the picture planes. No-op for hwaccel pictures.
 */
1351 * prints debuging info for the given picture.
1353 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1355 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* ---- textual per-MB dump ---- */
1357 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1360 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1361 av_get_picture_type_char(pict->pict_type));
1362 for(y=0; y<s->mb_height; y++){
1363 for(x=0; x<s->mb_width; x++){
1364 if(s->avctx->debug&FF_DEBUG_SKIP){
1365 int count= s->mbskip_table[x + y*s->mb_stride];
1366 if(count>9) count=9;
1367 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1369 if(s->avctx->debug&FF_DEBUG_QP){
1370 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1372 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1373 int mb_type= pict->mb_type[x + y*s->mb_stride];
1374 //Type & MV direction
1376 av_log(s->avctx, AV_LOG_DEBUG, "P");
1377 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1378 av_log(s->avctx, AV_LOG_DEBUG, "A");
1379 else if(IS_INTRA4x4(mb_type))
1380 av_log(s->avctx, AV_LOG_DEBUG, "i");
1381 else if(IS_INTRA16x16(mb_type))
1382 av_log(s->avctx, AV_LOG_DEBUG, "I");
1383 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1384 av_log(s->avctx, AV_LOG_DEBUG, "d");
1385 else if(IS_DIRECT(mb_type))
1386 av_log(s->avctx, AV_LOG_DEBUG, "D");
1387 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1388 av_log(s->avctx, AV_LOG_DEBUG, "g");
1389 else if(IS_GMC(mb_type))
1390 av_log(s->avctx, AV_LOG_DEBUG, "G");
1391 else if(IS_SKIP(mb_type))
1392 av_log(s->avctx, AV_LOG_DEBUG, "S");
1393 else if(!USES_LIST(mb_type, 1))
1394 av_log(s->avctx, AV_LOG_DEBUG, ">");
1395 else if(!USES_LIST(mb_type, 0))
1396 av_log(s->avctx, AV_LOG_DEBUG, "<");
1398 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1399 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partition shape; third: interlacing */
1404 av_log(s->avctx, AV_LOG_DEBUG, "+");
1405 else if(IS_16X8(mb_type))
1406 av_log(s->avctx, AV_LOG_DEBUG, "-");
1407 else if(IS_8X16(mb_type))
1408 av_log(s->avctx, AV_LOG_DEBUG, "|");
1409 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1410 av_log(s->avctx, AV_LOG_DEBUG, " ");
1412 av_log(s->avctx, AV_LOG_DEBUG, "?");
1415 if(IS_INTERLACED(mb_type))
1416 av_log(s->avctx, AV_LOG_DEBUG, "=");
1418 av_log(s->avctx, AV_LOG_DEBUG, " ");
1420 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1422 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlays: work on a private copy of the planes ---- */
1426 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1427 const int shift= 1 + s->quarter_sample;
1431 int h_chroma_shift, v_chroma_shift, block_height;
1432 const int width = s->avctx->width;
1433 const int height= s->avctx->height;
1434 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1435 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1436 s->low_delay=0; //needed to see the vectors without trashing the buffers
1438 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1440 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1441 pict->data[i]= s->visualization_buffer[i];
1443 pict->type= FF_BUFFER_TYPE_COPY;
1446 block_height = 16>>v_chroma_shift;
1448 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1450 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1451 const int mb_index= mb_x + mb_y*s->mb_stride;
/* motion vectors: type selects P-forward, B-forward or B-backward set */
1452 if((s->avctx->debug_mv) && pict->motion_val){
1454 for(type=0; type<3; type++){
1457 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1461 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1465 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1470 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per partition: 8x8 draws four, 16x8/8x16 draw two, else one */
1473 if(IS_8X8(pict->mb_type[mb_index])){
1476 int sx= mb_x*16 + 4 + 8*(i&1);
1477 int sy= mb_y*16 + 4 + 8*(i>>1);
1478 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1479 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1480 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1481 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1483 }else if(IS_16X8(pict->mb_type[mb_index])){
1487 int sy=mb_y*16 + 4 + 8*i;
1488 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1489 int mx=(pict->motion_val[direction][xy][0]>>shift);
1490 int my=(pict->motion_val[direction][xy][1]>>shift);
1492 if(IS_INTERLACED(pict->mb_type[mb_index]))
1495 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1497 }else if(IS_8X16(pict->mb_type[mb_index])){
1500 int sx=mb_x*16 + 4 + 8*i;
1502 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1503 int mx=(pict->motion_val[direction][xy][0]>>shift);
1504 int my=(pict->motion_val[direction][xy][1]>>shift);
1506 if(IS_INTERLACED(pict->mb_type[mb_index]))
1509 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1512 int sx= mb_x*16 + 8;
1513 int sy= mb_y*16 + 8;
1514 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1515 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1516 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1517 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualisation: fill the MB's chroma with a grey level ∝ qscale */
1521 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1522 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1524 for(y=0; y<block_height; y++){
1525 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1526 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualisation: colour the MB's chroma by macroblock class */
1529 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1530 int mb_type= pict->mb_type[mb_index];
1533 #define COLOR(theta, r)\
1534 u= (int)(128 + r*cos(theta*3.141592/180));\
1535 v= (int)(128 + r*sin(theta*3.141592/180));
1539 if(IS_PCM(mb_type)){
1541 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1543 }else if(IS_INTRA4x4(mb_type)){
1545 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1547 }else if(IS_DIRECT(mb_type)){
1549 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1551 }else if(IS_GMC(mb_type)){
1553 }else if(IS_SKIP(mb_type)){
1555 }else if(!USES_LIST(mb_type, 1)){
1557 }else if(!USES_LIST(mb_type, 0)){
1560 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1564 u*= 0x0101010101010101ULL;
1565 v*= 0x0101010101010101ULL;
1566 for(y=0; y<block_height; y++){
1567 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1568 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR luma markers on partition boundaries (segmentation & MV sameness) */
1572 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1573 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1574 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1576 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1578 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1580 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1581 int dm= 1 << (mv_sample_log2-2);
1583 int sx= mb_x*16 + 8*(i&1);
1584 int sy= mb_y*16 + 8*(i>>1);
1585 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1587 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1588 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1590 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1591 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1592 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1596 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset skip counter so the overlay is regenerated every frame */
1600 s->mbskip_table[mb_index]=0;
/**
 * Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer source offset and a sub-pel
 * phase (sx, sy), falls back to emulated_edge_mc() when the source block
 * overlaps the picture edge, and applies the chroma MC function chosen by
 * the lowres level.
 * NOTE(review): return value / emu handling is in elided lines.
 */
1606 static inline int hpel_motion_lowres(MpegEncContext *s,
1607 uint8_t *dest, uint8_t *src,
1608 int field_based, int field_select,
1609 int src_x, int src_y,
1610 int width, int height, int stride,
1611 int h_edge_pos, int v_edge_pos,
1612 int w, int h, h264_chroma_mc_func *pix_op,
1613 int motion_x, int motion_y)
1615 const int lowres= s->avctx->lowres;
1616 const int op_index= FFMIN(lowres, 2);
/* s_mask isolates the sub-pel fraction at this lowres level */
1617 const int s_mask= (2<<lowres)-1;
1621 if(s->quarter_sample){
1626 sx= motion_x & s_mask;
1627 sy= motion_y & s_mask;
1628 src_x += motion_x >> (lowres+1);
1629 src_y += motion_y >> (lowres+1);
1631 src += src_y * stride + src_x;
/* source block crosses the padded edge: copy through the emu buffer */
1633 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1634 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1635 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1636 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1637 src= s->edge_emu_buffer;
/* rescale the sub-pel phase to the 1/8-pel grid the MC function expects */
1641 sx= (sx << 2) >> lowres;
1642 sy= (sy << 2) >> lowres;
1645 pix_op[op_index](dest, src, stride, h, sx, sy);
/**
 * Apply one MPEG motion vector to all three planes in lowres mode.
 * Computes luma and chroma source positions with per-format chroma MV
 * derivation (H.263 special rounding, H.261 full-pel, generic 4:2:0/4:2:2
 * shifts), handles field-based addressing, uses emulated_edge_mc() near
 * picture borders, and runs the lowres chroma MC functions.
 */
1649 /* apply one mpeg motion vector to the three components */
1650 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1651 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1652 int field_based, int bottom_field, int field_select,
1653 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1654 int motion_x, int motion_y, int h, int mb_y)
1656 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1657 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1658 const int lowres= s->avctx->lowres;
1659 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1660 const int block_s= 8>>lowres;
1661 const int s_mask= (2<<lowres)-1;
1662 const int h_edge_pos = s->h_edge_pos >> lowres;
1663 const int v_edge_pos = s->v_edge_pos >> lowres;
1664 linesize = s->current_picture.f.linesize[0] << field_based;
1665 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1667 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate vertical phase when MCing the other field */
1673 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1676 sx= motion_x & s_mask;
1677 sy= motion_y & s_mask;
1678 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1679 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per output format */
1681 if (s->out_format == FMT_H263) {
1682 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1683 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1686 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1689 uvsx = (2*mx) & s_mask;
1690 uvsy = (2*my) & s_mask;
1691 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1692 uvsrc_y = mb_y*block_s + (my >> lowres);
1694 if(s->chroma_y_shift){
1699 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1700 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1702 if(s->chroma_x_shift){
1706 uvsy = motion_y & s_mask;
1708 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1711 uvsx = motion_x & s_mask;
1712 uvsy = motion_y & s_mask;
1719 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1720 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1721 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* edge emulation when the source block reaches outside the padded frame */
1723 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1724 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1725 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1726 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1727 ptr_y = s->edge_emu_buffer;
1728 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1729 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1730 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1731 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1732 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1733 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1739 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1740 dest_y += s->linesize;
1741 dest_cb+= s->uvlinesize;
1742 dest_cr+= s->uvlinesize;
1746 ptr_y += s->linesize;
1747 ptr_cb+= s->uvlinesize;
1748 ptr_cr+= s->uvlinesize;
/* rescale sub-pel phases for the MC functions, then run luma + chroma MC */
1751 sx= (sx << 2) >> lowres;
1752 sy= (sy << 2) >> lowres;
1753 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1755 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1756 uvsx= (uvsx << 2) >> lowres;
1757 uvsy= (uvsy << 2) >> lowres;
1758 if(h >> s->chroma_y_shift){
1759 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1760 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1763 //FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for a 4MV (four 8x8 luma vectors) macroblock
 * in lowres mode: the four luma MVs are combined into a single chroma MV
 * with H.263 special rounding, then both chroma planes are MCed, using
 * the edge-emulation buffer when the source crosses the picture border.
 */
1766 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1767 uint8_t *dest_cb, uint8_t *dest_cr,
1768 uint8_t **ref_picture,
1769 h264_chroma_mc_func *pix_op,
1771 const int lowres= s->avctx->lowres;
1772 const int op_index= FFMIN(lowres, 2);
1773 const int block_s= 8>>lowres;
1774 const int s_mask= (2<<lowres)-1;
1775 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1776 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1777 int emu=0, src_x, src_y, offset, sx, sy;
1780 if(s->quarter_sample){
1785 /* In case of 8X8, we construct a single chroma motion vector
1786 with a special rounding */
1787 mx= ff_h263_round_chroma(mx);
1788 my= ff_h263_round_chroma(my);
1792 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1793 src_y = s->mb_y*block_s + (my >> (lowres+1));
1795 offset = src_y * s->uvlinesize + src_x;
1796 ptr = ref_picture[1] + offset;
1797 if(s->flags&CODEC_FLAG_EMU_EDGE){
1798 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1799 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1800 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1801 ptr= s->edge_emu_buffer;
1805 sx= (sx << 2) >> lowres;
1806 sy= (sy << 2) >> lowres;
1807 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr reuses the same offset/emulation decision as Cb */
1809 ptr = ref_picture[2] + offset;
1811 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1812 ptr= s->edge_emu_buffer;
1814 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1818 * motion compensation of a single macroblock
1820 * @param dest_y luma destination pointer
1821 * @param dest_cb chroma cb/u destination pointer
1822 * @param dest_cr chroma cr/v destination pointer
1823 * @param dir direction (0->forward, 1->backward)
1824 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1825 * @param pix_op halfpel motion compensation function (average or put normally)
1826 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1828 static inline void MPV_motion_lowres(MpegEncContext *s,
1829 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1830 int dir, uint8_t **ref_picture,
1831 h264_chroma_mc_func *pix_op)
1835 const int lowres= s->avctx->lowres;
1836 const int block_s= 8>>lowres;
1841 switch(s->mv_type) {
1843 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1845 ref_picture, pix_op,
1846 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
1852 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1853 ref_picture[0], 0, 0,
1854 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1855 s->width, s->height, s->linesize,
1856 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1857 block_s, block_s, pix_op,
1858 s->mv[dir][i][0], s->mv[dir][i][1]);
1860 mx += s->mv[dir][i][0];
1861 my += s->mv[dir][i][1];
1864 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1865 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
1868 if (s->picture_structure == PICT_FRAME) {
1870 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1871 1, 0, s->field_select[dir][0],
1872 ref_picture, pix_op,
1873 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1875 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1876 1, 1, s->field_select[dir][1],
1877 ref_picture, pix_op,
1878 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
1880 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1881 ref_picture = s->current_picture_ptr->f.data;
1884 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1885 0, 0, s->field_select[dir][0],
1886 ref_picture, pix_op,
1887 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1892 uint8_t ** ref2picture;
1894 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1895 ref2picture= ref_picture;
1897 ref2picture = s->current_picture_ptr->f.data;
1900 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1901 0, 0, s->field_select[dir][i],
1902 ref2picture, pix_op,
1903 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1905 dest_y += 2*block_s*s->linesize;
1906 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1907 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1911 if(s->picture_structure == PICT_FRAME){
1915 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1917 ref_picture, pix_op,
1918 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1920 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1924 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1925 0, 0, s->picture_structure != i+1,
1926 ref_picture, pix_op,
1927 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1929 // after put we make avg of the same block
1930 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1932 //opposite parity is always in the same frame if this is second field
1933 if(!s->first_field){
1934 ref_picture = s->current_picture_ptr->f.data;
/**
 * Return the lowest macroblock row of the reference frame that the
 * current MB's motion vectors (direction 'dir') can touch, clamped to
 * [0, mb_height-1] — used by frame-threaded decoding to know how much of
 * the reference must be decoded before MC can proceed. Field pictures
 * (and elided mv_type cases) fall through to the conservative
 * whole-frame answer.
 */
1944 * find the lowest MB row referenced in the MVs
1946 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1948 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1949 int my, off, i, mvs;
1951 if (s->picture_structure != PICT_FRAME) goto unhandled;
1953 switch (s->mv_type) {
/* normalize all MVs to quarter-pel and track the vertical extremes */
1967 for (i = 0; i < mvs; i++) {
1968 my = s->mv[dir][i][1]<<qpel_shift;
1969 my_max = FFMAX(my_max, my);
1970 my_min = FFMIN(my_min, my);
/* +63 >> 6: round the furthest quarter-pel reach up to whole MB rows */
1973 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1975 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1977 return s->mb_height-1;
/**
 * Dequantize an intra block and write (not add) the IDCT result to dest.
 * @param i        block index (selects luma/chroma handling in the
 *                 dequantizer)
 * @param qscale   quantizer scale for the dequantization
 */
1980 /* put block[] to dest[] */
1981 static inline void put_dct(MpegEncContext *s,
1982 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1984 s->dct_unquantize_intra(s, block, i, qscale);
1985 s->dsp.idct_put (dest, line_size, block);
/**
 * Add the IDCT of an already-dequantized block to dest; skipped entirely
 * when the block has no coded coefficients (block_last_index < 0).
 */
1988 /* add block[] to dest[] */
1989 static inline void add_dct(MpegEncContext *s,
1990 DCTELEM *block, int i, uint8_t *dest, int line_size)
1992 if (s->block_last_index[i] >= 0) {
1993 s->dsp.idct_add (dest, line_size, block);
/**
 * Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0).
 */
1997 static inline void add_dequant_dct(MpegEncContext *s,
1998 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2000 if (s->block_last_index[i] >= 0) {
2001 s->dct_unquantize_inter(s, block, i, qscale);
2003 s->dsp.idct_add (dest, line_size, block);
/**
 * Reset the intra-prediction state (DC predictors, AC prediction values,
 * coded_block flags) for the current non-intra macroblock so later intra
 * MBs don't predict from it. Luma uses the b8 (8x8 block) grid; chroma
 * uses the macroblock grid.
 */
2008 * cleans dc, ac, coded_block for the current non intra MB
2010 void ff_clean_intra_table_entries(MpegEncContext *s)
2012 int wrap = s->b8_stride;
2013 int xy = s->block_index[0];
/* 1024 is the DC predictor reset value (128 << 3, mid-grey) */
2016 s->dc_val[0][xy + 1 ] =
2017 s->dc_val[0][xy + wrap] =
2018 s->dc_val[0][xy + 1 + wrap] = 1024;
2020 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2021 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* coded_block is only maintained for MSMPEG4 v3 and later */
2022 if (s->msmpeg4_version>=3) {
2023 s->coded_block[xy ] =
2024 s->coded_block[xy + 1 ] =
2025 s->coded_block[xy + wrap] =
2026 s->coded_block[xy + 1 + wrap] = 0;
/* switch to the macroblock grid for the chroma predictors */
2029 wrap = s->mb_stride;
2030 xy = s->mb_x + s->mb_y * wrap;
2032 s->dc_val[2][xy] = 1024;
2034 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2035 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2037 s->mbintra_table[xy]= 0;
2040 /* generic function called after a macroblock has been parsed by the
2041 decoder or after it has been encoded by the encoder.
2043 Important variables used:
2044 s->mb_intra : true if intra macroblock
2045 s->mv_dir : motion vector direction
2046 s->mv_type : motion vector type
2047 s->mv : motion vector
2048 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock: motion compensation plus IDCT/dequant of the
 * residual, writing pixels into s->dest[] (or a scratchpad for unreadable
 * B-frame destinations). Called after a macroblock has been parsed by the
 * decoder or encoded by the encoder; see the comment block above for the
 * MpegEncContext fields it consumes.
 * lowres_flag selects the low-resolution decoding path; is_mpeg12 lets the
 * compiler specialize away non-MPEG-1/2 branches.
 * NOTE(review): this view is truncated (missing braces/statements between
 * the embedded original line numbers); code lines left byte-identical. */
2050 static av_always_inline
2051 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2052 int lowres_flag, int is_mpeg12)
2054 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware acceleration consumes the blocks itself; nothing else to do. */
2055 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2056 ff_xvmc_decode_mb(s);//xvmc uses pblocks
/* Optional debug dump of all DCT coefficients of this macroblock. */
2060 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2061 /* save DCT coefficients */
2063 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2064 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2066 for(j=0; j<64; j++){
/* Store in natural order by undoing the IDCT permutation. */
2067 *dct++ = block[i][s->dsp.idct_permutation[j]];
2068 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2070 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2074 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2076 /* update DC predictors for P macroblocks */
/* For H.263-style prediction, leftover intra state from a previous MB at
 * this position must be cleaned before decoding an inter MB. */
2078 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2079 if(s->mbintra_table[mb_xy])
2080 ff_clean_intra_table_entries(s);
2084 s->last_dc[2] = 128 << s->intra_dc_precision;
/* Intra MB in an H.263-style codec: remember it for later cleanup. */
2087 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2088 s->mbintra_table[mb_xy]=1;
/* Skip full reconstruction while encoding non-reference content unless the
 * pixels are actually needed (PSNR computation or RD mb decision). */
2090 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2091 uint8_t *dest_y, *dest_cb, *dest_cr;
2092 int dct_linesize, dct_offset;
2093 op_pixels_func (*op_pix)[4];
2094 qpel_mc_func (*op_qpix)[16];
2095 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2096 const int uvlinesize = s->current_picture.f.linesize[1];
2097 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2098 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2100 /* avoid copy if macroblock skipped in last frame too */
2101 /* skip only during decoding as we might trash the buffers during encoding a bit */
2103 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2104 const int age = s->current_picture.f.age;
2108 if (s->mb_skipped) {
2110 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2112 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2113 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2115 /* if previous was skipped too, then nothing to do ! */
2116 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2119 } else if(!s->current_picture.f.reference) {
2120 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2121 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2123 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT doubles the line stride and changes the vertical offset
 * of the lower block pair. */
2127 dct_linesize = linesize << s->interlaced_dct;
2128 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2132 dest_cb= s->dest[1];
2133 dest_cr= s->dest[2];
/* Destination not readable (e.g. B-frame without draw_horiz_band):
 * reconstruct into the scratchpad instead. */
2135 dest_y = s->b_scratchpad;
2136 dest_cb= s->b_scratchpad+16*linesize;
2137 dest_cr= s->b_scratchpad+32*linesize;
2141 /* motion handling */
2142 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame-threaded decoding: wait until the referenced rows of the
 * reference frames have been decoded by the other thread. */
2145 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2146 if (s->mv_dir & MV_DIR_FORWARD) {
2147 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2149 if (s->mv_dir & MV_DIR_BACKWARD) {
2150 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
/* Lowres path uses the H.264 chroma MC functions for all planes. */
2155 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2157 if (s->mv_dir & MV_DIR_FORWARD) {
2158 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
/* After forward MC, backward MC must average into the result. */
2159 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2161 if (s->mv_dir & MV_DIR_BACKWARD) {
2162 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* Full-resolution path: choose rounded vs non-rounded put, then
 * switch to averaging for the second (backward) direction. */
2165 op_qpix= s->me.qpel_put;
2166 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2167 op_pix = s->dsp.put_pixels_tab;
2169 op_pix = s->dsp.put_no_rnd_pixels_tab;
2171 if (s->mv_dir & MV_DIR_FORWARD) {
2172 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2173 op_pix = s->dsp.avg_pixels_tab;
2174 op_qpix= s->me.qpel_avg;
2176 if (s->mv_dir & MV_DIR_BACKWARD) {
2177 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2182 /* skip dequant / idct if we are really late ;) */
2183 if(s->avctx->skip_idct){
2184 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2185 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2186 || s->avctx->skip_idct >= AVDISCARD_ALL)
2190 /* add dct residue */
/* Codecs whose coefficients are not yet dequantized at this point:
 * dequantize + IDCT + add. */
2191 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2192 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2193 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2194 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2195 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2196 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2198 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2199 if (s->chroma_y_shift){
2200 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2201 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2205 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2206 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2207 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2208 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Coefficients already dequantized: plain IDCT + add (except WMV2,
 * which has its own per-MB add below). */
2211 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2212 add_dct(s, block[0], 0, dest_y , dct_linesize);
2213 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2214 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2215 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2217 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2218 if(s->chroma_y_shift){//Chroma420
2219 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2220 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 422/444: chroma may also be interlaced-DCT coded. */
2223 dct_linesize = uvlinesize << s->interlaced_dct;
2224 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2226 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2227 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2228 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2229 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2230 if(!s->chroma_x_shift){//Chroma444
2231 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2232 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2233 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2234 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2239 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2240 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2243 /* dct only in intra block */
/* Intra macroblock: no prediction to add, write IDCT output directly. */
2244 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2245 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2246 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2247 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2248 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2250 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2251 if(s->chroma_y_shift){
2252 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2253 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2257 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2258 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2259 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2260 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: coefficients are already dequantized, plain idct_put. */
2264 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2265 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2266 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2267 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2269 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2270 if(s->chroma_y_shift){
2271 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2272 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2275 dct_linesize = uvlinesize << s->interlaced_dct;
2276 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2278 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2279 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2280 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2281 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2282 if(!s->chroma_x_shift){//Chroma444
2283 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2284 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2285 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2286 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* Reconstructed into the scratchpad: copy the finished MB to s->dest[]. */
2294 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2295 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2296 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch macroblock reconstruction to the internal
 * worker, specialized on (lowres, is_mpeg12) so each combination compiles
 * to a dedicated code path. */
2301 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2303 if(s->out_format == FMT_MPEG1) {
2304 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2305 else MPV_decode_mb_internal(s, block, 0, 1);
2308 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2309 else MPV_decode_mb_internal(s, block, 0, 0);
2314 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Notify the user of a completed horizontal band of decoded picture:
 * first extend the picture edges for unrestricted MV prediction, then
 * invoke the application's draw_horiz_band callback (if set) with the
 * appropriate source frame and plane offsets.
 * y is the top of the band; h its height (clipped to the picture).
 * NOTE(review): truncated view — some lines missing; code left byte-identical. */
2316 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2317 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge extension only for software-decoded reference frames that actually
 * need the padded borders (unrestricted MV, no EMU_EDGE). */
2323 if (!s->avctx->hwaccel
2324 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2325 && s->unrestricted_mv
2326 && s->current_picture.f.reference
2328 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2329 int sides = 0, edge_h;
2330 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2331 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2332 if (y==0) sides |= EDGE_TOP;
2333 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2335 edge_h= FFMIN(h, s->v_edge_pos - y);
/* Luma, then the two chroma planes with subsampled geometry. */
2337 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2338 s->linesize, s->h_edge_pos, edge_h,
2339 EDGE_WIDTH, EDGE_WIDTH, sides);
2340 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2341 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2342 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2343 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2344 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2345 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2348 h= FFMIN(h, s->avctx->height - y);
/* Field pictures: skip the first field unless the app opted in. */
2350 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2352 if (s->avctx->draw_horiz_band) {
/* B-frames and low-delay streams display the current picture;
 * otherwise the band comes from the previous (display-order) picture. */
2356 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2357 src= (AVFrame*)s->current_picture_ptr;
2358 else if(s->last_picture_ptr)
2359 src= (AVFrame*)s->last_picture_ptr;
2363 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* Byte offsets of the band start within each plane. */
2369 offset[0]= y * s->linesize;
2371 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2377 s->avctx->draw_horiz_band(s->avctx, src, offset,
2378 y, s->picture_structure, h);
/* Compute the per-macroblock block_index[] entries (DC/AC prediction
 * indices for the 4 luma + 2 chroma blocks) and the destination pixel
 * pointers s->dest[0..2] for the macroblock at (s->mb_x, s->mb_y).
 * NOTE(review): truncated view; code lines left byte-identical. */
2382 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2383 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2384 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the macroblock pixel size (16 at full res, reduced by lowres). */
2385 const int mb_size= 4 - s->avctx->lowres;
/* Four luma 8x8 blocks, indexed on the b8 grid, offset -2/-1 so that
 * the left neighbours are at index-1. */
2387 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2388 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2389 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2390 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Chroma blocks live after the luma area, on the mb grid. */
2391 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2392 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2393 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* Horizontal destination: one MB to the left of the current column
 * (callers presumably advance it; confirm against call sites). */
2395 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2396 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2397 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2399 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
/* Vertical offset differs between frame pictures and field pictures
 * (fields step by mb_y/2 rows of macroblocks). */
2401 if(s->picture_structure==PICT_FRAME){
2402 s->dest[0] += s->mb_y * linesize << mb_size;
2403 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2404 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2406 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2407 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2408 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2409 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush the decoder state: release all internally/user-owned picture
 * buffers, drop the reference-picture pointers, and reset the parser and
 * bitstream buffer so decoding can restart cleanly (e.g. after a seek). */
2414 void ff_mpeg_flush(AVCodecContext *avctx){
2416 MpegEncContext *s = avctx->priv_data;
/* Nothing to flush if the context was never (fully) initialized. */
2418 if(s==NULL || s->picture==NULL)
2421 for(i=0; i<s->picture_count; i++){
2422 if (s->picture[i].f.data[0] &&
2423 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2424 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2425 free_frame_buffer(s, &s->picture[i]);
2427 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2429 s->mb_x= s->mb_y= 0;
/* Reset the startcode parser to its initial "nothing found" state. */
2432 s->parse_context.state= -1;
2433 s->parse_context.frame_start_found= 0;
2434 s->parse_context.overread= 0;
2435 s->parse_context.overread_index= 0;
2436 s->parse_context.index= 0;
2437 s->parse_context.last_index= 0;
2438 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the DC scale factor,
 * AC levels by qscale * intra matrix, with odd-ification
 * ((level-1)|1) as MPEG-1 mismatch control.
 * n is the block index (selects luma vs chroma DC scale).
 * NOTE(review): truncated view — the negative-level branch lines are
 * missing here; code left byte-identical. */
2442 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2443 DCTELEM *block, int n, int qscale)
2445 int i, level, nCoeffs;
2446 const uint16_t *quant_matrix;
2448 nCoeffs= s->block_last_index[n];
/* DC coefficient: luma vs chroma scale chosen by block index n. */
2451 block[0] = block[0] * s->y_dc_scale;
2453 block[0] = block[0] * s->c_dc_scale;
2454 /* XXX: only mpeg1 */
2455 quant_matrix = s->intra_matrix;
2456 for(i=1;i<=nCoeffs;i++) {
2457 int j= s->intra_scantable.permutated[i];
2462 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2463 level = (level - 1) | 1;
2466 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2467 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: level' = ((2*|level|+1) * qscale *
 * inter_matrix[j]) >> 4, odd-ified for mismatch control; all
 * coefficients including index 0 are treated uniformly.
 * NOTE(review): truncated view — sign branches missing; code byte-identical. */
2474 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2475 DCTELEM *block, int n, int qscale)
2477 int i, level, nCoeffs;
2478 const uint16_t *quant_matrix;
2480 nCoeffs= s->block_last_index[n];
2482 quant_matrix = s->inter_matrix;
2483 for(i=0; i<=nCoeffs; i++) {
2484 int j= s->intra_scantable.permutated[i];
2489 level = (((level << 1) + 1) * qscale *
2490 ((int) (quant_matrix[j]))) >> 4;
2491 level = (level - 1) | 1;
2494 level = (((level << 1) + 1) * qscale *
2495 ((int) (quant_matrix[j]))) >> 4;
2496 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 intra variant but without
 * the (level-1)|1 odd-ification, and scanning all 63 AC coefficients
 * when alternate scan is in use. */
2503 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2504 DCTELEM *block, int n, int qscale)
2506 int i, level, nCoeffs;
2507 const uint16_t *quant_matrix;
/* Alternate scan: block_last_index is unreliable, process all 63 ACs. */
2509 if(s->alternate_scan) nCoeffs= 63;
2510 else nCoeffs= s->block_last_index[n];
2513 block[0] = block[0] * s->y_dc_scale;
2515 block[0] = block[0] * s->c_dc_scale;
2516 quant_matrix = s->intra_matrix;
2517 for(i=1;i<=nCoeffs;i++) {
2518 int j= s->intra_scantable.permutated[i];
2523 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2526 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization; same arithmetic as
 * dct_unquantize_mpeg2_intra_c on the lines visible here (truncated
 * lines presumably add the spec's mismatch-control accumulation —
 * confirm against the full source). */
2533 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2534 DCTELEM *block, int n, int qscale)
2536 int i, level, nCoeffs;
2537 const uint16_t *quant_matrix;
2540 if(s->alternate_scan) nCoeffs= 63;
2541 else nCoeffs= s->block_last_index[n];
2544 block[0] = block[0] * s->y_dc_scale;
2546 block[0] = block[0] * s->c_dc_scale;
2547 quant_matrix = s->intra_matrix;
2548 for(i=1;i<=nCoeffs;i++) {
2549 int j= s->intra_scantable.permutated[i];
2554 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2557 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*|level|+1) * qscale * inter_matrix)
 * >> 4, without MPEG-1's odd-ification; covers all 63 coefficients
 * under alternate scan. */
2566 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2567 DCTELEM *block, int n, int qscale)
2569 int i, level, nCoeffs;
2570 const uint16_t *quant_matrix;
2573 if(s->alternate_scan) nCoeffs= 63;
2574 else nCoeffs= s->block_last_index[n];
2576 quant_matrix = s->inter_matrix;
2577 for(i=0; i<=nCoeffs; i++) {
2578 int j= s->intra_scantable.permutated[i];
2583 level = (((level << 1) + 1) * qscale *
2584 ((int) (quant_matrix[j]))) >> 4;
2587 level = (((level << 1) + 1) * qscale *
2588 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: level' = level*qmul +/- qadd
 * (sign-dependent), with qadd = (qscale-1)|1; DC scaled separately.
 * NOTE(review): truncated — the qmul assignment and sign test lines are
 * not visible here; code left byte-identical. */
2597 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2598 DCTELEM *block, int n, int qscale)
2600 int i, level, qmul, qadd;
2603 assert(s->block_last_index[n]>=0);
2609 block[0] = block[0] * s->y_dc_scale;
2611 block[0] = block[0] * s->c_dc_scale;
2612 qadd = (qscale - 1) | 1;
/* Raster-order end index of the coded coefficients. */
2619 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2621 for(i=1; i<=nCoeffs; i++) {
2625 level = level * qmul - qadd;
2627 level = level * qmul + qadd;
/* H.263-style inter dequantization: same qmul/qadd arithmetic as the
 * intra variant, but no DC special case and the loop starts at i=0.
 * NOTE(review): truncated — qmul assignment and sign test lines missing;
 * code left byte-identical. */
2634 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2635 DCTELEM *block, int n, int qscale)
2637 int i, level, qmul, qadd;
2640 assert(s->block_last_index[n]>=0);
2642 qadd = (qscale - 1) | 1;
2645 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2647 for(i=0; i<=nCoeffs; i++) {
2651 level = level * qmul - qadd;
2653 level = level * qmul + qadd;
2661 * set qscale and update qscale dependent variables.
/* Set the quantizer and refresh everything derived from it: the chroma
 * qscale (via the codec's mapping table) and the luma/chroma DC scale
 * factors. The visible upper clamp is 31; the lower clamp line is
 * truncated from this view. */
2663 void ff_set_qscale(MpegEncContext * s, int qscale)
2667 else if (qscale > 31)
2671 s->chroma_qscale= s->chroma_qscale_table[qscale];
2673 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2674 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report row-level decode progress to frame-threaded consumers; skipped
 * for B-frames, partitioned frames and after an error, where rows may
 * not be finalized in order. */
2677 void MPV_report_decode_progress(MpegEncContext *s)
2679 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2680 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);