2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization routines.
 * ff_dct_common_init() below installs these into the corresponding
 * MpegEncContext dct_unquantize_* function pointers; architecture-specific
 * init code may later override them with optimized versions. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity (chroma uses
 * the same quantizer as luma). Installed in MPV_common_defaults(). */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71     0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC scale: a constant divisor of 8 for every qscale value
 * (128 entries, all 8). Also reused as entry 0 of ff_mpeg2_dc_scale_table. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale variant: constant divisor 4 for every qscale value. */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale variant: constant divisor 2 for every qscale value. */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale variant: divisor 1, i.e. full-precision DC. */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Table selector mapping index 0..3 to DC divisors 8, 4, 2, 1.
 * NOTE(review): presumably indexed by the MPEG-2 intra_dc_precision field
 * (8/9/10/11-bit DC) — the indexing site is not visible here; confirm. */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107     ff_mpeg1_dc_scale_table,
108     mpeg2_dc_scale_table1,
109     mpeg2_dc_scale_table2,
110     mpeg2_dc_scale_table3,
/* Pixel formats offered for plain 4:2:0 decoding.
 * NOTE(review): initializer entries are not visible in this view. */
113 const enum PixelFormat ff_pixfmt_list_420[] = {
/* Pixel formats offered when hardware acceleration is available for 4:2:0.
 * NOTE(review): initializer entries are not visible in this view. */
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (byte pattern 00 00 01 xx).
 * *state carries the last four bytes seen, so a start code split across
 * two consecutive buffers is still detected on the next call.
 */
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
133         uint32_t tmp= *state << 8;
134         *state= tmp + *(p++);
135         if(tmp == 0x100 || p==end)
        /* Skip ahead: decide by how much based on which trailing bytes can
         * still be the beginning of a 00 00 01 prefix. */
140         if     (p[-1] > 1      ) p+= 3;
141         else if(p[-2]          ) p+= 2;
142         else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
/* Installs the C reference dct_unquantize_* implementations, lets each
 * architecture override them with optimized versions, then builds the
 * permuted scan tables matching the chosen IDCT's coefficient order. */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
159     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
160     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
161     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
162     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* Bit-exact mode uses a variant that avoids rounding differences. */
163     if(s->flags & CODEC_FLAG_BITEXACT)
164         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
165     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
    /* Per-architecture overrides (each guarded by build-time config in the
     * elided lines of this view). */
168     MPV_common_init_mmx(s);
170     MPV_common_init_axp(s);
172     MPV_common_init_mlib(s);
174     MPV_common_init_mmi(s);
176     MPV_common_init_arm(s);
178     MPV_common_init_altivec(s);
180     MPV_common_init_bfin(s);
183     /* load & permutate scantables
184        note: only wmv uses different ones
    /* Alternate (vertical) scan for interlaced material, zigzag otherwise;
     * all tables are permuted for the IDCT's coefficient ordering. */
186     if(s->alternate_scan){
187         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
188         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
190         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
191         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
193     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copies *src into *dst and marks dst as a copy so its buffers are
 * not released twice (the copy assignment itself is elided in this view). */
199 void ff_copy_picture(Picture *dst, Picture *src){
201     dst->type= FF_BUFFER_TYPE_COPY;
205  * Release a frame buffer
/* Returns the frame's pixel buffers to the (possibly threaded) buffer pool
 * and frees any hwaccel-private data attached to the picture. */
207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
209     ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
210     av_freep(&pic->hwaccel_picture_private);
214  * Allocate a frame buffer
/* Obtains pixel buffers for pic via the (possibly threaded) get_buffer path.
 * Allocates hwaccel-private data first when a hwaccel is active, then
 * validates what get_buffer returned: sane age/type/data, unchanged strides
 * relative to the context, and matching U/V strides. */
216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
220     if (s->avctx->hwaccel) {
221         assert(!pic->hwaccel_picture_private);
222         if (s->avctx->hwaccel->priv_data_size) {
223             pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
224             if (!pic->hwaccel_picture_private) {
225                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
231     r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
    /* Reject buffers the application returned in a broken state. */
233     if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
234         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
235         av_freep(&pic->hwaccel_picture_private);
    /* Once s->linesize is established, every subsequent frame must match it;
     * the decoder's internal tables depend on a constant stride. */
239     if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
240         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
241         free_frame_buffer(s, pic);
    /* U and V planes must share one stride. */
245     if (pic->linesize[1] != pic->linesize[2]) {
246         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
247         free_frame_buffer(s, pic);
255  * allocates a Picture
256  * The pixels are allocated/set by calling get_buffer() if shared=0
/* Allocates the per-picture side tables (qscale, mb_type, motion vectors,
 * etc.) in addition to the pixel buffers. Motion-vector granularity depends
 * on the output format: quarter-MB (b4) for H.264, half-MB (b8) otherwise. */
258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
259     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
260     const int mb_array_size= s->mb_stride*s->mb_height;
261     const int b8_array_size= s->b8_stride*s->mb_height*2;
262     const int b4_array_size= s->b4_stride*s->mb_height*4;
    /* shared: caller owns the pixels; just tag the picture as shared. */
267         assert(pic->data[0]);
268         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
269         pic->type= FF_BUFFER_TYPE_SHARED;
271         assert(!pic->data[0]);
273         if (alloc_frame_buffer(s, pic) < 0)
    /* Record the strides get_buffer() chose; later frames must match. */
276         s->linesize  = pic->linesize[0];
277         s->uvlinesize= pic->linesize[1];
    /* First allocation for this Picture: build all side tables. */
280     if(pic->qscale_table==NULL){
282             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
283             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
284             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
287         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
288         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t)  , fail)
289         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
        /* Offset by one row + one column so out-of-frame neighbours of the
         * first row/column are addressable without bounds checks. */
290         pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
291         pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
292         if(s->out_format == FMT_H264){
294                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
295                 pic->motion_val[i]= pic->motion_val_base[i]+4;
296                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
298             pic->motion_subsample_log2= 2;
299         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
301                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
302                 pic->motion_val[i]= pic->motion_val_base[i]+4;
303                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
305             pic->motion_subsample_log2= 3;
307         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
308             FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
310         pic->qstride= s->mb_stride;
311         FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
314     /* It might be nicer if the application would keep track of these
315      * but it would require an API change. */
316     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
317     s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
318     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
319         pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
323 fail: //for the FF_ALLOCZ_OR_GOTO macro
325         free_frame_buffer(s, pic);
330  * deallocates a picture
/* Frees the pixel buffers (unless the application owns them, i.e. the
 * picture is of the SHARED type) and all side tables of a Picture. */
332 static void free_picture(MpegEncContext *s, Picture *pic){
335     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
336         free_frame_buffer(s, pic);
339     av_freep(&pic->mb_var);
340     av_freep(&pic->mc_mb_var);
341     av_freep(&pic->mb_mean);
342     av_freep(&pic->mbskip_table);
343     av_freep(&pic->qscale_table_base);
344     av_freep(&pic->mb_type_base);
345     av_freep(&pic->dct_coeff);
346     av_freep(&pic->pan_scan);
349         av_freep(&pic->motion_val_base[i]);
350         av_freep(&pic->ref_index[i]);
    /* Shared pictures: further cleanup handled in elided lines. */
353     if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocates the per-thread scratch buffers of a (possibly duplicated)
 * context: edge-emulation buffer, ME scratchpads, ME maps, DCT blocks and
 * (for H.263-family) AC prediction values. Returns 0 on success, -1 on
 * allocation failure (cleanup then happens in MPV_common_end()). */
362 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
363     int y_size = s->b8_stride * (2 * s->mb_height + 1);
364     int c_size = s->mb_stride * (s->mb_height + 1);
365     int yc_size = y_size + 2 * c_size;
368     // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
369     FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
370     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
372      //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
373     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    /* All four scratch pointers alias one allocation; obmc gets a +16
     * offset into it. */
374     s->me.temp=         s->me.scratchpad;
375     s->rd_scratchpad=   s->me.scratchpad;
376     s->b_scratchpad=    s->me.scratchpad;
377     s->obmc_scratchpad= s->me.scratchpad + 16;
379     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
380     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
381     if(s->avctx->noise_reduction){
382         FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
385     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
386     s->block= s->blocks[0];
388         s->pblocks[i] = &s->block[i];
392     if (s->out_format == FMT_H263) {
394         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        /* Offset past the first row/column so neighbour accesses of the
         * first macroblock row stay in bounds. */
395         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
396         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
397         s->ac_val[2] = s->ac_val[1] + c_size;
402     return -1; //free() through MPV_common_end()
/* Frees everything init_duplicate_context() allocated. The aliased
 * scratchpad pointers are only NULLed (elided lines); the single backing
 * allocation is released via s->me.scratchpad. */
405 static void free_duplicate_context(MpegEncContext *s){
408     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
409     av_freep(&s->me.scratchpad);
413     s->obmc_scratchpad= NULL;
415     av_freep(&s->dct_error_sum);
416     av_freep(&s->me.map);
417     av_freep(&s->me.score_map);
418     av_freep(&s->blocks);
419     av_freep(&s->ac_val_base);
/* Copies the per-thread fields (scratch buffers, ME state, ...) from src to
 * bak, field by field via the COPY macro; used by
 * ff_update_duplicate_context() to preserve them across a whole-struct
 * memcpy. Only a subset of the COPY lines is visible in this view. */
423 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
424 #define COPY(a) bak->a= src->a
425     COPY(allocated_edge_emu_buffer);
426     COPY(edge_emu_buffer);
431     COPY(obmc_scratchpad);
438     COPY(me.map_generation);
/* Refreshes a slice-thread context from the master: back up dst's own
 * per-thread buffers, memcpy the whole master context over dst, restore
 * the backed-up buffers, then re-point pblocks into dst's own block array. */
450 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
453     //FIXME copy only needed parts
455     backup_duplicate_context(&bak, dst);
456     memcpy(dst, src, sizeof(MpegEncContext));
457     backup_duplicate_context(dst, &bak);
459         dst->pblocks[i] = &dst->block[i];
461 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
/* Frame-threading support: synchronizes the destination thread's decoder
 * state (picture lists, headers-derived parameters, bitstream buffer) with
 * the source thread after the latter finished parsing a frame's headers. */
464 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
466     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
468     if(dst == src || !s1->context_initialized) return 0;
470     //FIXME can parameters change on I-frames? in that case dst may need a reinit
    /* First use of this thread context: clone the source wholesale, then
     * give it its own picture range and bitstream buffer. */
471     if(!s->context_initialized){
472         memcpy(s, s1, sizeof(MpegEncContext));
475         s->picture_range_start += MAX_PICTURE_COUNT;
476         s->picture_range_end   += MAX_PICTURE_COUNT;
477         s->bitstream_buffer      = NULL;
478         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
483     s->avctx->coded_height  = s1->avctx->coded_height;
484     s->avctx->coded_width   = s1->avctx->coded_width;
485     s->avctx->width         = s1->avctx->width;
486     s->avctx->height        = s1->avctx->height;
488     s->coded_picture_number = s1->coded_picture_number;
489     s->picture_number       = s1->picture_number;
490     s->input_picture_number = s1->input_picture_number;
    /* Copy the picture array plus the block of fields between last_picture
     * and last_picture_ptr (range computed by pointer subtraction), then
     * rebase the picture pointers into this thread's own array. */
492     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
493     memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
495     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
496     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
497     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
499     memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
501     //Error/bug resilience
502     s->next_p_frame_damaged = s1->next_p_frame_damaged;
503     s->workaround_bugs      = s1->workaround_bugs;
    /* MPEG-4 timing fields copied as one contiguous range. */
506     memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
509     s->max_b_frames = s1->max_b_frames;
510     s->low_delay    = s1->low_delay;
511     s->dropable     = s1->dropable;
513     //DivX handling (doesn't work)
514     s->divx_packed  = s1->divx_packed;
    /* Duplicate any pending bitstream data, padded for the bit reader. */
516     if(s1->bitstream_buffer){
517         if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
518             av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
519         s->bitstream_buffer_size  = s1->bitstream_buffer_size;
520         memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
521         memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
524     //MPEG2/interlacing info
525     memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
527     if(!s1->first_field){
528         s->last_pict_type= s1->pict_type;
529         if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
531         if(s1->pict_type!=FF_B_TYPE){
532             s->last_non_b_pict_type= s1->pict_type;
540  * sets the given MpegEncContext to common defaults (same for encoding and decoding).
541  * the changed fields will not depend upon the prior state of the MpegEncContext.
543 void MPV_common_defaults(MpegEncContext *s){
    /* Default scale tables: MPEG-1 DC scaling, identity chroma qscale. */
545     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
546     s->chroma_qscale_table= ff_default_chroma_qscale_table;
547     s->progressive_frame= 1;
548     s->progressive_sequence= 1;
549     s->picture_structure= PICT_FRAME;
551     s->coded_picture_number = 0;
552     s->picture_number = 0;
553     s->input_picture_number = 0;
555     s->picture_in_gop_number = 0;
    /* Full picture-slot range; frame threads get disjoint sub-ranges (see
     * ff_mpeg_update_thread_context). */
560     s->picture_range_start = 0;
561     s->picture_range_end = MAX_PICTURE_COUNT;
565  * sets the given MpegEncContext to defaults for decoding.
566  * the changed fields will not depend upon the prior state of the MpegEncContext.
568 void MPV_decode_defaults(MpegEncContext *s){
569     MPV_common_defaults(s);
573  * init common structure for both encoder and decoder.
574  * this assumes that some variables like width/height are already set
576 av_cold int MPV_common_init(MpegEncContext *s)
578     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
    /* Interlaced MPEG-2 needs an even MB height (two fields of 16 lines). */
580     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
581         s->mb_height = (s->height + 31) / 32 * 2;
582     else if (s->codec_id != CODEC_ID_H264)
583         s->mb_height = (s->height + 15) / 16;
585     if(s->avctx->pix_fmt == PIX_FMT_NONE){
586         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
    /* Slice threading cannot use more threads than MB rows. */
590     if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
591        (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
592         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
596     if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
599     dsputil_init(&s->dsp, s->avctx);
600     ff_dct_common_init(s);
602     s->flags= s->avctx->flags;
603     s->flags2= s->avctx->flags2;
    /* Stride values include one extra column for edge handling. */
605     s->mb_width  = (s->width  + 15) / 16;
606     s->mb_stride = s->mb_width + 1;
607     s->b8_stride = s->mb_width*2 + 1;
608     s->b4_stride = s->mb_width*4 + 1;
609     mb_array_size= s->mb_height * s->mb_stride;
610     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
612     /* set chroma shifts */
613     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
614                                                     &(s->chroma_y_shift) );
616     /* set default edge pos, will be overriden in decode_header if needed */
617     s->h_edge_pos= s->mb_width*16;
618     s->v_edge_pos= s->mb_height*16;
620     s->mb_num = s->mb_width * s->mb_height;
625     s->block_wrap[3]= s->b8_stride;
627     s->block_wrap[5]= s->mb_stride;
629     y_size = s->b8_stride * (2 * s->mb_height + 1);
630     c_size = s->mb_stride * (s->mb_height + 1);
631     yc_size = y_size + 2 * c_size;
633     /* convert fourcc to upper case */
634     s->codec_tag = ff_toupper4(s->avctx->codec_tag);
636     s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
638     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
640     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
641     for(y=0; y<s->mb_height; y++){
642         for(x=0; x<s->mb_width; x++){
643             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
646     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
649         /* Allocate MV tables */
650         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
651         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
652         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
653         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
654         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
655         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
        /* Usable table pointers start one row + one column into the base
         * allocation so neighbour lookups never underflow. */
656         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
657         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
658         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
659         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
660         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
661         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
663         if(s->msmpeg4_version){
664             FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
666         FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
668         /* Allocate MB type table */
669         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
671         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
673         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
674         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
675         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
676         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
677         FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
678         FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
680         if(s->avctx->noise_reduction){
681             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
    /* One pool of MAX_PICTURE_COUNT slots per thread. */
685     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
686     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
687     for(i = 0; i < s->picture_count; i++) {
688         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
691     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
693     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
694         /* interlaced direct mode decoding tables */
699                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
700                     s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
702                 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
703                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
704                 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
706             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
709     if (s->out_format == FMT_H263) {
711         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
712         s->coded_block= s->coded_block_base + s->b8_stride + 1;
714         /* cbp, ac_pred, pred_dir */
715         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
716         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
719     if (s->h263_pred || s->h263_plus || !s->encoding) {
721         //MN: we need these for error resilience of intra-frames
722         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
723         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
724         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
725         s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC predictor (128 << 3). */
726         for(i=0;i<yc_size;i++)
727             s->dc_val_base[i] = 1024;
730     /* which mb is a intra block */
731     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
732     memset(s->mbintra_table, 1, mb_array_size);
734     /* init macroblock skip table */
735     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
736     //Note the +1 is for a quicker mpeg4 slice_end detection
737     FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
739     s->parse_context.state= -1;
740     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
741        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
742        s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
743        s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
746     s->context_initialized = 1;
747     s->thread_context[0]= s;
    /* Slice threading / encoding: clone the context once per thread and
     * split the MB rows evenly among them. */
749     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
750         threads = s->avctx->thread_count;
752         for(i=1; i<threads; i++){
753             s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
754             memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
757         for(i=0; i<threads; i++){
758             if(init_duplicate_context(s->thread_context[i], s) < 0)
760             s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
761             s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
764         if(init_duplicate_context(s, s) < 0) goto fail;
766         s->end_mb_y   = s->mb_height;
776 /* init common structure for both encoder and decoder */
/* Tears down everything MPV_common_init() allocated: thread contexts,
 * MV/side tables, picture pool and visualization buffers. Safe to call
 * on a partially initialized context (av_freep on NULL is a no-op). */
777 void MPV_common_end(MpegEncContext *s)
781     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
782         for(i=0; i<s->avctx->thread_count; i++){
783             free_duplicate_context(s->thread_context[i]);
        /* thread_context[0] aliases s itself, so only [1..n) are freed. */
785         for(i=1; i<s->avctx->thread_count; i++){
786             av_freep(&s->thread_context[i]);
788     } else free_duplicate_context(s);
790     av_freep(&s->parse_context.buffer);
791     s->parse_context.buffer_size=0;
793     av_freep(&s->mb_type);
794     av_freep(&s->p_mv_table_base);
795     av_freep(&s->b_forw_mv_table_base);
796     av_freep(&s->b_back_mv_table_base);
797     av_freep(&s->b_bidir_forw_mv_table_base);
798     av_freep(&s->b_bidir_back_mv_table_base);
799     av_freep(&s->b_direct_mv_table_base);
    /* The offset aliases of the *_base tables must be NULLed by hand. */
801     s->b_forw_mv_table= NULL;
802     s->b_back_mv_table= NULL;
803     s->b_bidir_forw_mv_table= NULL;
804     s->b_bidir_back_mv_table= NULL;
805     s->b_direct_mv_table= NULL;
809                 av_freep(&s->b_field_mv_table_base[i][j][k]);
810                 s->b_field_mv_table[i][j][k]=NULL;
812             av_freep(&s->b_field_select_table[i][j]);
813             av_freep(&s->p_field_mv_table_base[i][j]);
814             s->p_field_mv_table[i][j]=NULL;
816         av_freep(&s->p_field_select_table[i]);
819     av_freep(&s->dc_val_base);
820     av_freep(&s->coded_block_base);
821     av_freep(&s->mbintra_table);
822     av_freep(&s->cbp_table);
823     av_freep(&s->pred_dir_table);
825     av_freep(&s->mbskip_table);
826     av_freep(&s->prev_pict_types);
827     av_freep(&s->bitstream_buffer);
828     s->allocated_bitstream_buffer_size=0;
830     av_freep(&s->avctx->stats_out);
831     av_freep(&s->ac_stats);
832     av_freep(&s->error_status_table);
833     av_freep(&s->mb_index2xy);
834     av_freep(&s->lambda_table);
835     av_freep(&s->q_intra_matrix);
836     av_freep(&s->q_inter_matrix);
837     av_freep(&s->q_intra_matrix16);
838     av_freep(&s->q_inter_matrix16);
839     av_freep(&s->input_picture);
840     av_freep(&s->reordered_input_picture);
841     av_freep(&s->dct_offset);
    /* Copies (frame threads) do not own the picture buffers. */
843     if(s->picture && !s->avctx->is_copy){
844         for(i=0; i<s->picture_count; i++){
845             free_picture(s, &s->picture[i]);
848     av_freep(&s->picture);
849     s->context_initialized = 0;
852     s->current_picture_ptr= NULL;
853     s->linesize= s->uvlinesize= 0;
856         av_freep(&s->visualization_buffer[i]);
858     if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
859         avcodec_default_free_buffers(s->avctx);
/* Builds the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) of an RLTable, once per "last" flag (0/1). When
 * static_store is given the tables live in that static buffer, otherwise
 * they are heap-allocated with av_malloc(). */
862 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
864     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
865     uint8_t index_run[MAX_RUN+1];
866     int last, run, level, start, end, i;
868     /* If table is static, we can quit if rl->max_level[0] is not NULL */
869     if(static_store && rl->max_level[0])
872     /* compute max_level[], max_run[] and index_run[] */
873     for(last=0;last<2;last++) {
        /* rl->n marks "no entry" in index_run. */
882         memset(max_level, 0, MAX_RUN + 1);
883         memset(max_run, 0, MAX_LEVEL + 1);
884         memset(index_run, rl->n, MAX_RUN + 1);
885         for(i=start;i<end;i++) {
886             run = rl->table_run[i];
887             level = rl->table_level[i];
            /* Record only the first table index for each run. */
888             if (index_run[run] == rl->n)
890             if (level > max_level[run])
891                 max_level[run] = level;
892             if (run > max_run[level])
893                 max_run[level] = run;
        /* Copy out into the static buffer (three consecutive sub-arrays)
         * or into fresh heap allocations. */
896             rl->max_level[last] = static_store[last];
898             rl->max_level[last] = av_malloc(MAX_RUN + 1);
899         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
901             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
903             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
904         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
906             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
908             rl->index_run[last] = av_malloc(MAX_RUN + 1);
909         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precomputes, for every VLC table entry and every qscale q, the final
 * (len, level, run) triple so the decoder can skip the run/level
 * reconstruction in its inner loop. */
913 void init_vlc_rl(RLTable *rl)
925         for(i=0; i<rl->vlc.table_size; i++){
926             int code= rl->vlc.table[i][0];
927             int len = rl->vlc.table[i][1];
930             if(len==0){ // illegal code
933             }else if(len<0){ //more bits needed
937                 if(code==rl->n){ //esc
                /* Normal code: dequantize level with qmul/qadd; +192 on run
                 * flags the "last coefficient" variant. */
941                     run=   rl->table_run  [code] + 1;
942                     level= rl->table_level[code] * qmul + qadd;
943                     if(code >= rl->last) run+=192;
946             rl->rl_vlc[q][i].len= len;
947             rl->rl_vlc[q][i].level= level;
948             rl->rl_vlc[q][i].run= run;
/* Frees the buffers of every picture that is neither referenced nor owned
 * by another thread; the current picture is kept unless remove_current. */
953 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
957     /* release non reference frames */
958     for(i=0; i<s->picture_count; i++){
959         if(s->picture[i].data[0] && !s->picture[i].reference
960            && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
961            && (remove_current || &s->picture[i] !=  s->current_picture_ptr)
962            /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
963             free_frame_buffer(s, &s->picture[i]);
/* Returns the index of a free slot in this context's picture range.
 * Preference order: completely unused slot (no data, type 0), then a
 * previously-used but currently empty slot, then any slot without data.
 * Aborts via av_log(FATAL) on overflow — see the rationale below. */
968 int ff_find_unused_picture(MpegEncContext *s, int shared){
972         for(i=s->picture_range_start; i<s->picture_range_end; i++){
973             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
976         for(i=s->picture_range_start; i<s->picture_range_end; i++){
977             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
979         for(i=s->picture_range_start; i<s->picture_range_end; i++){
980             if(s->picture[i].data[0]==NULL) return i;
984     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
985     /* We could return -1, but the codec would crash trying to draw into a
986      * non-existing frame anyway. This is safer than waiting for a random crash.
987      * Also the return of this is never useful, an encoder must only allocate
988      * as much as allowed in the specification. This has no relationship to how
989      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
990      * enough for such valid streams).
991      * Plus, a decoder has to check stream validity and remove frames if too
992      * many reference frames are around. Waiting for "OOM" is not correct at
993      * all. Similarly, missing reference frames have to be replaced by
994      * interpolated/MC frames, anything else is a bug in the codec ...
/* Refreshes the per-coefficient noise-reduction offsets from the running
 * DCT error statistics; halves the accumulators once the sample count
 * exceeds 2^16 so they track recent frames instead of the whole stream. */
1000 static void update_noise_reduction(MpegEncContext *s){
1003     for(intra=0; intra<2; intra++){
1004         if(s->dct_count[intra] > (1<<16)){
1005             for(i=0; i<64; i++){
1006                 s->dct_error_sum[intra][i] >>=1;
1008             s->dct_count[intra] >>= 1;
        /* offset = noise_reduction * count / error_sum, rounded; +1 and /2
         * guard against division by zero and implement rounding. */
1011         for(i=0; i<64; i++){
1012             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1018 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1020 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* H.264 manages its own reference lists; last_picture_ptr is only expected
 * here for non-H.264 formats (SVQ3 reuses the H.264 code path). */
1026 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1028 /* mark&release old frames */
1029 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
1030 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1031 free_frame_buffer(s, s->last_picture_ptr);
1033 /* release forgotten pictures */
1034 /* if(mpeg124/h263) */
1036 for(i=0; i<s->picture_count; i++){
1037 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1038 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1039 free_frame_buffer(s, &s->picture[i]);
1047 ff_release_unused_pictures(s, 1);
/* Pick the picture to decode/encode into: reuse current_picture_ptr if it
 * has no buffer yet (may have been set while parsing the header), otherwise
 * grab a fresh slot. */
1049 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1050 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1052 i= ff_find_unused_picture(s, 0);
1053 pic= &s->picture[i];
/* Reference marking: H.264 stores the picture structure here; other codecs
 * treat everything except B frames as references (branch body not visible). */
1058 if (s->codec_id == CODEC_ID_H264)
1059 pic->reference = s->picture_structure;
1060 else if (s->pict_type != AV_PICTURE_TYPE_B)
1064 pic->coded_picture_number= s->coded_picture_number++;
1066 if(ff_alloc_picture(s, pic, 0) < 0)
1069 s->current_picture_ptr= pic;
1070 //FIXME use only the vars from current_pic
/* Propagate interlacing metadata onto the output frame. For MPEG-1/2 field
 * pictures, top_field_first is derived from which field comes first. */
1071 s->current_picture_ptr->top_field_first= s->top_field_first;
1072 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1073 if(s->picture_structure != PICT_FRAME)
1074 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1076 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
1077 s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
1080 s->current_picture_ptr->pict_type= s->pict_type;
1081 // if(s->flags && CODEC_FLAG_QSCALE)
1082 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1083 s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
1085 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Shift the reference chain for non-B pictures: next becomes last,
 * current becomes next. */
1087 if (s->pict_type != AV_PICTURE_TYPE_B) {
1088 s->last_picture_ptr= s->next_picture_ptr;
1090 s->next_picture_ptr= s->current_picture_ptr;
1092 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1093 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
1094 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
1095 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
1096 s->pict_type, s->dropable);*/
/* Non-H.264: make sure the reference pointers needed by MC exist, even for
 * broken streams — allocate dummy (gray/uninitialized) frames if missing. */
1098 if(s->codec_id != CODEC_ID_H264){
1099 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
1100 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1101 if (s->pict_type != AV_PICTURE_TYPE_I)
1102 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1103 else if (s->picture_structure != PICT_FRAME)
1104 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1106 /* Allocate a dummy frame */
1107 i= ff_find_unused_picture(s, 0);
1108 s->last_picture_ptr= &s->picture[i];
1109 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* Mark the dummy as fully decoded so frame-threaded consumers never wait. */
1111 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1112 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1114 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
1115 /* Allocate a dummy frame */
1116 i= ff_find_unused_picture(s, 0);
1117 s->next_picture_ptr= &s->picture[i];
1118 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1120 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1121 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1125 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1126 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1128 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* Field pictures (non-H.264): address a single field by doubling the line
 * strides, offsetting by one line for the bottom field. */
1130 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1133 if(s->picture_structure == PICT_BOTTOM_FIELD){
1134 s->current_picture.data[i] += s->current_picture.linesize[i];
1136 s->current_picture.linesize[i] *= 2;
1137 s->last_picture.linesize[i] *=2;
1138 s->next_picture.linesize[i] *=2;
1142 s->error_recognition= avctx->error_recognition;
1144 /* set dequantizer, we can't do it during init as it might change for mpeg4
1145 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1146 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1147 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1148 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1149 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1150 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1151 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1153 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1154 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1157 if(s->dct_error_sum){
1158 assert(s->avctx->noise_reduction && s->encoding);
1160 update_noise_reduction(s);
1163 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1164 return ff_xvmc_field_start(s, avctx);
1169 /* generic function for encode/decode called after a frame has been coded/decoded */
1170 void MPV_frame_end(MpegEncContext *s)
1173 /* redraw edges for the frame if decoding didn't complete */
1174 //just to make sure that all data is rendered.
1175 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1176 ff_xvmc_field_end(s);
/* Pad picture borders (needed by unrestricted MVs) unless a hardware
 * accelerator owns the surfaces or edge emulation is in use. */
1177 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1178 && !s->avctx->hwaccel
1179 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1180 && s->unrestricted_mv
1181 && s->current_picture.reference
1183 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
/* Chroma planes are padded with the edge size scaled by the pixel format's
 * chroma subsampling shifts. */
1184 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1185 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1186 s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
1187 s->h_edge_pos , s->v_edge_pos,
1188 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1189 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
1190 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1191 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1192 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
1193 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1194 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
/* Remember per-type state used by rate control / header coding of the
 * next frame. */
1199 s->last_pict_type = s->pict_type;
1200 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1201 if(s->pict_type!=AV_PICTURE_TYPE_B){
1202 s->last_non_b_pict_type= s->pict_type;
1205 /* copy back current_picture variables */
1206 for(i=0; i<MAX_PICTURE_COUNT; i++){
1207 if(s->picture[i].data[0] == s->current_picture.data[0]){
1208 s->picture[i]= s->current_picture;
/* current_picture must alias exactly one slot in s->picture[]. */
1212 assert(i<MAX_PICTURE_COUNT);
1216 /* release non-reference frames */
1217 for(i=0; i<s->picture_count; i++){
1218 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1219 free_frame_buffer(s, &s->picture[i]);
1223 // clear copies, to avoid confusion
1225 memset(&s->last_picture, 0, sizeof(Picture));
1226 memset(&s->next_picture, 0, sizeof(Picture));
1227 memset(&s->current_picture, 0, sizeof(Picture));
1229 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* For frame threading: non-H.264 codecs report the whole frame done here
 * (H.264 reports progress per-row itself). */
1231 if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
1232 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1237 * Draws a line from (ex, ey) -> (sx, sy) using fixed-point (16.16) DDA
1237b * interpolation with simple 2-tap coverage anti-aliasing.
1238 * @param w width of the image
1239 * @param h height of the image
1240 * @param stride stride/linesize of the image
1241 * @param color color (intensity added to the luma plane) of the line
1243 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Clamp both endpoints into the image before drawing. */
1246 sx= av_clip(sx, 0, w-1);
1247 sy= av_clip(sy, 0, h-1);
1248 ex= av_clip(ex, 0, w-1);
1249 ey= av_clip(ey, 0, h-1);
1251 buf[sy*stride + sx]+= color;
/* Mostly-horizontal case: step in x, interpolate y in 16.16 fixed point. */
1253 if(FFABS(ex - sx) > FFABS(ey - sy)){
1255 FFSWAP(int, sx, ex);
1256 FFSWAP(int, sy, ey);
1258 buf+= sx + sy*stride;
/* NOTE(review): this division by ex has no visible `if(ex)` guard, unlike
 * the symmetric `if(ey)` guard on the vertical branch below — the guard may
 * be on a line not shown here; confirm against the full source (ex==0 would
 * divide by zero). */
1260 f= ((ey-sy)<<16)/ex;
1261 for(x= 0; x <= ex; x++){
/* Split color between the two vertically adjacent pixels by the fractional
 * part fr (coverage anti-aliasing). */
1264 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1265 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* Mostly-vertical case: step in y, interpolate x. */
1269 FFSWAP(int, sx, ex);
1270 FFSWAP(int, sy, ey);
1272 buf+= sx + sy*stride;
1274 if(ey) f= ((ex-sx)<<16)/ey;
1276 for(y= 0; y <= ey; y++){
1279 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1280 buf[y*stride + x+1]+= (color* fr )>>16;
1286 * Draws an arrow from (ex, ey) -> (sx, sy): the shaft plus, for long enough
1286b * vectors, two short head strokes at the (sx, sy) end.
1287 * @param w width of the image
1288 * @param h height of the image
1289 * @param stride stride/linesize of the image
1290 * @param color color of the arrow
1292 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Looser clamp than draw_line: endpoints may lie up to 100px outside the
 * image so off-screen motion vectors still draw a partial arrow
 * (draw_line clips strictly afterwards). */
1295 sx= av_clip(sx, -100, w+100);
1296 sy= av_clip(sy, -100, h+100);
1297 ex= av_clip(ex, -100, w+100);
1298 ey= av_clip(ey, -100, h+100);
/* Only draw a head when the vector is longer than 3 pixels. */
1303 if(dx*dx + dy*dy > 3*3){
/* length in 16.4-ish fixed point; rx/ry become a 3px vector along the shaft. */
1306 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1308 //FIXME subpixel accuracy
1309 rx= ROUNDED_DIV(rx*3<<4, length);
1310 ry= ROUNDED_DIV(ry*3<<4, length);
/* Two head strokes: one along the shaft direction, one perpendicular. */
1312 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1313 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1315 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1319 * Prints debugging info for the given picture: textual per-macroblock maps
1319b * (skip count, qscale, MB type) and/or visual overlays (motion vectors as
1319c * arrows, QP and MB type as chroma colouring) depending on avctx->debug and
1319d * avctx->debug_mv flags.
1321 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1323 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* --- Textual dump: one character/number per macroblock per enabled flag. */
1325 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1328 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1329 av_get_picture_type_char(pict->pict_type));
1330 for(y=0; y<s->mb_height; y++){
1331 for(x=0; x<s->mb_width; x++){
1332 if(s->avctx->debug&FF_DEBUG_SKIP){
/* Consecutive-skip counter, saturated at 9 so it stays one digit wide. */
1333 int count= s->mbskip_table[x + y*s->mb_stride];
1334 if(count>9) count=9;
1335 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1337 if(s->avctx->debug&FF_DEBUG_QP){
1338 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1340 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1341 int mb_type= pict->mb_type[x + y*s->mb_stride];
1342 //Type & MV direction
1344 av_log(s->avctx, AV_LOG_DEBUG, "P");
1345 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1346 av_log(s->avctx, AV_LOG_DEBUG, "A");
1347 else if(IS_INTRA4x4(mb_type))
1348 av_log(s->avctx, AV_LOG_DEBUG, "i");
1349 else if(IS_INTRA16x16(mb_type))
1350 av_log(s->avctx, AV_LOG_DEBUG, "I");
1351 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1352 av_log(s->avctx, AV_LOG_DEBUG, "d");
1353 else if(IS_DIRECT(mb_type))
1354 av_log(s->avctx, AV_LOG_DEBUG, "D");
1355 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1356 av_log(s->avctx, AV_LOG_DEBUG, "g");
1357 else if(IS_GMC(mb_type))
1358 av_log(s->avctx, AV_LOG_DEBUG, "G");
1359 else if(IS_SKIP(mb_type))
1360 av_log(s->avctx, AV_LOG_DEBUG, "S");
1361 else if(!USES_LIST(mb_type, 1))
1362 av_log(s->avctx, AV_LOG_DEBUG, ">");
1363 else if(!USES_LIST(mb_type, 0))
1364 av_log(s->avctx, AV_LOG_DEBUG, "<");
/* Bidirectional: must use both reference lists. */
1366 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1367 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: block partitioning (8x8 / 16x8 / 8x16 / 16x16). */
1372 av_log(s->avctx, AV_LOG_DEBUG, "+");
1373 else if(IS_16X8(mb_type))
1374 av_log(s->avctx, AV_LOG_DEBUG, "-");
1375 else if(IS_8X16(mb_type))
1376 av_log(s->avctx, AV_LOG_DEBUG, "|");
1377 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1378 av_log(s->avctx, AV_LOG_DEBUG, " ");
1380 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1383 if(IS_INTERLACED(mb_type))
1384 av_log(s->avctx, AV_LOG_DEBUG, "=");
1386 av_log(s->avctx, AV_LOG_DEBUG, " ");
1388 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1390 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- Visual overlays: draw into a private copy of the picture so the
 * decoder's reference buffers are not corrupted. */
1394 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1395 const int shift= 1 + s->quarter_sample;
1399 int h_chroma_shift, v_chroma_shift, block_height;
1400 const int width = s->avctx->width;
1401 const int height= s->avctx->height;
1402 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
/* Non-H.264 codecs store one extra MV column per row. */
1403 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1404 s->low_delay=0; //needed to see the vectors without trashing the buffers
1406 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1408 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1409 pict->data[i]= s->visualization_buffer[i];
1411 pict->type= FF_BUFFER_TYPE_COPY;
1414 block_height = 16>>v_chroma_shift;
1416 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1418 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1419 const int mb_index= mb_x + mb_y*s->mb_stride;
/* Motion vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward;
 * each type is skipped unless its FF_DEBUG_VIS_MV_* bit is set and the
 * picture type matches. */
1420 if((s->avctx->debug_mv) && pict->motion_val){
1422 for(type=0; type<3; type++){
1425 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1429 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1433 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1438 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* One arrow per 8x8 sub-block, anchored at each sub-block centre. */
1441 if(IS_8X8(pict->mb_type[mb_index])){
1444 int sx= mb_x*16 + 4 + 8*(i&1);
1445 int sy= mb_y*16 + 4 + 8*(i>>1);
1446 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1447 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1448 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1449 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1451 }else if(IS_16X8(pict->mb_type[mb_index])){
1455 int sy=mb_y*16 + 4 + 8*i;
1456 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1457 int mx=(pict->motion_val[direction][xy][0]>>shift);
1458 int my=(pict->motion_val[direction][xy][1]>>shift);
/* Field-coded MBs: vertical MVs are in field units, scale them up. */
1460 if(IS_INTERLACED(pict->mb_type[mb_index]))
1463 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1465 }else if(IS_8X16(pict->mb_type[mb_index])){
1468 int sx=mb_x*16 + 4 + 8*i;
1470 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1471 int mx=(pict->motion_val[direction][xy][0]>>shift);
1472 int my=(pict->motion_val[direction][xy][1]>>shift);
1474 if(IS_INTERLACED(pict->mb_type[mb_index]))
1477 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* Default: single 16x16 arrow from the macroblock centre. */
1480 int sx= mb_x*16 + 8;
1481 int sy= mb_y*16 + 8;
1482 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1483 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1484 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1485 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualisation: paint both chroma planes of the MB with a grey level
 * proportional to qscale (scaled from the 1..31 range to 0..255). */
1489 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1490 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1492 for(y=0; y<block_height; y++){
1493 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1494 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualisation: map each type to a hue via the COLOR macro. */
1497 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1498 int mb_type= pict->mb_type[mb_index];
1501 #define COLOR(theta, r)\
1502 u= (int)(128 + r*cos(theta*3.141592/180));\
1503 v= (int)(128 + r*sin(theta*3.141592/180));
1507 if(IS_PCM(mb_type)){
1509 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1511 }else if(IS_INTRA4x4(mb_type)){
1513 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1515 }else if(IS_DIRECT(mb_type)){
1517 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1519 }else if(IS_GMC(mb_type)){
1521 }else if(IS_SKIP(mb_type)){
1523 }else if(!USES_LIST(mb_type, 1)){
1525 }else if(!USES_LIST(mb_type, 0)){
1528 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the u/v bytes across a 64-bit word to fill 8 pixels at once. */
1532 u*= 0x0101010101010101ULL;
1533 v*= 0x0101010101010101ULL;
1534 for(y=0; y<block_height; y++){
1535 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1536 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* Draw partition boundaries on luma by XOR-ing 0x80 (visible, reversible). */
1540 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1541 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1542 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1544 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1546 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* For 8x8 MBs with sub-8x8 MV resolution, mark where neighbouring MVs
 * actually differ. */
1548 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1549 int dm= 1 << (mv_sample_log2-2);
1551 int sx= mb_x*16 + 8*(i&1);
1552 int sy= mb_y*16 + 8*(i>>1);
1553 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* Treat the (mx,my) pair as one int32 for cheap equality comparison. */
1555 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1556 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1558 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1559 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1560 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1564 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* Reset the skip counter after the MB has been reported. */
1568 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for a single block at reduced (lowres)
 * resolution. Splits the motion vector into an integer source offset and a
 * sub-pel fraction, falls back to edge emulation when the source block
 * would read outside the padded picture, and applies the h264 chroma MC
 * kernel (reused here as a generic bilinear interpolator).
 * Returns emu (whether edge emulation was used) on lines not visible here —
 * TODO confirm against the full source. */
1574 static inline int hpel_motion_lowres(MpegEncContext *s,
1575 uint8_t *dest, uint8_t *src,
1576 int field_based, int field_select,
1577 int src_x, int src_y,
1578 int width, int height, int stride,
1579 int h_edge_pos, int v_edge_pos,
1580 int w, int h, h264_chroma_mc_func *pix_op,
1581 int motion_x, int motion_y)
1583 const int lowres= s->avctx->lowres;
1584 const int op_index= FFMIN(lowres, 2);
/* Mask isolating the sub-pel fraction of a motion vector at this lowres. */
1585 const int s_mask= (2<<lowres)-1;
1589 if(s->quarter_sample){
/* Split MV into fractional (sx,sy) and integer source displacement. */
1594 sx= motion_x & s_mask;
1595 sy= motion_y & s_mask;
1596 src_x += motion_x >> (lowres+1);
1597 src_y += motion_y >> (lowres+1);
1599 src += src_y * stride + src_x;
/* Unsigned compare catches negative positions as well as overruns;
 * (!!sx)/(!!sy) account for the extra sample bilinear filtering reads. */
1601 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1602 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1603 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1604 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1605 src= s->edge_emu_buffer;
/* Rescale the fraction to the 0..7 range the MC kernel expects. */
1609 sx= (sx << 2) >> lowres;
1610 sy= (sy << 2) >> lowres;
1613 pix_op[op_index](dest, src, stride, h, sx, sy);
1617 /* apply one mpeg motion vector to the three components */
/* Lowres variant: performs luma and chroma MC for one (possibly field-based)
 * MPEG motion vector, with per-format chroma MV derivation (H.263 rounding,
 * H.261 full-pel, MPEG-1/2 halved) and edge emulation when needed. */
1618 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1619 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1620 int field_based, int bottom_field, int field_select,
1621 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1622 int motion_x, int motion_y, int h, int mb_y)
1624 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1625 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1626 const int lowres= s->avctx->lowres;
1627 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1628 const int block_s= 8>>lowres;
1629 const int s_mask= (2<<lowres)-1;
1630 const int h_edge_pos = s->h_edge_pos >> lowres;
1631 const int v_edge_pos = s->v_edge_pos >> lowres;
/* Field-based MC addresses one field by doubling strides. */
1632 linesize = s->current_picture.linesize[0] << field_based;
1633 uvlinesize = s->current_picture.linesize[1] << field_based;
1635 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1641 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
/* Luma: split MV into sub-pel fraction and integer source position. */
1644 sx= motion_x & s_mask;
1645 sy= motion_y & s_mask;
1646 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1647 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* Chroma MV derivation differs per output format: */
1649 if (s->out_format == FMT_H263) {
1650 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1651 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1654 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1657 uvsx = (2*mx) & s_mask;
1658 uvsy = (2*my) & s_mask;
1659 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1660 uvsrc_y = mb_y*block_s + (my >> lowres);
/* MPEG-1/2 style: halve MV per subsampled chroma axis. */
1662 if(s->chroma_y_shift){
1667 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1668 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1670 if(s->chroma_x_shift){
1674 uvsy = motion_y & s_mask;
1676 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1679 uvsx = motion_x & s_mask;
1680 uvsy = motion_y & s_mask;
1687 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1688 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1689 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Edge emulation when the read would leave the padded picture area. */
1691 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1692 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1693 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1694 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1695 ptr_y = s->edge_emu_buffer;
1696 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1697 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1698 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1699 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1700 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1701 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1707 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1708 dest_y += s->linesize;
1709 dest_cb+= s->uvlinesize;
1710 dest_cr+= s->uvlinesize;
/* Reading from the other field: advance source by one frame line. */
1714 ptr_y += s->linesize;
1715 ptr_cb+= s->uvlinesize;
1716 ptr_cr+= s->uvlinesize;
/* Rescale fractions and run the MC kernels. */
1719 sx= (sx << 2) >> lowres;
1720 sy= (sy << 2) >> lowres;
1721 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1723 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1724 uvsx= (uvsx << 2) >> lowres;
1725 uvsy= (uvsy << 2) >> lowres;
1726 if(h >> s->chroma_y_shift){
1727 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1728 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1731 //FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (four 8x8 luma vectors) macroblocks at
 * lowres: builds a single chroma MV with the H.263 special rounding, then
 * interpolates both chroma planes with optional edge emulation. */
1734 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1735 uint8_t *dest_cb, uint8_t *dest_cr,
1736 uint8_t **ref_picture,
1737 h264_chroma_mc_func *pix_op,
1739 const int lowres= s->avctx->lowres;
1740 const int op_index= FFMIN(lowres, 2);
1741 const int block_s= 8>>lowres;
1742 const int s_mask= (2<<lowres)-1;
/* Chroma edges: one extra shift for 4:2:0 subsampling. */
1743 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1744 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1745 int emu=0, src_x, src_y, offset, sx, sy;
1748 if(s->quarter_sample){
1753 /* In case of 8X8, we construct a single chroma motion vector
1754 with a special rounding */
1755 mx= ff_h263_round_chroma(mx);
1756 my= ff_h263_round_chroma(my);
1760 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1761 src_y = s->mb_y*block_s + (my >> (lowres+1));
1763 offset = src_y * s->uvlinesize + src_x;
1764 ptr = ref_picture[1] + offset;
1765 if(s->flags&CODEC_FLAG_EMU_EDGE){
1766 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1767 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1768 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1769 ptr= s->edge_emu_buffer;
/* Rescale the sub-pel fraction for the MC kernel, then interpolate Cb. */
1773 sx= (sx << 2) >> lowres;
1774 sy= (sy << 2) >> lowres;
1775 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and (if needed) the same edge emulation. */
1777 ptr = ref_picture[2] + offset;
1779 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1780 ptr= s->edge_emu_buffer;
1782 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1786 * motion compensation of a single macroblock
1788 * @param dest_y luma destination pointer
1789 * @param dest_cb chroma cb/u destination pointer
1790 * @param dest_cr chroma cr/v destination pointer
1791 * @param dir direction (0->forward, 1->backward)
1792 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1793 * @param pix_op halfpel motion compensation function (average or put normally)
1794 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1796 static inline void MPV_motion_lowres(MpegEncContext *s,
1797 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1798 int dir, uint8_t **ref_picture,
1799 h264_chroma_mc_func *pix_op)
1803 const int lowres= s->avctx->lowres;
1804 const int block_s= 8>>lowres;
/* Dispatch on MV type: 16x16, 8x8 (4MV), field, 16x8, dual-prime. */
1809 switch(s->mv_type) {
/* MV_TYPE_16X16: one vector covers the whole macroblock. */
1811 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1813 ref_picture, pix_op,
1814 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* MV_TYPE_8X8: one vector per 8x8 luma block; chroma gets a single
 * averaged vector afterwards (chroma_4mv_motion_lowres). */
1820 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1821 ref_picture[0], 0, 0,
1822 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1823 s->width, s->height, s->linesize,
1824 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1825 block_s, block_s, pix_op,
1826 s->mv[dir][i][0], s->mv[dir][i][1]);
/* Accumulate luma MVs to derive the single chroma MV. */
1828 mx += s->mv[dir][i][0];
1829 my += s->mv[dir][i][1];
1832 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1833 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* MV_TYPE_FIELD: in frame pictures, MC each field separately (half height). */
1836 if (s->picture_structure == PICT_FRAME) {
1838 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1839 1, 0, s->field_select[dir][0],
1840 ref_picture, pix_op,
1841 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1843 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1844 1, 1, s->field_select[dir][1],
1845 ref_picture, pix_op,
1846 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* Field picture referencing the opposite parity of the current frame:
 * take the planes from the frame being decoded. */
1848 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1849 ref_picture= s->current_picture_ptr->data;
1852 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1853 0, 0, s->field_select[dir][0],
1854 ref_picture, pix_op,
1855 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* MV_TYPE_16X8: two vectors, upper and lower 16x8 halves. */
1860 uint8_t ** ref2picture;
1862 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1863 ref2picture= ref_picture;
1865 ref2picture= s->current_picture_ptr->data;
1868 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1869 0, 0, s->field_select[dir][i],
1870 ref2picture, pix_op,
1871 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1873 dest_y += 2*block_s*s->linesize;
1874 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1875 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* MV_TYPE_DMV (dual prime): put the first prediction, then average the
 * opposite-parity prediction on top. */
1879 if(s->picture_structure == PICT_FRAME){
1883 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1885 ref_picture, pix_op,
1886 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1888 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1892 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1893 0, 0, s->picture_structure != i+1,
1894 ref_picture, pix_op,
1895 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1897 // after put we make avg of the same block
1898 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1900 //opposite parity is always in the same frame if this is second field
1901 if(!s->first_field){
1902 ref_picture = s->current_picture_ptr->data;
1912 * find the lowest MB row referenced in the MVs
/* Used by frame threading to know how far the reference frame must be
 * decoded before this MB's motion compensation can run. Only handles
 * full-frame pictures; field pictures conservatively return the last row. */
1914 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift normalises half-pel MVs to the quarter-pel scale. */
1916 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1917 int my, off, i, mvs;
1919 if (s->picture_structure != PICT_FRAME) goto unhandled;
1921 switch (s->mv_type) {
/* Track the extreme vertical displacements over all MVs of this MB. */
1935 for (i = 0; i < mvs; i++) {
1936 my = s->mv[dir][i][1]<<qpel_shift;
1937 my_max = FFMAX(my_max, my);
1938 my_min = FFMIN(my_min, my);
/* Convert the worst-case quarter-pel displacement to whole MB rows,
 * rounding up (+63 before >>6 = /64: 16 pixels * 4 quarter-pels). */
1941 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1943 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Fallback: require the whole reference frame. */
1945 return s->mb_height-1;
1948 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT to dest. */
1949 static inline void put_dct(MpegEncContext *s,
1950 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1952 s->dct_unquantize_intra(s, block, i, qscale);
1953 s->dsp.idct_put (dest, line_size, block);
1956 /* add block[] to dest[] */
/* Add an already-dequantized block's IDCT onto dest; skipped entirely when
 * the block has no coded coefficients (block_last_index < 0). */
1957 static inline void add_dct(MpegEncContext *s,
1958 DCTELEM *block, int i, uint8_t *dest, int line_size)
1960 if (s->block_last_index[i] >= 0) {
1961 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT onto dest; no-op when the
 * block has no coded coefficients. */
1965 static inline void add_dequant_dct(MpegEncContext *s,
1966 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1968 if (s->block_last_index[i] >= 0) {
1969 s->dct_unquantize_inter(s, block, i, qscale);
1971 s->dsp.idct_add (dest, line_size, block);
1976 * cleans dc, ac, coded_block for the current non intra MB
/* Resets the intra prediction state (DC predictors to 1024, AC predictors
 * to 0, MSMPEG4 coded_block flags) for the current macroblock and its
 * right/below neighbours, then clears the mbintra flag. */
1978 void ff_clean_intra_table_entries(MpegEncContext *s)
1980 int wrap = s->b8_stride;
1981 int xy = s->block_index[0];
/* Luma DC predictors: reset current + the three 8x8 neighbours. */
1984 s->dc_val[0][xy + 1 ] =
1985 s->dc_val[0][xy + wrap] =
1986 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Luma AC predictors: two rows of two 8x8 blocks (16 int16 each). */
1988 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1989 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1990 if (s->msmpeg4_version>=3) {
1991 s->coded_block[xy ] =
1992 s->coded_block[xy + 1 ] =
1993 s->coded_block[xy + wrap] =
1994 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma uses the macroblock grid (mb_stride), not the 8x8 grid. */
1997 wrap = s->mb_stride;
1998 xy = s->mb_x + s->mb_y * wrap;
2000 s->dc_val[2][xy] = 1024;
2002 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2003 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2005 s->mbintra_table[xy]= 0;
2008 /* generic function called after a macroblock has been parsed by the
2009 decoder or after it has been encoded by the encoder.
2011 Important variables used:
2012 s->mb_intra : true if intra macroblock
2013 s->mv_dir : motion vector direction
2014 s->mv_type : motion vector type
2015 s->mv : motion vector
2016 s->interlaced_dct : true if interlaced dct used (mpeg2)
2018 static av_always_inline
2019 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2020 int lowres_flag, int is_mpeg12)
2022 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2023 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2024 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2028 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2029 /* save DCT coefficients */
2031 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
2032 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2034 for(j=0; j<64; j++){
2035 *dct++ = block[i][s->dsp.idct_permutation[j]];
2036 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2038 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2042 s->current_picture.qscale_table[mb_xy]= s->qscale;
2044 /* update DC predictors for P macroblocks */
2046 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2047 if(s->mbintra_table[mb_xy])
2048 ff_clean_intra_table_entries(s);
2052 s->last_dc[2] = 128 << s->intra_dc_precision;
2055 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2056 s->mbintra_table[mb_xy]=1;
2058 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2059 uint8_t *dest_y, *dest_cb, *dest_cr;
2060 int dct_linesize, dct_offset;
2061 op_pixels_func (*op_pix)[4];
2062 qpel_mc_func (*op_qpix)[16];
2063 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2064 const int uvlinesize= s->current_picture.linesize[1];
2065 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2066 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2068 /* avoid copy if macroblock skipped in last frame too */
2069 /* skip only during decoding as we might trash the buffers during encoding a bit */
2071 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2072 const int age= s->current_picture.age;
2076 if (s->mb_skipped) {
2078 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2080 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2081 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2083 /* if previous was skipped too, then nothing to do ! */
2084 if (*mbskip_ptr >= age && s->current_picture.reference){
2087 } else if(!s->current_picture.reference){
2088 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2089 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2091 *mbskip_ptr = 0; /* not skipped */
2095 dct_linesize = linesize << s->interlaced_dct;
2096 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
2100 dest_cb= s->dest[1];
2101 dest_cr= s->dest[2];
2103 dest_y = s->b_scratchpad;
2104 dest_cb= s->b_scratchpad+16*linesize;
2105 dest_cr= s->b_scratchpad+32*linesize;
2109 /* motion handling */
2110 /* decoding or more than one mb_type (MC was already done otherwise) */
2113 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2114 if (s->mv_dir & MV_DIR_FORWARD) {
2115 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2117 if (s->mv_dir & MV_DIR_BACKWARD) {
2118 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
2123 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2125 if (s->mv_dir & MV_DIR_FORWARD) {
2126 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
2127 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2129 if (s->mv_dir & MV_DIR_BACKWARD) {
2130 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
2133 op_qpix= s->me.qpel_put;
2134 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2135 op_pix = s->dsp.put_pixels_tab;
2137 op_pix = s->dsp.put_no_rnd_pixels_tab;
2139 if (s->mv_dir & MV_DIR_FORWARD) {
2140 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2141 op_pix = s->dsp.avg_pixels_tab;
2142 op_qpix= s->me.qpel_avg;
2144 if (s->mv_dir & MV_DIR_BACKWARD) {
2145 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2150 /* skip dequant / idct if we are really late ;) */
2151 if(s->avctx->skip_idct){
2152 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2153 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2154 || s->avctx->skip_idct >= AVDISCARD_ALL)
2158 /* add dct residue */
2159 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2160 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2161 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2162 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2163 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2164 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2166 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2167 if (s->chroma_y_shift){
2168 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2169 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2173 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2174 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2175 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2176 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2179 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2180 add_dct(s, block[0], 0, dest_y , dct_linesize);
2181 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2182 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2183 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2185 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2186 if(s->chroma_y_shift){//Chroma420
2187 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2188 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2191 dct_linesize = uvlinesize << s->interlaced_dct;
2192 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2194 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2195 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2196 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2197 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2198 if(!s->chroma_x_shift){//Chroma444
2199 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2200 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2201 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2202 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2207 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2208 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2211 /* dct only in intra block */
2212 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2213 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2214 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2215 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2216 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2218 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2219 if(s->chroma_y_shift){
2220 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2221 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2225 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2226 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2227 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2228 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2232 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2233 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2234 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2235 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2237 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2238 if(s->chroma_y_shift){
2239 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2240 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2243 dct_linesize = uvlinesize << s->interlaced_dct;
2244 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2246 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2247 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2248 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2249 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2250 if(!s->chroma_x_shift){//Chroma444
2251 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2252 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2253 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2254 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2262 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2263 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2264 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Decode (reconstruct) one macroblock.
 * Dispatches to a specialized variant of MPV_decode_mb_internal(): the
 * third argument follows s->avctx->lowres (reduced-resolution decoding)
 * and the trailing constant distinguishes the FMT_MPEG1 path — presumably
 * the is_mpeg12 template flag; confirm against MPV_decode_mb_internal.
 * NOTE(review): some original lines (braces / build-config guards) are
 * elided in this excerpt; only the visible dispatch logic is documented.
 */
2269 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2271 if(s->out_format == FMT_MPEG1) {
/* MPEG-1/2 path */
2272 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2273 else MPV_decode_mb_internal(s, block, 0, 1);
/* all other codecs */
2276 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2277 else MPV_decode_mb_internal(s, block, 0, 0);
2282 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Called after a horizontal band of rows [y, y+h) has been decoded:
 * pads the picture edges where needed and forwards the band to the
 * user's draw_horiz_band() callback, if any.
 */
2284 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2285 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge padding: replicate border pixels so unrestricted motion vectors
 * can reference samples outside the coded area.  Skipped for hwaccel /
 * VDPAU output, non-reference frames, and user-managed edges
 * (CODEC_FLAG_EMU_EDGE). */
2291 if (!s->avctx->hwaccel
2292 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2293 && s->unrestricted_mv
2294 && s->current_picture.reference
2296 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2297 int sides = 0, edge_h;
2298 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2299 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* only pad the horizontal borders this band actually touches */
2300 if (y==0) sides |= EDGE_TOP;
2301 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* clamp so we never pad past the coded picture bottom */
2303 edge_h= FFMIN(h, s->v_edge_pos - y);
/* luma, then the two chroma planes (shifted by the subsampling factors) */
2305 s->dsp.draw_edges(s->current_picture_ptr->data[0] + y *s->linesize , s->linesize,
2306 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
2307 s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2308 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2309 s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2310 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* reduce h for the last band so it stays within the display height */
2313 h= FFMIN(h, s->avctx->height - y);
/* for field pictures, report only once per frame unless the caller
 * explicitly allows per-field bands */
2315 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2317 if (s->avctx->draw_horiz_band) {
/* pick the frame the band belongs to: B-frames / low-delay / coded-order
 * use the current picture, otherwise the previous (display-order) one */
2321 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2322 src= (AVFrame*)s->current_picture_ptr;
2323 else if(s->last_picture_ptr)
2324 src= (AVFrame*)s->last_picture_ptr;
/* NOTE(review): the offset[] setup for this B-frame/frame-picture case is
 * partially elided in this excerpt — confirm against full source */
2328 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2334 offset[0]= y * s->linesize;
2336 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2342 s->avctx->draw_horiz_band(s->avctx, src, offset,
2343 y, s->picture_structure, h);
/*
 * Initialize the per-macroblock block_index[] entries (positions of the
 * four luma and two chroma 8x8 blocks in the block-level tables) and the
 * dest[] pixel pointers for the macroblock at (s->mb_x, s->mb_y).
 */
2347 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2348 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2349 const int uvlinesize= s->current_picture.linesize[1];
/* log2 of the macroblock size in pixels; lowres shrinks it */
2350 const int mb_size= 4 - s->avctx->lowres;
/* luma blocks 0..3 in the 8x8 (b8) grid: two per row, two rows */
2352 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2353 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2354 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2355 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma blocks 4/5 live in mb-grid areas placed after the luma area */
2356 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2357 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2358 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* horizontal (mb_x) part of the destination pointers */
2360 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2361 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2362 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* add the vertical (mb_y) offset — skipped in the B-frame + draw_horiz_band
 * frame-picture case, presumably because dest is set elsewhere there;
 * confirm against full source */
2364 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2366 if(s->picture_structure==PICT_FRAME){
2367 s->dest[0] += s->mb_y * linesize << mb_size;
2368 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2369 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows interleave, so only half the mb_y offset applies */
2371 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2372 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2373 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2374 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/*
 * Flush the decoder state (e.g. on seek): release all internally- or
 * user-allocated picture buffers, drop the current/last/next picture
 * references, and reset the bitstream parser state.
 */
2379 void ff_mpeg_flush(AVCodecContext *avctx){
2381 MpegEncContext *s = avctx->priv_data;
/* nothing to flush before the context is fully initialized */
2383 if(s==NULL || s->picture==NULL)
2386 for(i=0; i<s->picture_count; i++){
2387 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2388 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2389 free_frame_buffer(s, &s->picture[i]);
/* all reference-picture pointers are now dangling — clear them */
2391 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2393 s->mb_x= s->mb_y= 0;
/* reset the start-code parser so stale partial data is not reused */
2396 s->parse_context.state= -1;
2397 s->parse_context.frame_start_found= 0;
2398 s->parse_context.overread= 0;
2399 s->parse_context.overread_index= 0;
2400 s->parse_context.index= 0;
2401 s->parse_context.last_index= 0;
2402 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra-block dequantization (C reference implementation).
 * DC (block[0]) is scaled by the DC scaler; AC coefficients are scaled by
 * qscale and the intra quantization matrix, then forced odd via
 * (level - 1) | 1 (MPEG-1 mismatch control).
 * NOTE(review): several original lines (luma/chroma DC selection, the
 * negative-level branch structure, saturation and write-back) are elided
 * in this excerpt — the two identical computations below are presumably
 * the negative and positive branches; confirm against full source.
 */
2406 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2407 DCTELEM *block, int n, int qscale)
2409 int i, level, nCoeffs;
2410 const uint16_t *quant_matrix;
2412 nCoeffs= s->block_last_index[n];
/* DC coefficient: luma vs chroma scaler chosen by block index n */
2415 block[0] = block[0] * s->y_dc_scale;
2417 block[0] = block[0] * s->c_dc_scale;
2418 /* XXX: only mpeg1 */
2419 quant_matrix = s->intra_matrix;
/* AC coefficients, walked in scan (permuted) order */
2420 for(i=1;i<=nCoeffs;i++) {
2421 int j= s->intra_scantable.permutated[i];
2426 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2427 level = (level - 1) | 1;
2430 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2431 level = (level - 1) | 1;
/*
 * MPEG-1 inter-block dequantization (C reference implementation).
 * All coefficients (including index 0) use the inter matrix with the
 * (2*level + 1) reconstruction formula, then are forced odd via
 * (level - 1) | 1 (MPEG-1 mismatch control).
 * NOTE(review): sign handling, saturation and write-back lines are elided
 * in this excerpt — the two identical computations below are presumably
 * the negative and positive branches; confirm against full source.
 */
2438 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2439 DCTELEM *block, int n, int qscale)
2441 int i, level, nCoeffs;
2442 const uint16_t *quant_matrix;
2444 nCoeffs= s->block_last_index[n];
2446 quant_matrix = s->inter_matrix;
/* loop starts at 0: inter blocks have no separately-coded DC */
2447 for(i=0; i<=nCoeffs; i++) {
2448 int j= s->intra_scantable.permutated[i];
2453 level = (((level << 1) + 1) * qscale *
2454 ((int) (quant_matrix[j]))) >> 4;
2455 level = (level - 1) | 1;
2458 level = (((level << 1) + 1) * qscale *
2459 ((int) (quant_matrix[j]))) >> 4;
2460 level = (level - 1) | 1;
/*
 * MPEG-2 intra-block dequantization (C reference implementation).
 * Same matrix scaling as the MPEG-1 intra variant but without the
 * (level - 1) | 1 oddification (MPEG-2 uses sum-based mismatch control
 * instead).  With alternate scan all 63 AC coefficients are processed.
 * NOTE(review): DC luma/chroma selection, sign branches and write-back
 * lines are elided in this excerpt — confirm against full source.
 */
2467 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2468 DCTELEM *block, int n, int qscale)
2470 int i, level, nCoeffs;
2471 const uint16_t *quant_matrix;
/* alternate scan: block_last_index is unreliable, process all coeffs */
2473 if(s->alternate_scan) nCoeffs= 63;
2474 else nCoeffs= s->block_last_index[n];
/* DC coefficient: luma vs chroma scaler chosen by block index n */
2477 block[0] = block[0] * s->y_dc_scale;
2479 block[0] = block[0] * s->c_dc_scale;
2480 quant_matrix = s->intra_matrix;
2481 for(i=1;i<=nCoeffs;i++) {
2482 int j= s->intra_scantable.permutated[i];
2487 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2490 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bit-exact variant of MPEG-2 intra dequantization.  Structurally matches
 * dct_unquantize_mpeg2_intra_c; presumably it additionally tracks the
 * coefficient-sum parity for exact mismatch control — the lines that
 * would do so are elided in this excerpt, confirm against full source.
 */
2497 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2498 DCTELEM *block, int n, int qscale)
2500 int i, level, nCoeffs;
2501 const uint16_t *quant_matrix;
/* alternate scan: block_last_index is unreliable, process all coeffs */
2504 if(s->alternate_scan) nCoeffs= 63;
2505 else nCoeffs= s->block_last_index[n];
/* DC coefficient: luma vs chroma scaler chosen by block index n */
2508 block[0] = block[0] * s->y_dc_scale;
2510 block[0] = block[0] * s->c_dc_scale;
2511 quant_matrix = s->intra_matrix;
2512 for(i=1;i<=nCoeffs;i++) {
2513 int j= s->intra_scantable.permutated[i];
2518 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2521 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter-block dequantization (C reference implementation).
 * (2*level + 1) reconstruction with the inter matrix, like MPEG-1 inter,
 * but without the (level - 1) | 1 oddification (MPEG-2 uses sum-based
 * mismatch control).  With alternate scan all 64 coefficients are
 * processed.
 * NOTE(review): sign branches, saturation/mismatch accumulation and
 * write-back lines are elided in this excerpt — confirm against full
 * source.
 */
2530 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2531 DCTELEM *block, int n, int qscale)
2533 int i, level, nCoeffs;
2534 const uint16_t *quant_matrix;
/* alternate scan: block_last_index is unreliable, process all coeffs */
2537 if(s->alternate_scan) nCoeffs= 63;
2538 else nCoeffs= s->block_last_index[n];
2540 quant_matrix = s->inter_matrix;
/* loop starts at 0: inter blocks have no separately-coded DC */
2541 for(i=0; i<=nCoeffs; i++) {
2542 int j= s->intra_scantable.permutated[i];
2547 level = (((level << 1) + 1) * qscale *
2548 ((int) (quant_matrix[j]))) >> 4;
2551 level = (((level << 1) + 1) * qscale *
2552 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263 intra-block dequantization (C reference implementation).
 * Matrix-free: level' = level * qmul +/- qadd with qmul = 2*qscale and
 * qadd = (qscale - 1) | 1 (the qmul assignment line is elided in this
 * excerpt — confirm against full source).  DC is scaled separately.
 */
2561 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2562 DCTELEM *block, int n, int qscale)
2564 int i, level, qmul, qadd;
2567 assert(s->block_last_index[n]>=0);
/* DC coefficient: luma vs chroma scaler chosen by block index n */
2573 block[0] = block[0] * s->y_dc_scale;
2575 block[0] = block[0] * s->c_dc_scale;
/* force qadd odd, as H.263 reconstruction requires */
2576 qadd = (qscale - 1) | 1;
/* last coefficient in raster order for the current scan */
2583 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2585 for(i=1; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive ones add it (sign-test lines
 * are elided in this excerpt) */
2589 level = level * qmul - qadd;
2591 level = level * qmul + qadd;
/*
 * H.263 inter-block dequantization (C reference implementation).
 * Same qmul/qadd reconstruction as the intra variant but with no special
 * DC handling: the loop starts at coefficient 0.
 */
2598 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2599 DCTELEM *block, int n, int qscale)
2601 int i, level, qmul, qadd;
2604 assert(s->block_last_index[n]>=0);
/* force qadd odd, as H.263 reconstruction requires (the qmul assignment
 * line is elided in this excerpt — confirm against full source) */
2606 qadd = (qscale - 1) | 1;
/* last coefficient in raster order for the current scan */
2609 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2611 for(i=0; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive ones add it (sign-test lines
 * are elided in this excerpt) */
2615 level = level * qmul - qadd;
2617 level = level * qmul + qadd;
2625 * set qscale and update qscale dependent variables.
/*
 * Clamps qscale to the legal range (the upper-bound test is visible below;
 * the lower-bound clamp lines are elided in this excerpt — confirm against
 * full source), then refreshes the derived chroma qscale and the luma and
 * chroma DC scalers from their lookup tables.
 */
2627 void ff_set_qscale(MpegEncContext * s, int qscale)
2631 else if (qscale > 31)
/* chroma may use a different (possibly nonlinear) qscale mapping */
2635 s->chroma_qscale= s->chroma_qscale_table[qscale];
2637 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2638 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2641 void MPV_report_decode_progress(MpegEncContext *s)
2643 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2644 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);