2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
33 #include "mpegvideo.h"
34 #include "mpegvideo_common.h"
38 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization routines
 * (C reference implementations). ff_dct_common_init() installs these as the
 * default function pointers; arch-specific init may override them later. */
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 DCTELEM *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
56 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
57 DCTELEM *block, int n, int qscale);
60 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping from luma qscale to chroma qscale (chroma_qscale = qscale).
 * Codecs with a non-linear chroma quantizer install their own table instead. */
66 static const uint8_t ff_default_chroma_qscale_table[32]={
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
68 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 intra-DC scaler: constant 8 for every qscale
 * (indexed by qscale; 128 entries to be safe for any index used). */
71 const uint8_t ff_mpeg1_dc_scale_table[128]={
72 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scaler for intra_dc_precision == 1 (9-bit DC): constant 4. */
79 static const uint8_t mpeg2_dc_scale_table1[128]={
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scaler for intra_dc_precision == 2 (10-bit DC): constant 2. */
87 static const uint8_t mpeg2_dc_scale_table2[128]={
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
90 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scaler for intra_dc_precision == 3 (11-bit DC): constant 1. */
95 static const uint8_t mpeg2_dc_scale_table3[128]={
96 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scaler lookup, indexed by MPEG-2 intra_dc_precision (0..3).
 * Index 0 reuses the MPEG-1 table (scale 8). */
103 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Pixel-format list offered by software 4:2:0 decoders.
 * NOTE(review): entries elided here — presumably PIX_FMT_YUV420P followed by a
 * PIX_FMT_NONE terminator; confirm against the full file. */
110 const enum PixelFormat ff_pixfmt_list_420[] = {
/* Pixel-format list including hardware-acceleration formats for 4:2:0 decoders.
 * NOTE(review): entries elided here — must end with a PIX_FMT_NONE terminator;
 * confirm against the full file. */
115 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start-code prefix (0x000001), keeping a rolling
 * 32-bit history in *state so a prefix split across buffer boundaries is still
 * found on the next call. Returns a pointer just past the start code (or end). */
122 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
// Byte-at-a-time warm-up: shift the new byte into *state; a prefix is found
// when the previous three bytes were 00 00 01 (tmp == 0x100).
130 uint32_t tmp= *state << 8;
131 *state= tmp + *(p++);
132 if(tmp == 0x100 || p==end)
// Fast skip loop: look at every third byte and stride past runs that cannot
// contain 00 00 01 (a byte > 1 rules out the surrounding positions).
137 if (p[-1] > 1 ) p+= 3;
138 else if(p[-2] ) p+= 2;
139 else if(p[-3]|(p[-1]-1)) p++;
152 /* init common dct for both encoder and decoder */
/* Installs the C reference dequantizers, lets each architecture-specific
 * MPV_common_init_* override them with optimized versions, then builds the
 * scan tables permuted for the IDCT's coefficient ordering. */
153 av_cold int ff_dct_common_init(MpegEncContext *s)
155 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
156 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
157 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
158 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
159 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
// Bit-exact output requested: use the non-mismatch-optimized intra path.
160 if(s->flags & CODEC_FLAG_BITEXACT)
161 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
162 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// Arch-specific overrides (each call is compiled in only for its platform).
165 MPV_common_init_mmx(s);
167 MPV_common_init_axp(s);
169 MPV_common_init_mlib(s);
171 MPV_common_init_mmi(s);
173 MPV_common_init_arm(s);
175 MPV_common_init_altivec(s);
177 MPV_common_init_bfin(s);
180 /* load & permutate scantables
181 note: only wmv uses different ones
// Interlaced content uses the alternate vertical scan for both tables.
183 if(s->alternate_scan){
184 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
185 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copies *src into *dst and marks dst as a copy so the buffer
 * management code never tries to release the underlying frame through it. */
196 void ff_copy_picture(Picture *dst, Picture *src){
198 dst->type= FF_BUFFER_TYPE_COPY;
202 * Releases a frame buffer
/* Returns the frame to the application via release_buffer() and frees any
 * hwaccel-private data attached to the picture. */
204 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
206 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
207 av_freep(&pic->hwaccel_picture_private);
211 * Allocates a frame buffer
/* Gets a frame from the application via get_buffer(), allocating hwaccel
 * private data first if the hwaccel requires it, then validates the result
 * (buffer present, stride consistent across calls, matching U/V strides). */
213 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
217 if (s->avctx->hwaccel) {
218 assert(!pic->hwaccel_picture_private);
219 if (s->avctx->hwaccel->priv_data_size) {
220 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
221 if (!pic->hwaccel_picture_private) {
222 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
228 r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
// A buggy get_buffer() implementation may return success yet leave the
// picture unusable — check all the invariants it must have established.
230 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
231 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
232 av_freep(&pic->hwaccel_picture_private);
// The stride must not change between frames once it is known.
236 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
237 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
238 free_frame_buffer(s, pic);
242 if (pic->linesize[1] != pic->linesize[2]) {
243 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
244 free_frame_buffer(s, pic);
252 * allocates a Picture
253 * The pixels are allocated/set by calling get_buffer() if shared=0
/* Allocates the Picture's pixel buffer (unless shared) plus all per-macroblock
 * side tables: qscale, mb_type, motion vectors, reference indices, etc.
 * On any allocation failure, jumps to 'fail' and releases the frame buffer. */
255 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
256 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
257 const int mb_array_size= s->mb_stride*s->mb_height;
258 const int b8_array_size= s->b8_stride*s->mb_height*2;
259 const int b4_array_size= s->b4_stride*s->mb_height*4;
// shared != 0: caller supplies the pixels; just tag the picture as shared.
264 assert(pic->data[0]);
265 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
266 pic->type= FF_BUFFER_TYPE_SHARED;
268 assert(!pic->data[0]);
270 if (alloc_frame_buffer(s, pic) < 0)
// Cache the strides of the first allocated picture; later allocations are
// checked against these in alloc_frame_buffer().
273 s->linesize = pic->linesize[0];
274 s->uvlinesize= pic->linesize[1];
// First-time allocation of the per-MB side tables (skipped on reuse).
277 if(pic->qscale_table==NULL){
279 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
280 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
281 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
285 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
286 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
287 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
// H.264 stores motion vectors at 4x4-block granularity (subsample_log2 == 2)...
288 if(s->out_format == FMT_H264){
290 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
291 pic->motion_val[i]= pic->motion_val_base[i]+4;
292 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
294 pic->motion_subsample_log2= 2;
// ...while H.263-family/encoding/MV-debug use 8x8 granularity (log2 == 3).
295 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
297 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
298 pic->motion_val[i]= pic->motion_val_base[i]+4;
299 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
301 pic->motion_subsample_log2= 3;
303 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
304 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
306 pic->qstride= s->mb_stride;
307 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
310 /* It might be nicer if the application would keep track of these
311 * but it would require an API change. */
// Shift the recent-picture-type history and age the buffer accordingly.
312 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
313 s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
314 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
315 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
318 fail: //for the FF_ALLOCZ_OR_GOTO macro
320 free_frame_buffer(s, pic);
325 * deallocates a picture
/* Frees the Picture's pixel buffer (unless it is application-shared) and all
 * of its per-macroblock side tables allocated in ff_alloc_picture(). */
327 static void free_picture(MpegEncContext *s, Picture *pic){
// Only non-shared pictures own their pixel buffer.
330 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
331 free_frame_buffer(s, pic);
334 av_freep(&pic->mb_var);
335 av_freep(&pic->mc_mb_var);
336 av_freep(&pic->mb_mean);
337 av_freep(&pic->mbskip_table);
338 av_freep(&pic->qscale_table);
339 av_freep(&pic->mb_type_base);
340 av_freep(&pic->dct_coeff);
341 av_freep(&pic->pan_scan);
344 av_freep(&pic->motion_val_base[i]);
345 av_freep(&pic->ref_index[i]);
348 if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocates the per-thread scratch buffers of a (possibly duplicated)
 * MpegEncContext: edge emulation buffer, ME scratchpads, ME maps, optional
 * noise-reduction accumulators and the DCT block array.
 * Returns 0 on success, -1 on allocation failure (cleanup is deferred to
 * MPV_common_end()). */
357 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
360 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
361 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
362 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
364 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
365 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
// The four scratchpad pointers deliberately alias one allocation — they are
// never needed at the same time (obmc is offset by 16 bytes).
366 s->me.temp= s->me.scratchpad;
367 s->rd_scratchpad= s->me.scratchpad;
368 s->b_scratchpad= s->me.scratchpad;
369 s->obmc_scratchpad= s->me.scratchpad + 16;
371 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
372 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
373 if(s->avctx->noise_reduction){
374 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
377 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
378 s->block= s->blocks[0];
381 s->pblocks[i] = &s->block[i];
385 return -1; //free() through MPV_common_end()
/* Frees everything allocated by init_duplicate_context() for one thread
 * context; aliasing scratchpad pointers are only NULLed, not freed twice. */
388 static void free_duplicate_context(MpegEncContext *s){
391 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
392 av_freep(&s->me.scratchpad);
396 s->obmc_scratchpad= NULL;
398 av_freep(&s->dct_error_sum);
399 av_freep(&s->me.map);
400 av_freep(&s->me.score_map);
401 av_freep(&s->blocks);
/* Copies only the per-thread fields (buffers, ME state) from src to bak;
 * used by ff_update_duplicate_context() to preserve a thread's own buffers
 * across a whole-struct memcpy from the main context. */
405 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
406 #define COPY(a) bak->a= src->a
407 COPY(allocated_edge_emu_buffer);
408 COPY(edge_emu_buffer);
413 COPY(obmc_scratchpad);
420 COPY(me.map_generation);
/* Refreshes a thread context from the main context: save dst's per-thread
 * buffers, overwrite dst wholesale with src, then restore the saved buffers
 * and re-point pblocks[] into dst's own block array. */
428 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
431 //FIXME copy only needed parts
433 backup_duplicate_context(&bak, dst);
434 memcpy(dst, src, sizeof(MpegEncContext));
435 backup_duplicate_context(dst, &bak);
// pblocks must point into dst's block array, not src's.
437 dst->pblocks[i] = &dst->block[i];
439 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
443 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
444 * the changed fields will not depend upon the prior state of the MpegEncContext.
446 void MPV_common_defaults(MpegEncContext *s){
448 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
449 s->chroma_qscale_table= ff_default_chroma_qscale_table;
// Default to progressive full-frame coding until headers say otherwise.
450 s->progressive_frame= 1;
451 s->progressive_sequence= 1;
452 s->picture_structure= PICT_FRAME;
454 s->coded_picture_number = 0;
455 s->picture_number = 0;
456 s->input_picture_number = 0;
458 s->picture_in_gop_number = 0;
465 * sets the given MpegEncContext to defaults for decoding.
466 * the changed fields will not depend upon the prior state of the MpegEncContext.
468 void MPV_decode_defaults(MpegEncContext *s){
469 MPV_common_defaults(s);
473 * init common structure for both encoder and decoder.
474 * this assumes that some variables like width/height are already set
/* Validates parameters, derives macroblock geometry from width/height,
 * allocates all shared tables (MV tables, qmatrices, picture array, error
 * resilience tables, ...) and sets up the per-thread duplicate contexts.
 * Returns 0 on success; allocation failures go through the 'fail' label. */
476 av_cold int MPV_common_init(MpegEncContext *s)
478 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
// Interlaced MPEG-2 needs mb_height rounded to a multiple of two macroblock
// rows (32 luma lines) so field macroblocks pair up.
480 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
481 s->mb_height = (s->height + 31) / 32 * 2;
483 s->mb_height = (s->height + 15) / 16;
485 if(s->avctx->pix_fmt == PIX_FMT_NONE){
486 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
// Slice threading cannot use more threads than macroblock rows.
490 if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
491 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
495 if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
498 dsputil_init(&s->dsp, s->avctx);
499 ff_dct_common_init(s);
501 s->flags= s->avctx->flags;
502 s->flags2= s->avctx->flags2;
// Strides include one extra column so edge macroblocks have valid neighbors.
504 s->mb_width = (s->width + 15) / 16;
505 s->mb_stride = s->mb_width + 1;
506 s->b8_stride = s->mb_width*2 + 1;
507 s->b4_stride = s->mb_width*4 + 1;
508 mb_array_size= s->mb_height * s->mb_stride;
509 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
511 /* set chroma shifts */
512 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
513 &(s->chroma_y_shift) );
515 /* set default edge pos, will be overriden in decode_header if needed */
516 s->h_edge_pos= s->mb_width*16;
517 s->v_edge_pos= s->mb_height*16;
519 s->mb_num = s->mb_width * s->mb_height;
524 s->block_wrap[3]= s->b8_stride;
526 s->block_wrap[5]= s->mb_stride;
528 y_size = s->b8_stride * (2 * s->mb_height + 1);
529 c_size = s->mb_stride * (s->mb_height + 1);
530 yc_size = y_size + 2 * c_size;
532 /* convert fourcc to upper case */
533 s->codec_tag= toupper( s->avctx->codec_tag &0xFF)
534 + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
535 + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
536 + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
538 s->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
539 + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
540 + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
541 + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
543 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
// Maps raster macroblock index -> position in the stride-padded arrays.
545 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
546 for(y=0; y<s->mb_height; y++){
547 for(x=0; x<s->mb_width; x++){
548 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
551 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
554 /* Allocate MV tables */
555 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
556 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
557 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
558 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
559 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
560 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
// Working pointers skip the first padded row/column of each base table.
561 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
562 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
563 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
564 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
565 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
566 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
568 if(s->msmpeg4_version){
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
571 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
573 /* Allocate MB type table */
574 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
576 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
578 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
579 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
580 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
581 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
582 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
583 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
585 if(s->avctx->noise_reduction){
586 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
589 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
590 for(i = 0; i < MAX_PICTURE_COUNT; i++) {
591 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
594 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
596 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
597 /* interlaced direct mode decoding tables */
602 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
603 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
605 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
606 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
607 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
609 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
612 if (s->out_format == FMT_H263) {
// AC/DC prediction state for the H.263 family (one entry per 8x8 block).
614 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
615 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
616 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
617 s->ac_val[2] = s->ac_val[1] + c_size;
620 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
621 s->coded_block= s->coded_block_base + s->b8_stride + 1;
623 /* cbp, ac_pred, pred_dir */
624 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
625 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
628 if (s->h263_pred || s->h263_plus || !s->encoding) {
630 //MN: we need these for error resilience of intra-frames
631 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
632 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
633 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
634 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 is the neutral DC predictor (128 << 3).
635 for(i=0;i<yc_size;i++)
636 s->dc_val_base[i] = 1024;
639 /* which mb is a intra block */
640 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
641 memset(s->mbintra_table, 1, mb_array_size);
643 /* init macroblock skip table */
644 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
645 //Note the +1 is for a quicker mpeg4 slice_end detection
646 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
648 s->parse_context.state= -1;
649 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
650 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
651 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
652 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
655 s->context_initialized = 1;
// Thread context 0 is the main context itself; the rest are byte copies that
// then get their own scratch buffers and mb-row ranges.
657 s->thread_context[0]= s;
658 threads = s->avctx->thread_count;
660 for(i=1; i<threads; i++){
661 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
662 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
665 for(i=0; i<threads; i++){
666 if(init_duplicate_context(s->thread_context[i], s) < 0)
668 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
669 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
678 /* init common structure for both encoder and decoder */
/* Tears down everything MPV_common_init() (and lazy allocations since then)
 * created: thread contexts, MV/side tables, pictures, parse and bitstream
 * buffers. Safe to call on a partially-initialized context since av_freep()
 * tolerates NULL. */
679 void MPV_common_end(MpegEncContext *s)
683 for(i=0; i<s->avctx->thread_count; i++){
684 free_duplicate_context(s->thread_context[i]);
// Context 0 is s itself, so only contexts 1..n-1 are freed.
686 for(i=1; i<s->avctx->thread_count; i++){
687 av_freep(&s->thread_context[i]);
690 av_freep(&s->parse_context.buffer);
691 s->parse_context.buffer_size=0;
693 av_freep(&s->mb_type);
694 av_freep(&s->p_mv_table_base);
695 av_freep(&s->b_forw_mv_table_base);
696 av_freep(&s->b_back_mv_table_base);
697 av_freep(&s->b_bidir_forw_mv_table_base);
698 av_freep(&s->b_bidir_back_mv_table_base);
699 av_freep(&s->b_direct_mv_table_base);
// The offset working pointers must be cleared by hand — av_freep() only
// NULLed the *_base pointers.
701 s->b_forw_mv_table= NULL;
702 s->b_back_mv_table= NULL;
703 s->b_bidir_forw_mv_table= NULL;
704 s->b_bidir_back_mv_table= NULL;
705 s->b_direct_mv_table= NULL;
709 av_freep(&s->b_field_mv_table_base[i][j][k]);
710 s->b_field_mv_table[i][j][k]=NULL;
712 av_freep(&s->b_field_select_table[i][j]);
713 av_freep(&s->p_field_mv_table_base[i][j]);
714 s->p_field_mv_table[i][j]=NULL;
716 av_freep(&s->p_field_select_table[i]);
719 av_freep(&s->dc_val_base);
720 av_freep(&s->ac_val_base);
721 av_freep(&s->coded_block_base);
722 av_freep(&s->mbintra_table);
723 av_freep(&s->cbp_table);
724 av_freep(&s->pred_dir_table);
726 av_freep(&s->mbskip_table);
727 av_freep(&s->prev_pict_types);
728 av_freep(&s->bitstream_buffer);
729 s->allocated_bitstream_buffer_size=0;
731 av_freep(&s->avctx->stats_out);
732 av_freep(&s->ac_stats);
733 av_freep(&s->error_status_table);
734 av_freep(&s->mb_index2xy);
735 av_freep(&s->lambda_table);
736 av_freep(&s->q_intra_matrix);
737 av_freep(&s->q_inter_matrix);
738 av_freep(&s->q_intra_matrix16);
739 av_freep(&s->q_inter_matrix16);
740 av_freep(&s->input_picture);
741 av_freep(&s->reordered_input_picture);
742 av_freep(&s->dct_offset);
745 for(i=0; i<MAX_PICTURE_COUNT; i++){
746 free_picture(s, &s->picture[i]);
749 av_freep(&s->picture);
750 s->context_initialized = 0;
753 s->current_picture_ptr= NULL;
754 s->linesize= s->uvlinesize= 0;
757 av_freep(&s->visualization_buffer[i]);
759 avcodec_default_free_buffers(s->avctx);
/* Builds the derived lookup tables of an RLTable (max_level[], max_run[],
 * index_run[]) from its run/level code lists, once per 'last' flag.
 * If static_store is non-NULL the tables are carved out of that static
 * buffer (and the call is a no-op if already initialized); otherwise they
 * are heap-allocated with av_malloc(). */
762 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
764 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
765 uint8_t index_run[MAX_RUN+1];
766 int last, run, level, start, end, i;
768 /* If table is static, we can quit if rl->max_level[0] is not NULL */
769 if(static_store && rl->max_level[0])
772 /* compute max_level[], max_run[] and index_run[] */
773 for(last=0;last<2;last++) {
// rl->n acts as the "unset" sentinel in index_run[].
782 memset(max_level, 0, MAX_RUN + 1);
783 memset(max_run, 0, MAX_LEVEL + 1);
784 memset(index_run, rl->n, MAX_RUN + 1);
785 for(i=start;i<end;i++) {
786 run = rl->table_run[i];
787 level = rl->table_level[i];
// Record only the FIRST code index seen for each run value.
788 if (index_run[run] == rl->n)
790 if (level > max_level[run])
791 max_level[run] = level;
792 if (run > max_run[level])
793 max_run[level] = run;
// Static path: slice the caller's buffer; heap path: av_malloc + memcpy.
796 rl->max_level[last] = static_store[last];
798 rl->max_level[last] = av_malloc(MAX_RUN + 1);
799 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
801 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
803 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
804 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
806 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
808 rl->index_run[last] = av_malloc(MAX_RUN + 1);
809 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expands the RLTable's VLC into rl_vlc[q] fast-path tables: for each VLC
 * table entry, resolves the decoded code into (run, level, len) so the decoder
 * can skip the second lookup. Special entries: len==0 marks an illegal code,
 * len<0 means more bits are needed, code==rl->n is the escape code. */
813 void init_vlc_rl(RLTable *rl)
825 for(i=0; i<rl->vlc.table_size; i++){
826 int code= rl->vlc.table[i][0];
827 int len = rl->vlc.table[i][1];
830 if(len==0){ // illegal code
833 }else if(len<0){ //more bits needed
837 if(code==rl->n){ //esc
841 run= rl->table_run [code] + 1;
// level is pre-scaled by the quantizer (qmul/qadd set per q elsewhere).
842 level= rl->table_level[code] * qmul + qadd;
// +192 flags "last coefficient" codes in the packed run field.
843 if(code >= rl->last) run+=192;
846 rl->rl_vlc[q][i].len= len;
847 rl->rl_vlc[q][i].level= level;
848 rl->rl_vlc[q][i].run= run;
/* Finds a free slot in s->picture[]. For shared pictures only a completely
 * untouched slot (type==0) qualifies; otherwise previously-used slots are
 * preferred (to reuse their side tables) before untouched ones.
 * Exhaustion is an internal error and aborts rather than returning -1. */
853 int ff_find_unused_picture(MpegEncContext *s, int shared){
857 for(i=0; i<MAX_PICTURE_COUNT; i++){
858 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
861 for(i=0; i<MAX_PICTURE_COUNT; i++){
862 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
864 for(i=0; i<MAX_PICTURE_COUNT; i++){
865 if(s->picture[i].data[0]==NULL) return i;
869 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
870 /* We could return -1, but the codec would crash trying to draw into a
871 * non-existing frame anyway. This is safer than waiting for a random crash.
872 * Also the return of this is never useful, an encoder must only allocate
873 * as much as allowed in the specification. This has no relationship to how
874 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
875 * enough for such valid streams).
876 * Plus, a decoder has to check stream validity and remove frames if too
877 * many reference frames are around. Waiting for "OOM" is not correct at
878 * all. Similarly, missing reference frames have to be replaced by
879 * interpolated/MC frames, anything else is a bug in the codec ...
/* Encoder-side: refreshes the per-coefficient DCT noise-reduction offsets
 * from the accumulated error sums, halving the accumulators periodically
 * (when the sample count exceeds 2^16) so they track recent content. */
885 static void update_noise_reduction(MpegEncContext *s){
888 for(intra=0; intra<2; intra++){
889 if(s->dct_count[intra] > (1<<16)){
891 s->dct_error_sum[intra][i] >>=1;
893 s->dct_count[intra] >>= 1;
// offset = strength * count / error_sum, rounded (+sum/2, +1 avoids div-by-0).
897 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
903 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
/* Per-frame setup shared by encoder and decoder: recycles old reference
 * frames, picks/allocates the current Picture, maintains the last/next
 * reference pointers, fabricates dummy references for broken streams,
 * adjusts data pointers and strides for field pictures, and selects the
 * dequantizer functions for this frame. */
905 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
911 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
913 /* mark&release old frames */
914 if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
915 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
916 free_frame_buffer(s, s->last_picture_ptr);
918 /* release forgotten pictures */
919 /* if(mpeg124/h263) */
921 for(i=0; i<MAX_PICTURE_COUNT; i++){
922 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
923 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
924 free_frame_buffer(s, &s->picture[i]);
932 /* release non reference frames */
933 for(i=0; i<MAX_PICTURE_COUNT; i++){
934 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
935 free_frame_buffer(s, &s->picture[i]);
939 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
940 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
942 i= ff_find_unused_picture(s, 0);
// Reference flag: H.264 fields reference by picture structure; otherwise
// everything but B-frames is a reference.
948 if (s->codec_id == CODEC_ID_H264)
949 pic->reference = s->picture_structure;
950 else if (s->pict_type != FF_B_TYPE)
954 pic->coded_picture_number= s->coded_picture_number++;
956 if(ff_alloc_picture(s, pic, 0) < 0)
959 s->current_picture_ptr= pic;
960 s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
961 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
964 s->current_picture_ptr->pict_type= s->pict_type;
965 // if(s->flags && CODEC_FLAG_QSCALE)
966 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
967 s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
969 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
// Non-B frames shift the reference chain: next becomes last, current
// becomes next.
971 if (s->pict_type != FF_B_TYPE) {
972 s->last_picture_ptr= s->next_picture_ptr;
974 s->next_picture_ptr= s->current_picture_ptr;
976 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
977 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
978 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
979 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
980 s->pict_type, s->dropable);*/
// Broken streams may reference frames that never existed — allocate dummy
// (gray/uninitialized) frames so motion compensation has something to read.
982 if(s->codec_id != CODEC_ID_H264){
983 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
984 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
985 /* Allocate a dummy frame */
986 i= ff_find_unused_picture(s, 0);
987 s->last_picture_ptr= &s->picture[i];
988 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
991 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
992 /* Allocate a dummy frame */
993 i= ff_find_unused_picture(s, 0);
994 s->next_picture_ptr= &s->picture[i];
995 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1000 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1001 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1003 assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
// Field pictures: point at the selected field and double the strides so the
// rest of the code sees a half-height frame.
1005 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1008 if(s->picture_structure == PICT_BOTTOM_FIELD){
1009 s->current_picture.data[i] += s->current_picture.linesize[i];
1011 s->current_picture.linesize[i] *= 2;
1012 s->last_picture.linesize[i] *=2;
1013 s->next_picture.linesize[i] *=2;
1017 s->hurry_up= s->avctx->hurry_up;
1018 s->error_recognition= avctx->error_recognition;
1020 /* set dequantizer, we can't do it during init as it might change for mpeg4
1021 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1022 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1023 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1024 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1025 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1026 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1027 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1029 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1030 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1033 if(s->dct_error_sum){
1034 assert(s->avctx->noise_reduction && s->encoding);
1036 update_noise_reduction(s);
1039 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1040 return ff_xvmc_field_start(s, avctx);
1045 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Frame-end housekeeping: pad the reconstructed picture's borders for
 * unrestricted-MV prediction, record per-picture-type state, sync the
 * current_picture copy back into the picture array, free non-reference
 * frames and clear the local Picture copies. */
1046 void MPV_frame_end(MpegEncContext *s)
1049 /* draw edge for correct motion prediction if outside */
1050 //just to make sure that all data is rendered.
1051 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1052 ff_xvmc_field_end(s);
1053 }else if(!s->avctx->hwaccel
1054 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1055 && s->unrestricted_mv
1056 && s->current_picture.reference
1058 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
/* software path for a reference frame: replicate border pixels so motion
 * vectors pointing outside the picture read valid data; chroma planes use
 * half the luma edge width/positions (the >>1 assumes 2x chroma
 * subsampling — NOTE(review): confirm for non-4:2:0 formats) */
1059 s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
1060 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1061 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
/* remember type/lambda of this frame for rate control and header decisions */
1065 s->last_pict_type = s->pict_type;
1066 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1067 if(s->pict_type!=FF_B_TYPE){
1068 s->last_non_b_pict_type= s->pict_type;
1071 /* copy back current_picture variables */
1072 for(i=0; i<MAX_PICTURE_COUNT; i++){
1073 if(s->picture[i].data[0] == s->current_picture.data[0]){
1074 s->picture[i]= s->current_picture;
1078 assert(i<MAX_PICTURE_COUNT);
1082 /* release non-reference frames */
1083 for(i=0; i<MAX_PICTURE_COUNT; i++){
1084 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1085 free_frame_buffer(s, &s->picture[i]);
1089 // clear copies, to avoid confusion
1091 memset(&s->last_picture, 0, sizeof(Picture));
1092 memset(&s->next_picture, 0, sizeof(Picture));
1093 memset(&s->current_picture, 0, sizeof(Picture));
1095 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1099 * draws a line from (ex, ey) -> (sx, sy).
1100 * @param w width of the image
1101 * @param h height of the image
1102 * @param stride stride/linesize of the image
1103 * @param color color of the line
1105 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clip both endpoints into the picture so the stepping below never writes
 * outside buf */
1108 sx= av_clip(sx, 0, w-1);
1109 sy= av_clip(sy, 0, h-1);
1110 ex= av_clip(ex, 0, w-1);
1111 ey= av_clip(ey, 0, h-1);
1113 buf[sy*stride + sx]+= color;
/* step along the major axis in 16.16 fixed point; the fractional part fr
 * distributes color between the two pixels adjacent on the minor axis
 * (anti-aliased line). NOTE(review): extraction dropped the endpoint
 * normalization lines here — presumably sx<=ex / sy<=ey is established
 * before the division; confirm against the full source. */
1115 if(FFABS(ex - sx) > FFABS(ey - sy)){
1117 FFSWAP(int, sx, ex);
1118 FFSWAP(int, sy, ey);
1120 buf+= sx + sy*stride;
1122 f= ((ey-sy)<<16)/ex;
1123 for(x= 0; x <= ex; x++){
1126 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1127 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* mostly-vertical case: iterate over y instead of x */
1131 FFSWAP(int, sx, ex);
1132 FFSWAP(int, sy, ey);
1134 buf+= sx + sy*stride;
1136 if(ey) f= ((ex-sx)<<16)/ey;
1138 for(y= 0; y <= ey; y++){
1141 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1142 buf[y*stride + x+1]+= (color* fr )>>16;
1148 * draws an arrow from (ex, ey) -> (sx, sy).
1149 * @param w width of the image
1150 * @param h height of the image
1151 * @param stride stride/linesize of the image
1152 * @param color color of the arrow
1154 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* allow endpoints a little outside the picture; draw_line() clips hard */
1157 sx= av_clip(sx, -100, w+100);
1158 sy= av_clip(sy, -100, h+100);
1159 ex= av_clip(ex, -100, w+100);
1160 ey= av_clip(ey, -100, h+100);
/* only draw an arrow head if the vector is longer than 3 pixels */
1165 if(dx*dx + dy*dy > 3*3){
1168 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1170 //FIXME subpixel accuracy
/* scale (rx, ry) to a fixed head size relative to the vector length */
1171 rx= ROUNDED_DIV(rx*3<<4, length);
1172 ry= ROUNDED_DIV(ry*3<<4, length);
/* two barbs at (sx, sy): the head points at the arrow's origin */
1174 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1175 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1177 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1181 * prints debugging info for the given picture.
/* Two independent stages, both gated on s->avctx->debug / debug_mv:
 * 1) a textual per-macroblock dump (skip count, qscale, mb type) to the log;
 * 2) a visual overlay (motion vectors, QP, mb-type colors) drawn into a
 *    copy of the picture planes (s->visualization_buffer). */
1183 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1185 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1187 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1190 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1191 switch (pict->pict_type) {
1192 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1193 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1194 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1195 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1196 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1197 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1199 for(y=0; y<s->mb_height; y++){
1200 for(x=0; x<s->mb_width; x++){
1201 if(s->avctx->debug&FF_DEBUG_SKIP){
1202 int count= s->mbskip_table[x + y*s->mb_stride];
1203 if(count>9) count=9;
1204 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1206 if(s->avctx->debug&FF_DEBUG_QP){
1207 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1209 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1210 int mb_type= pict->mb_type[x + y*s->mb_stride];
1211 //Type & MV direction
/* one character per macroblock encoding its prediction type */
1213 av_log(s->avctx, AV_LOG_DEBUG, "P");
1214 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1215 av_log(s->avctx, AV_LOG_DEBUG, "A");
1216 else if(IS_INTRA4x4(mb_type))
1217 av_log(s->avctx, AV_LOG_DEBUG, "i");
1218 else if(IS_INTRA16x16(mb_type))
1219 av_log(s->avctx, AV_LOG_DEBUG, "I");
1220 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1221 av_log(s->avctx, AV_LOG_DEBUG, "d");
1222 else if(IS_DIRECT(mb_type))
1223 av_log(s->avctx, AV_LOG_DEBUG, "D");
1224 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1225 av_log(s->avctx, AV_LOG_DEBUG, "g");
1226 else if(IS_GMC(mb_type))
1227 av_log(s->avctx, AV_LOG_DEBUG, "G");
1228 else if(IS_SKIP(mb_type))
1229 av_log(s->avctx, AV_LOG_DEBUG, "S");
1230 else if(!USES_LIST(mb_type, 1))
1231 av_log(s->avctx, AV_LOG_DEBUG, ">");
1232 else if(!USES_LIST(mb_type, 0))
1233 av_log(s->avctx, AV_LOG_DEBUG, "<");
1235 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1236 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning of the macroblock */
1241 av_log(s->avctx, AV_LOG_DEBUG, "+");
1242 else if(IS_16X8(mb_type))
1243 av_log(s->avctx, AV_LOG_DEBUG, "-");
1244 else if(IS_8X16(mb_type))
1245 av_log(s->avctx, AV_LOG_DEBUG, "|");
1246 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1247 av_log(s->avctx, AV_LOG_DEBUG, " ");
1249 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: interlacing (H.264 MBAFF only) */
1252 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1253 av_log(s->avctx, AV_LOG_DEBUG, "=");
1255 av_log(s->avctx, AV_LOG_DEBUG, " ");
1257 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1259 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlay stage ---- */
1263 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1264 const int shift= 1 + s->quarter_sample;
1268 int h_chroma_shift, v_chroma_shift, block_height;
1269 const int width = s->avctx->width;
1270 const int height= s->avctx->height;
1271 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1272 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1273 s->low_delay=0; //needed to see the vectors without trashing the buffers
1275 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* draw into a private copy of the planes so the reference picture itself
 * is not modified */
1277 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1278 pict->data[i]= s->visualization_buffer[i];
1280 pict->type= FF_BUFFER_TYPE_COPY;
1282 block_height = 16>>v_chroma_shift;
1284 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1286 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1287 const int mb_index= mb_x + mb_y*s->mb_stride;
1288 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0/1/2 = P forward, B forward, B backward vectors respectively */
1290 for(type=0; type<3; type++){
1293 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1297 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1301 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1306 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* draw one arrow per partition, anchored at the partition center */
1309 if(IS_8X8(pict->mb_type[mb_index])){
1312 int sx= mb_x*16 + 4 + 8*(i&1);
1313 int sy= mb_y*16 + 4 + 8*(i>>1);
1314 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1315 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1316 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1317 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1319 }else if(IS_16X8(pict->mb_type[mb_index])){
1323 int sy=mb_y*16 + 4 + 8*i;
1324 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1325 int mx=(pict->motion_val[direction][xy][0]>>shift);
1326 int my=(pict->motion_val[direction][xy][1]>>shift);
1328 if(IS_INTERLACED(pict->mb_type[mb_index]))
1331 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1333 }else if(IS_8X16(pict->mb_type[mb_index])){
1336 int sx=mb_x*16 + 4 + 8*i;
1338 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1339 int mx=(pict->motion_val[direction][xy][0]>>shift);
1340 int my=(pict->motion_val[direction][xy][1]>>shift);
1342 if(IS_INTERLACED(pict->mb_type[mb_index]))
1345 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1348 int sx= mb_x*16 + 8;
1349 int sy= mb_y*16 + 8;
1350 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1351 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1352 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1353 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* paint QP as a gray level into the chroma planes */
1357 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1358 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1360 for(y=0; y<block_height; y++){
1361 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1362 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* colorize chroma by macroblock type */
1365 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1366 int mb_type= pict->mb_type[mb_index];
1369 #define COLOR(theta, r)\
1370 u= (int)(128 + r*cos(theta*3.141592/180));\
1371 v= (int)(128 + r*sin(theta*3.141592/180));
1375 if(IS_PCM(mb_type)){
1377 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1379 }else if(IS_INTRA4x4(mb_type)){
1381 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1383 }else if(IS_DIRECT(mb_type)){
1385 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1387 }else if(IS_GMC(mb_type)){
1389 }else if(IS_SKIP(mb_type)){
1391 }else if(!USES_LIST(mb_type, 1)){
1393 }else if(!USES_LIST(mb_type, 0)){
1396 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1400 u*= 0x0101010101010101ULL;
1401 v*= 0x0101010101010101ULL;
1402 for(y=0; y<block_height; y++){
1403 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1404 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* mark partition boundaries in luma by XOR-ing a mid-gray pattern */
1408 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1409 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1410 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1412 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1414 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1416 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1417 int dm= 1 << (mv_sample_log2-2);
1419 int sx= mb_x*16 + 8*(i&1);
1420 int sy= mb_y*16 + 8*(i>>1);
1421 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* only mark sub-partition edges where the 4x4 vectors actually differ */
1423 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1424 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1426 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1427 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1428 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1432 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset skip counter so the overlay is redrawn for this MB next frame */
1436 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer source offset and a sub-pel
 * fraction, handles edge emulation when the source area leaves the
 * picture, then delegates the interpolation to pix_op. Returns the
 * emulation flag (emu) per the visible control flow — NOTE(review):
 * the declaration/return lines were dropped by extraction; confirm. */
1442 static inline int hpel_motion_lowres(MpegEncContext *s,
1443 uint8_t *dest, uint8_t *src,
1444 int field_based, int field_select,
1445 int src_x, int src_y,
1446 int width, int height, int stride,
1447 int h_edge_pos, int v_edge_pos,
1448 int w, int h, h264_chroma_mc_func *pix_op,
1449 int motion_x, int motion_y)
1451 const int lowres= s->avctx->lowres;
1452 const int op_index= FFMIN(lowres, 2);
1453 const int s_mask= (2<<lowres)-1;
/* qpel vectors carry one extra fractional bit; presumably halved here
 * (dropped lines) so the lowres math below applies uniformly */
1457 if(s->quarter_sample){
/* split vector: low bits = sub-pel phase, high bits = integer offset */
1462 sx= motion_x & s_mask;
1463 sy= motion_y & s_mask;
1464 src_x += motion_x >> (lowres+1);
1465 src_y += motion_y >> (lowres+1);
1467 src += src_y * stride + src_x;
/* source block (including the +1 pixel needed for interpolation) falls
 * outside the decoded area: synthesize the missing pixels */
1469 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1470 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1471 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1472 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1473 src= s->edge_emu_buffer;
/* normalize sub-pel phase to the 1/8-pel scale expected by pix_op */
1477 sx= (sx << 2) >> lowres;
1478 sy= (sy << 2) >> lowres;
1481 pix_op[op_index](dest, src, stride, h, sx, sy);
1485 /* apply one mpeg motion vector to the three components */
/* Lowres variant: computes luma and chroma source positions and sub-pel
 * phases from one MV, emulates edges when needed, and interpolates all
 * three planes via pix_op. Handles frame/field addressing via the
 * field_based/bottom_field/field_select parameters. */
1486 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1487 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1488 int field_based, int bottom_field, int field_select,
1489 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1490 int motion_x, int motion_y, int h, int mb_y)
1492 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1493 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1494 const int lowres= s->avctx->lowres;
1495 const int op_index= FFMIN(lowres, 2);
1496 const int block_s= 8>>lowres;
1497 const int s_mask= (2<<lowres)-1;
1498 const int h_edge_pos = s->h_edge_pos >> lowres;
1499 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field pics double the effective stride so one "line" skips a field */
1500 linesize = s->current_picture.linesize[0] << field_based;
1501 uvlinesize = s->current_picture.linesize[1] << field_based;
1503 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1509 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1512 sx= motion_x & s_mask;
1513 sy= motion_y & s_mask;
1514 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1515 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* chroma MV derivation differs per container format */
1517 if (s->out_format == FMT_H263) {
1518 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1519 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1522 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1525 uvsx = (2*mx) & s_mask;
1526 uvsy = (2*my) & s_mask;
1527 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1528 uvsrc_y = mb_y*block_s + (my >> lowres);
1534 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1535 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1538 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1539 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1540 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* reference area leaves the picture: emulate the edge pixels into
 * edge_emu_buffer for all three planes */
1542 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1543 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1544 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1545 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1546 ptr_y = s->edge_emu_buffer;
1547 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1548 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1549 ff_emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1550 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1551 ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1552 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1558 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1559 dest_y += s->linesize;
1560 dest_cb+= s->uvlinesize;
1561 dest_cr+= s->uvlinesize;
1565 ptr_y += s->linesize;
1566 ptr_cb+= s->uvlinesize;
1567 ptr_cr+= s->uvlinesize;
/* normalize sub-pel phases and interpolate */
1570 sx= (sx << 2) >> lowres;
1571 sy= (sy << 2) >> lowres;
1572 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1574 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1575 uvsx= (uvsx << 2) >> lowres;
1576 uvsy= (uvsy << 2) >> lowres;
1577 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1578 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1580 //FIXME h261 lowres loop filter
/* Chroma motion compensation for a 4MV (8x8 partitioned) macroblock in
 * lowres mode: the four luma vectors are reduced to one chroma vector
 * (via ff_h263_round_chroma on the accumulated mx/my — accumulation lines
 * dropped by extraction), then Cb and Cr are interpolated from the same
 * offset with edge emulation as needed. */
1583 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1584 uint8_t *dest_cb, uint8_t *dest_cr,
1585 uint8_t **ref_picture,
1586 h264_chroma_mc_func *pix_op,
1588 const int lowres= s->avctx->lowres;
1589 const int op_index= FFMIN(lowres, 2);
1590 const int block_s= 8>>lowres;
1591 const int s_mask= (2<<lowres)-1;
1592 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1593 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1594 int emu=0, src_x, src_y, offset, sx, sy;
1597 if(s->quarter_sample){
1602 /* In case of 8X8, we construct a single chroma motion vector
1603 with a special rounding */
1604 mx= ff_h263_round_chroma(mx);
1605 my= ff_h263_round_chroma(my);
1609 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1610 src_y = s->mb_y*block_s + (my >> (lowres+1));
1612 offset = src_y * s->uvlinesize + src_x;
1613 ptr = ref_picture[1] + offset;
1614 if(s->flags&CODEC_FLAG_EMU_EDGE){
1615 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1616 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1617 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1618 ptr= s->edge_emu_buffer;
1622 sx= (sx << 2) >> lowres;
1623 sy= (sy << 2) >> lowres;
1624 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same source offset; emu flag decides whether to re-emulate */
1626 ptr = ref_picture[2] + offset;
1628 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1629 ptr= s->edge_emu_buffer;
1631 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1635 * motion compensation of a single macroblock
1637 * @param dest_y luma destination pointer
1638 * @param dest_cb chroma cb/u destination pointer
1639 * @param dest_cr chroma cr/v destination pointer
1640 * @param dir direction (0->forward, 1->backward)
1641 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1642 * @param pix_op halfpel motion compensation function (average or put normally)
1643 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres counterpart of MPV_motion: dispatches on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime per the visible cases) and
 * forwards each partition to mpeg_motion_lowres / hpel_motion_lowres. */
1645 static inline void MPV_motion_lowres(MpegEncContext *s,
1646 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1647 int dir, uint8_t **ref_picture,
1648 h264_chroma_mc_func *pix_op)
1652 const int lowres= s->avctx->lowres;
1653 const int block_s= 8>>lowres;
1658 switch(s->mv_type) {
1660 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1662 ref_picture, pix_op,
1663 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 8x8 (4MV): one luma call per quadrant, vectors accumulated for the
 * single derived chroma vector below */
1669 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1670 ref_picture[0], 0, 0,
1671 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1672 s->width, s->height, s->linesize,
1673 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1674 block_s, block_s, pix_op,
1675 s->mv[dir][i][0], s->mv[dir][i][1]);
1677 mx += s->mv[dir][i][0];
1678 my += s->mv[dir][i][1];
1681 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1682 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field prediction: two half-height calls in frame pictures, one call
 * (possibly from the opposite-parity field of current_picture) otherwise */
1685 if (s->picture_structure == PICT_FRAME) {
1687 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1688 1, 0, s->field_select[dir][0],
1689 ref_picture, pix_op,
1690 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1692 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1693 1, 1, s->field_select[dir][1],
1694 ref_picture, pix_op,
1695 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
1697 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
1698 ref_picture= s->current_picture_ptr->data;
1701 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1702 0, 0, s->field_select[dir][0],
1703 ref_picture, pix_op,
1704 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1709 uint8_t ** ref2picture;
/* same-parity field comes from ref_picture, opposite parity from the
 * partially reconstructed current picture */
1711 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
1712 ref2picture= ref_picture;
1714 ref2picture= s->current_picture_ptr->data;
1717 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1718 0, 0, s->field_select[dir][i],
1719 ref2picture, pix_op,
1720 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1722 dest_y += 2*block_s*s->linesize;
1723 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1724 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1728 if(s->picture_structure == PICT_FRAME){
1732 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1734 ref_picture, pix_op,
1735 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1737 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1741 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1742 0, 0, s->picture_structure != i+1,
1743 ref_picture, pix_op,
1744 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1746 // after put we make avg of the same block
1747 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1749 //opposite parity is always in the same frame if this is second field
1750 if(!s->first_field){
1751 ref_picture = s->current_picture_ptr->data;
1760 /* put block[] to dest[] */
/* Intra path: dequantize the coefficients, then IDCT and overwrite dest. */
1761 static inline void put_dct(MpegEncContext *s,
1762 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1764 s->dct_unquantize_intra(s, block, i, qscale);
1765 s->dsp.idct_put (dest, line_size, block);
1768 /* add block[] to dest[] */
/* Inter path, already-dequantized coefficients: IDCT and add onto the
 * motion-compensated prediction. Skipped if the block has no coefficients
 * (block_last_index < 0). */
1769 static inline void add_dct(MpegEncContext *s,
1770 DCTELEM *block, int i, uint8_t *dest, int line_size)
1772 if (s->block_last_index[i] >= 0) {
1773 s->dsp.idct_add (dest, line_size, block);
/* Inter path with dequantization: dequantize, IDCT, and add onto the
 * prediction; no-op when the block is empty (block_last_index < 0). */
1777 static inline void add_dequant_dct(MpegEncContext *s,
1778 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1780 if (s->block_last_index[i] >= 0) {
1781 s->dct_unquantize_inter(s, block, i, qscale);
1783 s->dsp.idct_add (dest, line_size, block);
1788 * cleans dc, ac, coded_block for the current non intra MB
/* Resets the intra prediction state (luma + chroma DC predictors, AC
 * coefficient history, and — for msmpeg4 v3+ — the coded_block flags) for
 * the macroblock at (s->mb_x, s->mb_y), then clears its mbintra_table
 * entry. 1024 is the DC predictor reset value used here. */
1790 void ff_clean_intra_table_entries(MpegEncContext *s)
1792 int wrap = s->b8_stride;
1793 int xy = s->block_index[0];
/* luma: four 8x8 blocks share the 2x2 region at xy */
1796 s->dc_val[0][xy + 1 ] =
1797 s->dc_val[0][xy + wrap] =
1798 s->dc_val[0][xy + 1 + wrap] = 1024;
1800 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1801 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1802 if (s->msmpeg4_version>=3) {
1803 s->coded_block[xy ] =
1804 s->coded_block[xy + 1 ] =
1805 s->coded_block[xy + wrap] =
1806 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per macroblock, indexed on the mb grid */
1809 wrap = s->mb_stride;
1810 xy = s->mb_x + s->mb_y * wrap;
1812 s->dc_val[2][xy] = 1024;
1814 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1815 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1817 s->mbintra_table[xy]= 0;
1820 /* generic function called after a macroblock has been parsed by the
1821 decoder or after it has been encoded by the encoder.
1823 Important variables used:
1824 s->mb_intra : true if intra macroblock
1825 s->mv_dir : motion vector direction
1826 s->mv_type : motion vector type
1827 s->mv : motion vector
1828 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstructs one macroblock into the current picture: motion
 * compensation (normal or lowres path), then dequant+IDCT of the residual
 * (inter) or the full block (intra). lowres_flag/is_mpeg12 are
 * compile-time specialization switches used by MPV_decode_mb(). */
1830 static av_always_inline
1831 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1832 int lowres_flag, int is_mpeg12)
1834 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1835 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1836 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1840 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1841 /* save DCT coefficients */
1843 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1846 *dct++ = block[i][s->dsp.idct_permutation[j]];
1849 s->current_picture.qscale_table[mb_xy]= s->qscale;
1851 /* update DC predictors for P macroblocks */
1853 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1854 if(s->mbintra_table[mb_xy])
1855 ff_clean_intra_table_entries(s);
1859 s->last_dc[2] = 128 << s->intra_dc_precision;
1862 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1863 s->mbintra_table[mb_xy]=1;
1865 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1866 uint8_t *dest_y, *dest_cb, *dest_cr;
1867 int dct_linesize, dct_offset;
1868 op_pixels_func (*op_pix)[4];
1869 qpel_mc_func (*op_qpix)[16];
1870 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1871 const int uvlinesize= s->current_picture.linesize[1];
1872 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1873 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1875 /* avoid copy if macroblock skipped in last frame too */
1876 /* skip only during decoding as we might trash the buffers during encoding a bit */
1878 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1879 const int age= s->current_picture.age;
/* mbskip_table counts consecutive skips (capped at 99); if this MB was
 * already skipped in a frame at least `age` frames back, the pixels in
 * the buffer are still valid and nothing needs to be copied */
1883 if (s->mb_skipped) {
1885 assert(s->pict_type!=FF_I_TYPE);
1887 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1888 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1890 /* if previous was skipped too, then nothing to do ! */
1891 if (*mbskip_ptr >= age && s->current_picture.reference){
1894 } else if(!s->current_picture.reference){
1895 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1896 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1898 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double the stride and make
 * the second luma row start one line (not block_size lines) down */
1902 dct_linesize = linesize << s->interlaced_dct;
1903 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1907 dest_cb= s->dest[1];
1908 dest_cr= s->dest[2];
/* non-readable destination (e.g. B-frame without draw_horiz_band):
 * reconstruct into a scratchpad and copy out at the end */
1910 dest_y = s->b_scratchpad;
1911 dest_cb= s->b_scratchpad+16*linesize;
1912 dest_cr= s->b_scratchpad+32*linesize;
1916 /* motion handling */
1917 /* decoding or more than one mb_type (MC was already done otherwise) */
1920 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
1922 if (s->mv_dir & MV_DIR_FORWARD) {
1923 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
1924 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1926 if (s->mv_dir & MV_DIR_BACKWARD) {
1927 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
1930 op_qpix= s->me.qpel_put;
1931 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
1932 op_pix = s->dsp.put_pixels_tab;
1934 op_pix = s->dsp.put_no_rnd_pixels_tab;
/* bidirectional: forward prediction is "put", backward is then averaged */
1936 if (s->mv_dir & MV_DIR_FORWARD) {
1937 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1938 op_pix = s->dsp.avg_pixels_tab;
1939 op_qpix= s->me.qpel_avg;
1941 if (s->mv_dir & MV_DIR_BACKWARD) {
1942 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1947 /* skip dequant / idct if we are really late ;) */
1948 if(s->hurry_up>1) goto skip_idct;
1949 if(s->avctx->skip_idct){
1950 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
1951 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
1952 || s->avctx->skip_idct >= AVDISCARD_ALL)
1956 /* add dct residue */
/* codecs that don't pre-dequantize in the bitstream parser take the
 * dequant+add path; MPEG-1/2 (and MPEG-4 without mpeg_quant) coefficients
 * arrive already dequantized and take the plain add path below */
1957 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1958 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1959 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1960 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1961 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1962 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1964 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1965 if (s->chroma_y_shift){
1966 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1967 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1971 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1972 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1973 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1974 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1977 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
1978 add_dct(s, block[0], 0, dest_y , dct_linesize);
1979 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1980 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1981 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1983 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1984 if(s->chroma_y_shift){//Chroma420
1985 add_dct(s, block[4], 4, dest_cb, uvlinesize);
1986 add_dct(s, block[5], 5, dest_cr, uvlinesize);
1989 dct_linesize = uvlinesize << s->interlaced_dct;
1990 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
1992 add_dct(s, block[4], 4, dest_cb, dct_linesize);
1993 add_dct(s, block[5], 5, dest_cr, dct_linesize);
1994 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1995 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1996 if(!s->chroma_x_shift){//Chroma444
1997 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1998 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1999 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2000 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2005 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2006 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2009 /* dct only in intra block */
2010 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2011 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2012 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2013 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2014 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2016 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2017 if(s->chroma_y_shift){
2018 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2019 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2023 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2024 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2025 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2026 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra MPEG-1/2 decode: coefficients are already dequantized, plain IDCT */
2030 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2031 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2032 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2033 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2035 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2036 if(s->chroma_y_shift){
2037 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2038 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2041 dct_linesize = uvlinesize << s->interlaced_dct;
2042 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2044 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2045 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2046 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2047 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2048 if(!s->chroma_x_shift){//Chroma444
2049 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2050 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2051 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2052 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* scratchpad path: copy the reconstructed MB into the real destination */
2060 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2061 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2062 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one decoded macroblock: dispatch to the matching
 * MPV_decode_mb_internal() specialization. The two constant args appear
 * to select lowres (avctx->lowres != 0) and an MPEG-1-path flag keyed on
 * out_format == FMT_MPEG1 -- NOTE(review): callee is outside this excerpt,
 * confirm the flag meanings there.
 * NOTE(review): embedded numbering jumps 2071 -> 2074; the enclosing
 * "} else {" (and closing braces) were lost in extraction -- this body is
 * incomplete as shown, restore from the upstream file.
 */
2067 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2069 if(s->out_format == FMT_MPEG1) {
2070 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2071 else MPV_decode_mb_internal(s, block, 0, 1);
/* (missing lines 2072-2073: else-branch opener for the non-MPEG-1 path) */
2074 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2075 else MPV_decode_mb_internal(s, block, 0, 0);
/**
 * Invoke the user's draw_horiz_band callback for a completed band of rows,
 * if one is installed.
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 * NOTE(review): numbering gaps (2083->2085, 2099->2103, 2111->2117, ...)
 * show dropped lines (declarations of src/offset, braces, else branches);
 * incomplete as shown.
 */
2082 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2083 if (s->avctx->draw_horiz_band) {
2085 const int field_pic= s->picture_structure != PICT_FRAME;
/* clamp h so the last band never reads past the picture bottom
 * (height is halved for field pictures) */
2088 h= FFMIN(h, (s->avctx->height>>field_pic) - y);
/* field pictures are only passed through when the user opted in */
2090 if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
2093 if(s->first_field) return;
/* pick the frame to hand out: B-frames / low-delay / coded-order mode
 * draw the picture being decoded, otherwise the previous (display-order)
 * reference picture */
2096 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2097 src= (AVFrame*)s->current_picture_ptr;
2098 else if(s->last_picture_ptr)
2099 src= (AVFrame*)s->last_picture_ptr;
2103 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band start within each plane
 * (offset[1] line is missing from this extraction) */
2109 offset[0]= y * s->linesize;
2111 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2117 s->avctx->draw_horiz_band(s->avctx, src, offset,
2118 y, s->picture_structure, h);
2122 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2123 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2124 const int uvlinesize= s->current_picture.linesize[1];
/* log2 of the luma MB size in pixels: 16 normally, halved per lowres step */
2125 const int mb_size= 4 - s->avctx->lowres;
/* indices 0-3: the four luma 8x8 blocks of the current MB in the
 * b8_stride-wide per-8x8-block arrays (the "-2"/"-1" bias selects the
 * column left of mb_x; callers advance these as they step across MBs) */
2127 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2128 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2129 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2130 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* indices 4-5: the chroma blocks, stored after the 2*mb_height luma rows */
2131 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2132 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2133 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* destination pointers start one MB left of mb_x (same -1 bias as above) */
2135 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2136 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2137 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2139 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
/* advance dest to the current MB row; field pictures use mb_y>>1 since
 * each field holds every other MB row
 * NOTE(review): numbering gaps (2139->2141, 2144->2146) indicate dropped
 * brace/else lines here */
2141 if(s->picture_structure==PICT_FRAME){
2142 s->dest[0] += s->mb_y * linesize << mb_size;
2143 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2144 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2146 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2147 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2148 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2149 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush decoder state: release all internally/user allocated picture
 * buffers, drop the reference-picture pointers, and reset the parser
 * and bitstream-buffer bookkeeping (e.g. after a seek).
 */
2154 void ff_mpeg_flush(AVCodecContext *avctx){
2156 MpegEncContext *s = avctx->priv_data;
/* nothing to do for an uninitialized context */
2158 if(s==NULL || s->picture==NULL)
2161 for(i=0; i<MAX_PICTURE_COUNT; i++){
2162 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2163 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2164 free_frame_buffer(s, &s->picture[i]);
/* all reference pictures are gone now */
2166 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2168 s->mb_x= s->mb_y= 0;
/* reset the startcode parser so stale partial frames are discarded */
2171 s->parse_context.state= -1;
2172 s->parse_context.frame_start_found= 0;
2173 s->parse_context.overread= 0;
2174 s->parse_context.overread_index= 0;
2175 s->parse_context.index= 0;
2176 s->parse_context.last_index= 0;
2177 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantizer (C reference): DC is multiplied by the
 * luma/chroma DC scale; each nonzero AC coefficient is scaled by
 * qscale*intra_matrix[j] (>>3) and then forced odd via (level-1)|1
 * (MPEG-1 "oddification" mismatch control).
 * NOTE(review): embedded numbering gaps (2183, 2186, 2188-89, 2191,
 * 2197-2200, 2203-04, 2207+) show dropped lines: the opening brace, the
 * n<4 luma-vs-chroma test, level load/sign handling and the store back
 * to block[j]. Incomplete as shown -- restore from upstream.
 */
2181 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2182 DCTELEM *block, int n, int qscale)
2184 int i, level, nCoeffs;
2185 const uint16_t *quant_matrix;
2187 nCoeffs= s->block_last_index[n];
/* DC coefficient: luma scale ... (selecting condition line is missing) */
2190 block[0] = block[0] * s->y_dc_scale;
/* ... or chroma scale */
2192 block[0] = block[0] * s->c_dc_scale;
2193 /* XXX: only mpeg1 */
2194 quant_matrix = s->intra_matrix;
2195 for(i=1;i<=nCoeffs;i++) {
2196 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign-flip lines missing) */
2201 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2202 level = (level - 1) | 1;
/* positive-level branch */
2205 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2206 level = (level - 1) | 1;
/*
 * MPEG-1 inter dequantizer: each nonzero coefficient (DC included, i
 * starts at 0) becomes ((2*|level|+1)*qscale*inter_matrix[j])>>4, forced
 * odd by (level-1)|1, with the sign restored.
 * Using s->intra_scantable here is deliberate in upstream FFmpeg (the
 * permutation is shared) -- NOTE(review): verify against the full file.
 * NOTE(review): numbering gaps (2215, 2218, 2220, 2224-27, ...) show
 * dropped lines (braces, level load/sign handling, store).
 */
2213 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2214 DCTELEM *block, int n, int qscale)
2216 int i, level, nCoeffs;
2217 const uint16_t *quant_matrix;
2219 nCoeffs= s->block_last_index[n];
2221 quant_matrix = s->inter_matrix;
2222 for(i=0; i<=nCoeffs; i++) {
2223 int j= s->intra_scantable.permutated[i];
/* negative-level branch */
2228 level = (((level << 1) + 1) * qscale *
2229 ((int) (quant_matrix[j]))) >> 4;
2230 level = (level - 1) | 1;
/* positive-level branch */
2233 level = (((level << 1) + 1) * qscale *
2234 ((int) (quant_matrix[j]))) >> 4;
2235 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantizer: like the MPEG-1 variant but WITHOUT the
 * (level-1)|1 oddification (MPEG-2 uses sum-based mismatch control
 * instead), and with nCoeffs forced to 63 under alternate scan.
 * NOTE(review): numbering gaps (2244, 2247, 2250-51, 2253, 2258-61, ...)
 * show dropped lines (braces, n<4 test, sign handling, store).
 */
2242 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2243 DCTELEM *block, int n, int qscale)
2245 int i, level, nCoeffs;
2246 const uint16_t *quant_matrix;
/* alternate scan does not guarantee a usable block_last_index */
2248 if(s->alternate_scan) nCoeffs= 63;
2249 else nCoeffs= s->block_last_index[n];
/* DC: luma scale ... (selecting condition line missing) */
2252 block[0] = block[0] * s->y_dc_scale;
/* ... or chroma scale */
2254 block[0] = block[0] * s->c_dc_scale;
2255 quant_matrix = s->intra_matrix;
2256 for(i=1;i<=nCoeffs;i++) {
2257 int j= s->intra_scantable.permutated[i];
/* negative-level branch */
2262 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2265 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bit-exact variant of the MPEG-2 intra dequantizer; presumably it
 * additionally tracks the coefficient sum for spec-exact mismatch
 * control -- NOTE(review): the sum/parity lines are among those dropped
 * here (gaps at 2274, 2277-78, 2281-82, 2284, 2289-92, ...), so this
 * cannot be confirmed from the excerpt.
 */
2272 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2273 DCTELEM *block, int n, int qscale)
2275 int i, level, nCoeffs;
2276 const uint16_t *quant_matrix;
2279 if(s->alternate_scan) nCoeffs= 63;
2280 else nCoeffs= s->block_last_index[n];
/* DC: luma scale ... (selecting condition line missing) */
2283 block[0] = block[0] * s->y_dc_scale;
/* ... or chroma scale */
2285 block[0] = block[0] * s->c_dc_scale;
2286 quant_matrix = s->intra_matrix;
2287 for(i=1;i<=nCoeffs;i++) {
2288 int j= s->intra_scantable.permutated[i];
/* negative-level branch */
2293 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2296 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantizer: ((2*|level|+1)*qscale*inter_matrix[j])>>4
 * per nonzero coefficient, no oddification (cf. the MPEG-1 variant);
 * nCoeffs forced to 63 under alternate scan.
 * NOTE(review): numbering gaps (2307, 2310-11, 2314, 2318-21, ...) show
 * dropped lines (braces, sign handling, mismatch-sum accumulation, store).
 */
2305 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2306 DCTELEM *block, int n, int qscale)
2308 int i, level, nCoeffs;
2309 const uint16_t *quant_matrix;
2312 if(s->alternate_scan) nCoeffs= 63;
2313 else nCoeffs= s->block_last_index[n];
2315 quant_matrix = s->inter_matrix;
2316 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is deliberate here in upstream FFmpeg (shared table) */
2317 int j= s->intra_scantable.permutated[i];
/* negative-level branch */
2322 level = (((level << 1) + 1) * qscale *
2323 ((int) (quant_matrix[j]))) >> 4;
/* positive-level branch */
2326 level = (((level << 1) + 1) * qscale *
2327 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263-style intra dequantizer: DC scaled by y/c DC scale, each nonzero
 * AC coefficient mapped to level*qmul +/- qadd by sign, where
 * qadd = (qscale-1)|1 (forced odd).
 * NOTE(review): the qmul assignment and the nCoeffs fast path are among
 * the dropped lines (gaps at 2338, 2340-41, 2343-47, 2349, 2352-57,
 * 2359, 2361-63, 2365); incomplete as shown.
 */
2336 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2337 DCTELEM *block, int n, int qscale)
2339 int i, level, qmul, qadd;
2342 assert(s->block_last_index[n]>=0);
/* DC: luma scale ... (selecting condition line missing) */
2348 block[0] = block[0] * s->y_dc_scale;
/* ... or chroma scale */
2350 block[0] = block[0] * s->c_dc_scale;
/* force qadd odd */
2351 qadd = (qscale - 1) | 1;
/* last nonzero coefficient in raster order */
2358 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2360 for(i=1; i<=nCoeffs; i++) {
/* negative-level branch */
2364 level = level * qmul - qadd;
/* positive-level branch */
2366 level = level * qmul + qadd;
/*
 * H.263-style inter dequantizer: identical AC formula to the intra
 * variant (level*qmul +/- qadd, qadd = (qscale-1)|1) but no DC special
 * case -- the loop starts at i=0.
 * NOTE(review): the qmul assignment and several brace/sign-handling
 * lines are dropped (gaps at 2375, 2377-78, 2380, 2382-83, 2385,
 * 2387-89, 2391); incomplete as shown.
 */
2373 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2374 DCTELEM *block, int n, int qscale)
2376 int i, level, qmul, qadd;
2379 assert(s->block_last_index[n]>=0);
/* force qadd odd */
2381 qadd = (qscale - 1) | 1;
/* last nonzero coefficient in raster order */
2384 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2386 for(i=0; i<=nCoeffs; i++) {
/* negative-level branch */
2390 level = level * qmul - qadd;
/* positive-level branch */
2392 level = level * qmul + qadd;
2400 * set qscale and update qscale dependent variables.
2402 void ff_set_qscale(MpegEncContext * s, int qscale)
2406 else if (qscale > 31)
2410 s->chroma_qscale= s->chroma_qscale_table[qscale];
2412 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2413 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];