2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations for the per-standard C dequantizer implementations.
 * The matching MpegEncContext function pointers are installed in
 * ff_dct_common_init() below, where arch-specific code may override them. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
/* bit-exact variant, selected when CODEC_FLAG_BITEXACT is set (see
 * ff_dct_common_init) */
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 DCTELEM *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 DCTELEM *block, int n, int qscale);
62 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping from luma qscale to chroma qscale: chroma_qscale[q] == q.
 * Codecs with a non-linear mapping install their own table instead. */
68 static const uint8_t ff_default_chroma_qscale_table[32]={
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
70 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC scaler: constant 8 for every qscale (indexed by qscale, 0..127). */
73 const uint8_t ff_mpeg1_dc_scale_table[128]={
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scaler tables, one per intra_dc_precision setting
 * (see ff_mpeg2_dc_scale_table[] below which indexes them). */
81 static const uint8_t mpeg2_dc_scale_table1[128]={
82 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 static const uint8_t mpeg2_dc_scale_table2[128]={
90 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 static const uint8_t mpeg2_dc_scale_table3[128]={
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Dispatch table: DC scaler table per intra_dc_precision (0..3).
 * Entry 0 (precision 0 == 8 bit) reuses the MPEG-1 table of 8s. */
105 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
106 ff_mpeg1_dc_scale_table,
107 mpeg2_dc_scale_table1,
108 mpeg2_dc_scale_table2,
109 mpeg2_dc_scale_table3,
/* Pixel-format lists advertised for 4:2:0 decoding: a plain software list
 * and a variant that also offers hwaccel formats.
 * NOTE(review): initializer contents are not visible in this view. */
112 const enum PixelFormat ff_pixfmt_list_420[] = {
117 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (byte sequence 00 00 01 xx), carrying
 * the last 4 bytes seen across calls in *state so codes split over buffer
 * boundaries are still found. Returns a pointer just past the match (or end).
 */
124 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
/* slide one byte into the 32-bit history; a match is when the previous
 * 3 bytes were 00 00 01, i.e. tmp == 0x100 */
132 uint32_t tmp= *state << 8;
133 *state= tmp + *(p++);
134 if(tmp == 0x100 || p==end)
/* fast-path skips: use the bytes just read to step 1-3 bytes at a time
 * past positions that cannot start a 00 00 01 prefix */
139 if (p[-1] > 1 ) p+= 3;
140 else if(p[-2] ) p+= 2;
141 else if(p[-3]|(p[-1]-1)) p++;
154 /* init common dct for both encoder and decoder */
155 av_cold int ff_dct_common_init(MpegEncContext *s)
/* install the generic C dequantizers; arch-specific init below may
 * replace them with optimized versions */
157 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
158 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
159 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
160 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
161 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
162 if(s->flags & CODEC_FLAG_BITEXACT)
163 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
164 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture overrides (each guarded by build-time config;
 * guards elided in this view) */
167 MPV_common_init_mmx(s);
169 MPV_common_init_axp(s);
171 MPV_common_init_mlib(s);
173 MPV_common_init_mmi(s);
175 MPV_common_init_arm(s);
177 MPV_common_init_altivec(s);
179 MPV_common_init_bfin(s);
182 /* load & permutate scantables
183 note: only wmv uses different ones
/* scan order depends on alternate_scan; tables are permuted to match the
 * IDCT's coefficient layout */
185 if(s->alternate_scan){
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/**
 * Copy *src into *dst and mark dst as a copy (FF_BUFFER_TYPE_COPY) so the
 * shared buffers are not freed twice through the copy.
 */
198 void ff_copy_picture(Picture *dst, Picture *src){
200 dst->type= FF_BUFFER_TYPE_COPY;
204 * Release a frame buffer
206 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
/* return the buffer to the application and drop any hwaccel private data */
208 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
209 av_freep(&pic->hwaccel_picture_private);
213 * Allocate a frame buffer
215 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* hwaccel decoders may need per-picture private data allocated before
 * get_buffer() */
219 if (s->avctx->hwaccel) {
220 assert(!pic->hwaccel_picture_private);
221 if (s->avctx->hwaccel->priv_data_size) {
222 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
223 if (!pic->hwaccel_picture_private) {
224 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* ask the application for the actual pixel buffers */
230 r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
/* sanity-check what get_buffer() returned */
232 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
233 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
234 av_freep(&pic->hwaccel_picture_private);
/* strides must stay constant for the whole sequence once known */
238 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
239 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
240 free_frame_buffer(s, pic);
/* U and V planes are assumed to share one stride throughout this code */
244 if (pic->linesize[1] != pic->linesize[2]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
246 free_frame_buffer(s, pic);
254 * allocates a Picture
255 * The pixels are allocated/set by calling get_buffer() if shared=0
257 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
258 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
259 const int mb_array_size= s->mb_stride*s->mb_height;
260 const int b8_array_size= s->b8_stride*s->mb_height*2;
261 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared: caller supplies pixels, we only tag the picture */
266 assert(pic->data[0]);
267 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
268 pic->type= FF_BUFFER_TYPE_SHARED;
270 assert(!pic->data[0]);
272 if (alloc_frame_buffer(s, pic) < 0)
/* record the strides get_buffer() gave us for later consistency checks */
275 s->linesize = pic->linesize[0];
276 s->uvlinesize= pic->linesize[1];
/* first-time side-data allocation; skipped when the picture is reused */
279 if(pic->qscale_table==NULL){
281 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
286 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
289 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
/* H.264 uses 4x4 motion granularity (subsample_log2 == 2), others 8x8 */
290 if(s->out_format == FMT_H264){
292 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
293 pic->motion_val[i]= pic->motion_val_base[i]+4;
294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
296 pic->motion_subsample_log2= 2;
297 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
299 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
300 pic->motion_val[i]= pic->motion_val_base[i]+4;
301 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
303 pic->motion_subsample_log2= 3;
305 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
306 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
308 pic->qstride= s->mb_stride;
309 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
312 /* It might be nicer if the application would keep track of these
313 * but it would require an API change. */
314 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
315 s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
316 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
317 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
320 fail: //for the FF_ALLOCZ_OR_GOTO macro
322 free_frame_buffer(s, pic);
327 * deallocates a picture
329 static void free_picture(MpegEncContext *s, Picture *pic){
/* release the pixel buffers only if we own them (not application-shared) */
332 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
333 free_frame_buffer(s, pic);
/* free all per-picture side data allocated in ff_alloc_picture() */
336 av_freep(&pic->mb_var);
337 av_freep(&pic->mc_mb_var);
338 av_freep(&pic->mb_mean);
339 av_freep(&pic->mbskip_table);
340 av_freep(&pic->qscale_table);
341 av_freep(&pic->mb_type_base);
342 av_freep(&pic->dct_coeff);
343 av_freep(&pic->pan_scan);
346 av_freep(&pic->motion_val_base[i]);
347 av_freep(&pic->ref_index[i]);
350 if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * MpegEncContext. Returns 0 on success, -1 on allocation failure
 * (partial allocations are cleaned up later via MPV_common_end()). */
359 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
360 int y_size = s->b8_stride * (2 * s->mb_height + 1);
361 int c_size = s->mb_stride * (s->mb_height + 1);
362 int yc_size = y_size + 2 * c_size;
365 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
366 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
367 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
369 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
/* one backing allocation shared by several scratchpads; they are never
 * live at the same time */
370 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
371 s->me.temp= s->me.scratchpad;
372 s->rd_scratchpad= s->me.scratchpad;
373 s->b_scratchpad= s->me.scratchpad;
374 s->obmc_scratchpad= s->me.scratchpad + 16;
376 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
377 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
378 if(s->avctx->noise_reduction){
379 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 DCTELEMs, x2 */
382 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
383 s->block= s->blocks[0];
386 s->pblocks[i] = &s->block[i];
389 if (s->out_format == FMT_H263) {
/* ac values: laid out as luma plane followed by the two chroma planes,
 * each with a 1-row/1-col border for prediction from outside the frame */
391 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
392 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
393 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
394 s->ac_val[2] = s->ac_val[1] + c_size;
399 return -1; //free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(); aliasing pointers
 * (edge_emu_buffer, obmc_scratchpad) are only NULLed, their storage is owned
 * by the allocated_* / me.scratchpad buffers. */
402 static void free_duplicate_context(MpegEncContext *s){
405 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
406 av_freep(&s->me.scratchpad);
410 s->obmc_scratchpad= NULL;
412 av_freep(&s->dct_error_sum);
413 av_freep(&s->me.map);
414 av_freep(&s->me.score_map);
415 av_freep(&s->blocks);
416 av_freep(&s->ac_val_base);
/* Copy only the per-thread fields from src to bak; used by
 * ff_update_duplicate_context() to preserve a thread context's private
 * buffers across a wholesale memcpy of the shared state. */
420 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
421 #define COPY(a) bak->a= src->a
422 COPY(allocated_edge_emu_buffer);
423 COPY(edge_emu_buffer);
428 COPY(obmc_scratchpad);
435 COPY(me.map_generation);
/* Refresh a thread context from the master: save dst's private fields,
 * overwrite dst with src wholesale, then restore the private fields and
 * re-point pblocks into dst's own block array. */
447 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
450 //FIXME copy only needed parts
452 backup_duplicate_context(&bak, dst);
453 memcpy(dst, src, sizeof(MpegEncContext));
454 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's block array, not src's */
456 dst->pblocks[i] = &dst->block[i];
458 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
462 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
463 * the changed fields will not depend upon the prior state of the MpegEncContext.
465 void MPV_common_defaults(MpegEncContext *s){
/* default tables; codec-specific init may replace them */
467 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
468 s->chroma_qscale_table= ff_default_chroma_qscale_table;
/* assume progressive full-frame coding until headers say otherwise */
469 s->progressive_frame= 1;
470 s->progressive_sequence= 1;
471 s->picture_structure= PICT_FRAME;
473 s->coded_picture_number = 0;
474 s->picture_number = 0;
475 s->input_picture_number = 0;
477 s->picture_in_gop_number = 0;
484 * sets the given MpegEncContext to defaults for decoding.
485 * the changed fields will not depend upon the prior state of the MpegEncContext.
487 void MPV_decode_defaults(MpegEncContext *s){
488 MPV_common_defaults(s);
492 * init common structure for both encoder and decoder.
493 * this assumes that some variables like width/height are already set
495 av_cold int MPV_common_init(MpegEncContext *s)
497 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
/* interlaced MPEG-2 needs mb_height rounded to macroblock-pair units;
 * H.264 manages mb_height itself */
499 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
500 s->mb_height = (s->height + 31) / 32 * 2;
501 else if (s->codec_id != CODEC_ID_H264)
502 s->mb_height = (s->height + 15) / 16;
504 if(s->avctx->pix_fmt == PIX_FMT_NONE){
505 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* slice threading cannot use more threads than macroblock rows */
509 if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
510 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
514 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
517 dsputil_init(&s->dsp, s->avctx);
518 ff_dct_common_init(s);
520 s->flags= s->avctx->flags;
521 s->flags2= s->avctx->flags2;
/* derived geometry: strides include a 1-unit border column */
523 s->mb_width = (s->width + 15) / 16;
524 s->mb_stride = s->mb_width + 1;
525 s->b8_stride = s->mb_width*2 + 1;
526 s->b4_stride = s->mb_width*4 + 1;
527 mb_array_size= s->mb_height * s->mb_stride;
528 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
530 /* set chroma shifts */
531 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
532 &(s->chroma_y_shift) );
534 /* set default edge pos, will be overriden in decode_header if needed */
535 s->h_edge_pos= s->mb_width*16;
536 s->v_edge_pos= s->mb_height*16;
538 s->mb_num = s->mb_width * s->mb_height;
543 s->block_wrap[3]= s->b8_stride;
545 s->block_wrap[5]= s->mb_stride;
547 y_size = s->b8_stride * (2 * s->mb_height + 1);
548 c_size = s->mb_stride * (s->mb_height + 1);
549 yc_size = y_size + 2 * c_size;
551 /* convert fourcc to upper case */
552 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
554 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
556 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
558 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
/* map linear mb index -> strided xy position */
559 for(y=0; y<s->mb_height; y++){
560 for(x=0; x<s->mb_width; x++){
561 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
564 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
567 /* Allocate MV tables */
568 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
570 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
571 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
572 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* the usable table pointers skip the first (border) row/column */
574 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
575 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
576 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
577 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
578 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
579 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
581 if(s->msmpeg4_version){
582 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
584 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
586 /* Allocate MB type table */
587 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
589 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
/* quantizer matrices: 32 qscales x 64 coefficients, plus 16-bit variants */
591 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
592 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
593 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
594 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
595 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
596 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
598 if(s->avctx->noise_reduction){
599 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
602 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
603 for(i = 0; i < MAX_PICTURE_COUNT; i++) {
604 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
607 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
609 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
610 /* interlaced direct mode decoding tables */
615 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
616 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
618 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
619 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
620 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
622 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
625 if (s->out_format == FMT_H263) {
/* coded-block pattern bits, with a 1-unit border */
627 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
628 s->coded_block= s->coded_block_base + s->b8_stride + 1;
630 /* cbp, ac_pred, pred_dir */
631 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
632 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
635 if (s->h263_pred || s->h263_plus || !s->encoding) {
637 //MN: we need these for error resilience of intra-frames
/* dc values: luma plane then the two chroma planes, 1024 == neutral DC */
638 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
639 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
640 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
641 s->dc_val[2] = s->dc_val[1] + c_size;
642 for(i=0;i<yc_size;i++)
643 s->dc_val_base[i] = 1024;
646 /* which mb is a intra block */
647 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
648 memset(s->mbintra_table, 1, mb_array_size);
650 /* init macroblock skip table */
651 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
652 //Note the +1 is for a quicker mpeg4 slice_end detection
653 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
655 s->parse_context.state= -1;
656 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
657 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
658 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
659 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
662 s->context_initialized = 1;
/* slice threading: thread 0 is the master context, the others are
 * memcpy clones that get their own scratch buffers */
664 s->thread_context[0]= s;
665 threads = s->avctx->thread_count;
667 for(i=1; i<threads; i++){
668 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
669 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
672 for(i=0; i<threads; i++){
673 if(init_duplicate_context(s->thread_context[i], s) < 0)
/* partition the mb rows (rounded) evenly across the threads */
675 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
676 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
685 /* init common structure for both encoder and decoder */
686 void MPV_common_end(MpegEncContext *s)
/* tear down thread contexts; index 0 is s itself and must not be freed */
690 for(i=0; i<s->avctx->thread_count; i++){
691 free_duplicate_context(s->thread_context[i]);
693 for(i=1; i<s->avctx->thread_count; i++){
694 av_freep(&s->thread_context[i]);
697 av_freep(&s->parse_context.buffer);
698 s->parse_context.buffer_size=0;
/* free MV tables; the derived (offset) pointers are NULLed since they
 * alias into the freed *_base allocations */
700 av_freep(&s->mb_type);
701 av_freep(&s->p_mv_table_base);
702 av_freep(&s->b_forw_mv_table_base);
703 av_freep(&s->b_back_mv_table_base);
704 av_freep(&s->b_bidir_forw_mv_table_base);
705 av_freep(&s->b_bidir_back_mv_table_base);
706 av_freep(&s->b_direct_mv_table_base);
708 s->b_forw_mv_table= NULL;
709 s->b_back_mv_table= NULL;
710 s->b_bidir_forw_mv_table= NULL;
711 s->b_bidir_back_mv_table= NULL;
712 s->b_direct_mv_table= NULL;
716 av_freep(&s->b_field_mv_table_base[i][j][k]);
717 s->b_field_mv_table[i][j][k]=NULL;
719 av_freep(&s->b_field_select_table[i][j]);
720 av_freep(&s->p_field_mv_table_base[i][j]);
721 s->p_field_mv_table[i][j]=NULL;
723 av_freep(&s->p_field_select_table[i]);
726 av_freep(&s->dc_val_base);
727 av_freep(&s->coded_block_base);
728 av_freep(&s->mbintra_table);
729 av_freep(&s->cbp_table);
730 av_freep(&s->pred_dir_table);
732 av_freep(&s->mbskip_table);
733 av_freep(&s->prev_pict_types);
734 av_freep(&s->bitstream_buffer);
735 s->allocated_bitstream_buffer_size=0;
737 av_freep(&s->avctx->stats_out);
738 av_freep(&s->ac_stats);
739 av_freep(&s->error_status_table);
740 av_freep(&s->mb_index2xy);
741 av_freep(&s->lambda_table);
742 av_freep(&s->q_intra_matrix);
743 av_freep(&s->q_inter_matrix);
744 av_freep(&s->q_intra_matrix16);
745 av_freep(&s->q_inter_matrix16);
746 av_freep(&s->input_picture);
747 av_freep(&s->reordered_input_picture);
748 av_freep(&s->dct_offset);
/* release every Picture (pixels + side data) before the array itself */
751 for(i=0; i<MAX_PICTURE_COUNT; i++){
752 free_picture(s, &s->picture[i]);
755 av_freep(&s->picture);
756 s->context_initialized = 0;
759 s->current_picture_ptr= NULL;
760 s->linesize= s->uvlinesize= 0;
763 av_freep(&s->visualization_buffer[i]);
765 avcodec_default_free_buffers(s->avctx);
/**
 * Initialize the derived max_level[], max_run[] and index_run[] arrays of an
 * RLTable. If static_store is non-NULL the arrays live in the caller-provided
 * static buffer (and a prior initialization is detected and skipped);
 * otherwise they are av_malloc()ed.
 */
768 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
770 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
771 uint8_t index_run[MAX_RUN+1];
772 int last, run, level, start, end, i;
774 /* If table is static, we can quit if rl->max_level[0] is not NULL */
775 if(static_store && rl->max_level[0])
778 /* compute max_level[], max_run[] and index_run[] */
/* pass 0: non-last codes, pass 1: last codes */
779 for(last=0;last<2;last++) {
/* rl->n doubles as the "unset" marker in index_run[] */
788 memset(max_level, 0, MAX_RUN + 1);
789 memset(max_run, 0, MAX_LEVEL + 1);
790 memset(index_run, rl->n, MAX_RUN + 1);
791 for(i=start;i<end;i++) {
792 run = rl->table_run[i];
793 level = rl->table_level[i];
794 if (index_run[run] == rl->n)
796 if (level > max_level[run])
797 max_level[run] = level;
798 if (run > max_run[level])
799 max_run[level] = run;
/* copy the scratch arrays to their final (static or heap) location;
 * the three arrays are packed consecutively into static_store[last] */
802 rl->max_level[last] = static_store[last];
804 rl->max_level[last] = av_malloc(MAX_RUN + 1);
805 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
807 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
809 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
810 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
812 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
814 rl->index_run[last] = av_malloc(MAX_RUN + 1);
815 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the combined run/level/length VLC lookup tables (rl_vlc[q]) from an
 * RLTable's generic VLC, folding the per-qscale dequantization (qmul/qadd)
 * into the stored level.
 */
819 void init_vlc_rl(RLTable *rl)
831 for(i=0; i<rl->vlc.table_size; i++){
832 int code= rl->vlc.table[i][0];
833 int len = rl->vlc.table[i][1];
/* classify the VLC entry: invalid, subtable link, escape, or run/level */
836 if(len==0){ // illegal code
839 }else if(len<0){ //more bits needed
843 if(code==rl->n){ //esc
847 run= rl->table_run [code] + 1;
848 level= rl->table_level[code] * qmul + qadd;
/* "last" codes get run offset by 192 to mark end-of-block */
849 if(code >= rl->last) run+=192;
852 rl->rl_vlc[q][i].len= len;
853 rl->rl_vlc[q][i].level= level;
854 rl->rl_vlc[q][i].run= run;
/**
 * Find a free slot in s->picture[]. For shared pictures only completely
 * untyped empty slots qualify; otherwise previously-used empty slots are
 * preferred over untyped ones. A valid stream can never exhaust the array,
 * so running out is treated as an internal error.
 */
859 int ff_find_unused_picture(MpegEncContext *s, int shared){
863 for(i=0; i<MAX_PICTURE_COUNT; i++){
864 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
/* non-shared: prefer reusing a slot that already has side data allocated */
867 for(i=0; i<MAX_PICTURE_COUNT; i++){
868 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
870 for(i=0; i<MAX_PICTURE_COUNT; i++){
871 if(s->picture[i].data[0]==NULL) return i;
875 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
876 /* We could return -1, but the codec would crash trying to draw into a
877 * non-existing frame anyway. This is safer than waiting for a random crash.
878 * Also the return of this is never useful, an encoder must only allocate
879 * as much as allowed in the specification. This has no relationship to how
880 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
881 * enough for such valid streams).
882 * Plus, a decoder has to check stream validity and remove frames if too
883 * many reference frames are around. Waiting for "OOM" is not correct at
884 * all. Similarly, missing reference frames have to be replaced by
885 * interpolated/MC frames, anything else is a bug in the codec ...
/* Recompute the per-coefficient DCT offsets used for noise reduction from
 * the accumulated error sums, separately for intra and inter blocks.
 * Counts/sums are halved once dct_count exceeds 2^16 to keep a rolling
 * average and avoid overflow. */
891 static void update_noise_reduction(MpegEncContext *s){
894 for(intra=0; intra<2; intra++){
895 if(s->dct_count[intra] > (1<<16)){
897 s->dct_error_sum[intra][i] >>=1;
899 s->dct_count[intra] >>= 1;
/* +1 / /2 give rounding and protect against division by zero */
903 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
909 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
911 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
917 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
919 /* mark&release old frames */
/* the previous last_picture is no longer referenced once a new non-B
 * frame arrives (H.264 manages its own references, except SVQ3) */
920 if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
921 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
922 free_frame_buffer(s, s->last_picture_ptr);
924 /* release forgotten pictures */
925 /* if(mpeg124/h263) */
927 for(i=0; i<MAX_PICTURE_COUNT; i++){
928 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
929 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
930 free_frame_buffer(s, &s->picture[i]);
938 /* release non reference frames */
939 for(i=0; i<MAX_PICTURE_COUNT; i++){
940 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
941 free_frame_buffer(s, &s->picture[i]);
/* pick (or reuse) the Picture that will hold the current frame */
945 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
946 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
948 i= ff_find_unused_picture(s, 0);
/* reference semantics differ: H.264 keys off picture_structure,
 * others mark everything but B frames as reference */
954 if (s->codec_id == CODEC_ID_H264)
955 pic->reference = s->picture_structure;
956 else if (s->pict_type != FF_B_TYPE)
960 pic->coded_picture_number= s->coded_picture_number++;
962 if(ff_alloc_picture(s, pic, 0) < 0)
965 s->current_picture_ptr= pic;
966 //FIXME use only the vars from current_pic
967 s->current_picture_ptr->top_field_first= s->top_field_first;
968 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
969 if(s->picture_structure != PICT_FRAME)
970 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
972 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
975 s->current_picture_ptr->pict_type= s->pict_type;
976 // if(s->flags && CODEC_FLAG_QSCALE)
977 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
978 s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
980 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* rotate reference pointers for non-B frames */
982 if (s->pict_type != FF_B_TYPE) {
983 s->last_picture_ptr= s->next_picture_ptr;
985 s->next_picture_ptr= s->current_picture_ptr;
987 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
988 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
989 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
990 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
991 s->pict_type, s->dropable);*/
/* streams that start on a non-keyframe get dummy reference frames so
 * motion compensation has something to read from */
993 if(s->codec_id != CODEC_ID_H264){
994 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
995 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
996 /* Allocate a dummy frame */
997 i= ff_find_unused_picture(s, 0);
998 s->last_picture_ptr= &s->picture[i];
999 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1002 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
1003 /* Allocate a dummy frame */
1004 i= ff_find_unused_picture(s, 0);
1005 s->next_picture_ptr= &s->picture[i];
1006 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1011 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1012 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1014 assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* field pictures: address only every second line via doubled linesize,
 * bottom field starting one line down */
1016 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1019 if(s->picture_structure == PICT_BOTTOM_FIELD){
1020 s->current_picture.data[i] += s->current_picture.linesize[i];
1022 s->current_picture.linesize[i] *= 2;
1023 s->last_picture.linesize[i] *=2;
1024 s->next_picture.linesize[i] *=2;
1028 s->hurry_up= s->avctx->hurry_up;
1029 s->error_recognition= avctx->error_recognition;
1031 /* set dequantizer, we can't do it during init as it might change for mpeg4
1032 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1033 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1034 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1035 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1036 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1037 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1038 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1040 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1041 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1044 if(s->dct_error_sum){
1045 assert(s->avctx->noise_reduction && s->encoding);
1047 update_noise_reduction(s);
1050 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1051 return ff_xvmc_field_start(s, avctx);
1056 /* generic function for encode/decode called after a frame has been coded/decoded */
/*
 * Finalizes the frame that was just coded/decoded:
 *  - pads the picture edges (required so unrestricted motion vectors that
 *    point outside the frame read valid pixels), unless a hardware
 *    accelerator owns the surfaces;
 *  - records the picture type/lambda for later rate-control decisions;
 *  - copies the local current_picture back into the picture[] array slot
 *    that shares its data pointer;
 *  - frees all non-reference frame buffers;
 *  - clears the local Picture copies and publishes coded_frame.
 * NOTE(review): the embedded original line numbers are not contiguous here,
 * so some statements (declarations, closing braces) are elided from this
 * listing — do not treat the visible text as the complete function body.
 */
1057 void MPV_frame_end(MpegEncContext *s)
1060 /* draw edge for correct motion prediction if outside */
1061 //just to make sure that all data is rendered.
1062 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1063 ff_xvmc_field_end(s);
1064 }else if(!s->avctx->hwaccel
1065 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1066 && s->unrestricted_mv
1067 && s->current_picture.reference
1069 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
/* luma plane: full-width edge padding */
1070 s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
1071 s->h_edge_pos , s->v_edge_pos ,
1072 EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
/* chroma planes padded at half size (>>1 in both dimensions; assumes 4:2:0
 * subsampling — TODO confirm for other chroma formats) */
1073 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
1074 s->h_edge_pos>>1, s->v_edge_pos>>1,
1075 EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
1076 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
1077 s->h_edge_pos>>1, s->v_edge_pos>>1,
1078 EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
/* remember the type and quality of this picture for the next frame's decisions */
1082 s->last_pict_type = s->pict_type;
1083 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1084 if(s->pict_type!=FF_B_TYPE){
1085 s->last_non_b_pict_type= s->pict_type;
1088 /* copy back current_picture variables */
/* find the picture[] slot whose buffer is the current picture and sync it */
1089 for(i=0; i<MAX_PICTURE_COUNT; i++){
1090 if(s->picture[i].data[0] == s->current_picture.data[0]){
1091 s->picture[i]= s->current_picture;
1095 assert(i<MAX_PICTURE_COUNT);
1099 /* release non-reference frames */
1100 for(i=0; i<MAX_PICTURE_COUNT; i++){
1101 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1102 free_frame_buffer(s, &s->picture[i]);
1106 // clear copies, to avoid confusion
1108 memset(&s->last_picture, 0, sizeof(Picture));
1109 memset(&s->next_picture, 0, sizeof(Picture));
1110 memset(&s->current_picture, 0, sizeof(Picture));
/* expose the decoded/encoded frame to the API user */
1112 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1116 * draws a line from (ex, ey) -> (sx, sy).
1117 * @param w width of the image
1118 * @param h height of the image
1119 * @param stride stride/linesize of the image
1120 * @param color color of the line (added into the plane, not assigned)
/*
 * Anti-aliased line drawer in 16.16 fixed point, used by the motion-vector
 * visualization.  Endpoints are clipped to the image, then the major axis is
 * chosen (x-major vs y-major) and the fractional coverage of each step is
 * split between the two adjacent pixels.
 * NOTE(review): original line numbers jump inside this listing — the local
 * declarations (x, y, f, fr) and several braces/else lines are elided.
 */
1122 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clip both endpoints into the image so the pixel writes below stay in bounds */
1125 sx= av_clip(sx, 0, w-1);
1126 sy= av_clip(sy, 0, h-1);
1127 ex= av_clip(ex, 0, w-1);
1128 ey= av_clip(ey, 0, h-1);
1130 buf[sy*stride + sx]+= color;
1132 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* x-major: ensure we iterate with increasing x (swap endpoints if needed) */
1134 FFSWAP(int, sx, ex);
1135 FFSWAP(int, sy, ey);
1137 buf+= sx + sy*stride;
/* slope in 16.16 fixed point; ex here is the (relative) x run length */
1139 f= ((ey-sy)<<16)/ex;
1140 for(x= 0; x <= ex; x++){
/* distribute coverage between the two vertically adjacent pixels */
1143 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1144 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* y-major branch: same scheme with the axes exchanged */
1148 FFSWAP(int, sx, ex);
1149 FFSWAP(int, sy, ey);
1151 buf+= sx + sy*stride;
1153 if(ey) f= ((ex-sx)<<16)/ey;
1155 for(y= 0; y <= ey; y++){
1158 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1159 buf[y*stride + x+1]+= (color* fr )>>16;
1165 * draws an arrow from (ex, ey) -> (sx, sy).
1166 * @param w width of the image
1167 * @param h height of the image
1168 * @param stride stride/linesize of the image
1169 * @param color color of the arrow
/*
 * Draws a motion-vector arrow: the shaft via draw_line() plus, for vectors
 * longer than 3 pixels, two short head strokes at the (sx, sy) end.
 * Endpoints are pre-clipped with a 100-pixel margin so draw_line()'s own
 * hard clipping does not distort the direction too much.
 * NOTE(review): listing is elided (dx/dy/rx/ry setup lines are missing).
 */
1171 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1174 sx= av_clip(sx, -100, w+100);
1175 sy= av_clip(sy, -100, h+100);
1176 ex= av_clip(ex, -100, w+100);
1177 ey= av_clip(ey, -100, h+100);
/* only draw a head when the vector is long enough to need one (len > 3) */
1182 if(dx*dx + dy*dy > 3*3){
/* <<8 keeps precision inside the integer square root */
1185 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1187 //FIXME subpixel accuracy
/* normalize the head strokes to a fixed on-screen length */
1188 rx= ROUNDED_DIV(rx*3<<4, length);
1189 ry= ROUNDED_DIV(ry*3<<4, length);
/* the two head strokes are the direction vector and its 90° rotation */
1191 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1192 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1194 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1198 * prints debugging info for the given picture.
/*
 * Emits per-macroblock debug output depending on s->avctx->debug flags:
 *  - FF_DEBUG_SKIP / FF_DEBUG_QP / FF_DEBUG_MB_TYPE: textual per-MB table
 *    via av_log (one character/number per MB);
 *  - FF_DEBUG_VIS_QP / FF_DEBUG_VIS_MB_TYPE / debug_mv: draws the
 *    information directly into a copy of the picture (motion-vector arrows,
 *    QP shading, MB-type coloring and partition grid).
 * Returns immediately for hwaccel decoding or when no mb_type table exists.
 * NOTE(review): original line numbers jump throughout — many declarations,
 * braces, `else`/`continue` lines and loop headers are elided from view.
 */
1200 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1202 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1204 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1207 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1208 switch (pict->pict_type) {
1209 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1210 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1211 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1212 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1213 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1214 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
/* one table row per macroblock row */
1216 for(y=0; y<s->mb_height; y++){
1217 for(x=0; x<s->mb_width; x++){
1218 if(s->avctx->debug&FF_DEBUG_SKIP){
/* skip counter is clamped to a single printable digit */
1219 int count= s->mbskip_table[x + y*s->mb_stride];
1220 if(count>9) count=9;
1221 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1223 if(s->avctx->debug&FF_DEBUG_QP){
1224 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1226 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1227 int mb_type= pict->mb_type[x + y*s->mb_stride];
1228 //Type & MV direction
1230 av_log(s->avctx, AV_LOG_DEBUG, "P");
1231 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1232 av_log(s->avctx, AV_LOG_DEBUG, "A");
1233 else if(IS_INTRA4x4(mb_type))
1234 av_log(s->avctx, AV_LOG_DEBUG, "i");
1235 else if(IS_INTRA16x16(mb_type))
1236 av_log(s->avctx, AV_LOG_DEBUG, "I");
1237 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1238 av_log(s->avctx, AV_LOG_DEBUG, "d");
1239 else if(IS_DIRECT(mb_type))
1240 av_log(s->avctx, AV_LOG_DEBUG, "D");
1241 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1242 av_log(s->avctx, AV_LOG_DEBUG, "g");
1243 else if(IS_GMC(mb_type))
1244 av_log(s->avctx, AV_LOG_DEBUG, "G");
1245 else if(IS_SKIP(mb_type))
1246 av_log(s->avctx, AV_LOG_DEBUG, "S");
1247 else if(!USES_LIST(mb_type, 1))
1248 av_log(s->avctx, AV_LOG_DEBUG, ">");
1249 else if(!USES_LIST(mb_type, 0))
1250 av_log(s->avctx, AV_LOG_DEBUG, "<");
1252 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1253 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning (+ 16x16, - 16x8, | 8x16) */
1258 av_log(s->avctx, AV_LOG_DEBUG, "+");
1259 else if(IS_16X8(mb_type))
1260 av_log(s->avctx, AV_LOG_DEBUG, "-");
1261 else if(IS_8X16(mb_type))
1262 av_log(s->avctx, AV_LOG_DEBUG, "|");
1263 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1264 av_log(s->avctx, AV_LOG_DEBUG, " ");
1266 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: '=' marks interlaced macroblocks */
1269 if(IS_INTERLACED(mb_type))
1270 av_log(s->avctx, AV_LOG_DEBUG, "=");
1272 av_log(s->avctx, AV_LOG_DEBUG, " ");
1274 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1276 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ------- visual (in-picture) debug rendering ------- */
1280 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
/* shift converts stored MV precision to pixel units (qpel needs one more) */
1281 const int shift= 1 + s->quarter_sample;
1285 int h_chroma_shift, v_chroma_shift, block_height;
1286 const int width = s->avctx->width;
1287 const int height= s->avctx->height;
1288 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1289 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1290 s->low_delay=0; //needed to see the vectors without trashing the buffers
1292 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* draw into a private copy so the reference buffers are not modified */
1294 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1295 pict->data[i]= s->visualization_buffer[i];
1296 pict->type= FF_BUFFER_TYPE_COPY;
1299 block_height = 16>>v_chroma_shift;
1301 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1303 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1304 const int mb_index= mb_x + mb_y*s->mb_stride;
1305 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0 = P forward, 1 = B forward, 2 = B backward */
1307 for(type=0; type<3; type++){
1310 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1314 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1318 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1323 if(!USES_LIST(pict->mb_type[mb_index], direction))
1326 if(IS_8X8(pict->mb_type[mb_index])){
/* four arrows, one from the center of each 8x8 sub-block */
1329 int sx= mb_x*16 + 4 + 8*(i&1);
1330 int sy= mb_y*16 + 4 + 8*(i>>1);
1331 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1332 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1333 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1334 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1336 }else if(IS_16X8(pict->mb_type[mb_index])){
1340 int sy=mb_y*16 + 4 + 8*i;
1341 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1342 int mx=(pict->motion_val[direction][xy][0]>>shift);
1343 int my=(pict->motion_val[direction][xy][1]>>shift);
1345 if(IS_INTERLACED(pict->mb_type[mb_index]))
1348 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1350 }else if(IS_8X16(pict->mb_type[mb_index])){
1353 int sx=mb_x*16 + 4 + 8*i;
1355 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1356 int mx=(pict->motion_val[direction][xy][0]>>shift);
1357 int my=(pict->motion_val[direction][xy][1]>>shift);
1359 if(IS_INTERLACED(pict->mb_type[mb_index]))
1362 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* default: single 16x16 vector from the MB center */
1365 int sx= mb_x*16 + 8;
1366 int sy= mb_y*16 + 8;
1367 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1368 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1369 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1370 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* shade both chroma planes with a level proportional to the MB's qscale */
1374 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1375 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1377 for(y=0; y<block_height; y++){
1378 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1379 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1382 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1383 int mb_type= pict->mb_type[mb_index];
/* map an (angle, radius) hue to U/V so each MB type gets its own color */
1386 #define COLOR(theta, r)\
1387 u= (int)(128 + r*cos(theta*3.141592/180));\
1388 v= (int)(128 + r*sin(theta*3.141592/180));
1392 if(IS_PCM(mb_type)){
1394 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1396 }else if(IS_INTRA4x4(mb_type)){
1398 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1400 }else if(IS_DIRECT(mb_type)){
1402 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1404 }else if(IS_GMC(mb_type)){
1406 }else if(IS_SKIP(mb_type)){
1408 }else if(!USES_LIST(mb_type, 1)){
1410 }else if(!USES_LIST(mb_type, 0)){
1413 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the single-byte chroma value across a 64-bit store */
1417 u*= 0x0101010101010101ULL;
1418 v*= 0x0101010101010101ULL;
1419 for(y=0; y<block_height; y++){
1420 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1421 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR a mid-gray grid onto luma to mark the partition boundaries */
1425 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1426 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1427 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1429 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1431 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1433 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1434 int dm= 1 << (mv_sample_log2-2);
1436 int sx= mb_x*16 + 8*(i&1);
1437 int sy= mb_y*16 + 8*(i>>1);
1438 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* mark sub-8x8 splits only where the finer vectors actually differ */
1440 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1441 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1443 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1444 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1445 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1449 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset so the same MB is not reported as skipped again next frame */
1453 s->mbskip_table[mb_index]=0;
/*
 * Half-pel motion compensation of one w x h block for low-resolution
 * (lowres) decoding.  Splits the motion vector into an integer part (source
 * offset) and a fractional part (sx/sy) that is handed to the chroma-style
 * MC function; falls back to the edge-emulation buffer when the source
 * block would read outside the padded picture.
 * NOTE(review): listing is elided (local declarations, quarter-sample
 * adjustment body and closing braces are not visible here).
 */
1459 static inline int hpel_motion_lowres(MpegEncContext *s,
1460 uint8_t *dest, uint8_t *src,
1461 int field_based, int field_select,
1462 int src_x, int src_y,
1463 int width, int height, int stride,
1464 int h_edge_pos, int v_edge_pos,
1465 int w, int h, h264_chroma_mc_func *pix_op,
1466 int motion_x, int motion_y)
1468 const int lowres= s->avctx->lowres;
/* the MC function table only has entries for lowres 0..2 */
1469 const int op_index= FFMIN(lowres, 2);
/* mask selecting the sub-pel fraction at this lowres level */
1470 const int s_mask= (2<<lowres)-1;
1474 if(s->quarter_sample){
1479 sx= motion_x & s_mask;
1480 sy= motion_y & s_mask;
/* integer part of the vector, scaled down by the lowres factor */
1481 src_x += motion_x >> (lowres+1);
1482 src_y += motion_y >> (lowres+1);
1484 src += src_y * stride + src_x;
/* unsigned compare also catches negative src_x/src_y */
1486 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1487 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1488 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1489 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1490 src= s->edge_emu_buffer;
/* rescale the fraction to the 1/8-pel range expected by pix_op */
1494 sx= (sx << 2) >> lowres;
1495 sy= (sy << 2) >> lowres;
1498 pix_op[op_index](dest, src, stride, h, sx, sy);
1502 /* apply one mpeg motion vector to the three components */
/*
 * Motion-compensates one luma block plus its two chroma blocks for lowres
 * decoding, handling frame/field pictures (field_based/bottom_field/
 * field_select) and the per-format chroma MV derivation (H.263 special
 * rounding, H.261 full-pel chroma, MPEG-style halving otherwise).
 * NOTE(review): original line numbers jump — several branches (quarter
 * sample handling, else arms, closing braces) are elided from this listing.
 */
1503 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1504 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1505 int field_based, int bottom_field, int field_select,
1506 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1507 int motion_x, int motion_y, int h, int mb_y)
1509 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1510 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1511 const int lowres= s->avctx->lowres;
1512 const int op_index= FFMIN(lowres, 2);
/* block size shrinks with each lowres level: 8, 4, 2, ... */
1513 const int block_s= 8>>lowres;
1514 const int s_mask= (2<<lowres)-1;
1515 const int h_edge_pos = s->h_edge_pos >> lowres;
1516 const int v_edge_pos = s->v_edge_pos >> lowres;
1517 linesize = s->current_picture.linesize[0] << field_based;
1518 uvlinesize = s->current_picture.linesize[1] << field_based;
1520 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate for referencing the other field at reduced resolution */
1526 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1529 sx= motion_x & s_mask;
1530 sy= motion_y & s_mask;
1531 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1532 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1534 if (s->out_format == FMT_H263) {
/* H.263 chroma: halve the MV but keep the luma fraction's low bit */
1535 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1536 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1539 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1542 uvsx = (2*mx) & s_mask;
1543 uvsy = (2*my) & s_mask;
1544 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1545 uvsrc_y = mb_y*block_s + (my >> lowres);
/* default (MPEG-style) chroma source position */
1551 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1552 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1555 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1556 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1557 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads go through the edge-emulation buffer */
1559 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1560 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1561 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1562 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1563 ptr_y = s->edge_emu_buffer;
1564 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1565 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1566 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1567 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1568 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1569 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1575 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1576 dest_y += s->linesize;
1577 dest_cb+= s->uvlinesize;
1578 dest_cr+= s->uvlinesize;
1582 ptr_y += s->linesize;
1583 ptr_cb+= s->uvlinesize;
1584 ptr_cr+= s->uvlinesize;
/* convert the sub-pel fraction to the 1/8-pel scale used by pix_op */
1587 sx= (sx << 2) >> lowres;
1588 sy= (sy << 2) >> lowres;
1589 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1591 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1592 uvsx= (uvsx << 2) >> lowres;
1593 uvsy= (uvsy << 2) >> lowres;
1594 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1595 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1597 //FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV (four 8x8 luma vectors) macroblocks in
 * lowres mode: a single chroma vector is derived from the luma vectors via
 * the H.263 rounding helper, then one block is compensated in each of the
 * Cb and Cr planes, with edge emulation when the read leaves the picture.
 * NOTE(review): this listing is elided (signature tail, mx/my parameters,
 * ptr declaration and several braces are missing from view).
 */
1600 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1601 uint8_t *dest_cb, uint8_t *dest_cr,
1602 uint8_t **ref_picture,
1603 h264_chroma_mc_func *pix_op,
1605 const int lowres= s->avctx->lowres;
1606 const int op_index= FFMIN(lowres, 2);
1607 const int block_s= 8>>lowres;
1608 const int s_mask= (2<<lowres)-1;
/* chroma is subsampled, hence the extra >>1 relative to luma edges */
1609 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1610 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1611 int emu=0, src_x, src_y, offset, sx, sy;
1614 if(s->quarter_sample){
1619 /* In case of 8X8, we construct a single chroma motion vector
1620 with a special rounding */
1621 mx= ff_h263_round_chroma(mx);
1622 my= ff_h263_round_chroma(my);
1626 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1627 src_y = s->mb_y*block_s + (my >> (lowres+1));
1629 offset = src_y * s->uvlinesize + src_x;
1630 ptr = ref_picture[1] + offset;
1631 if(s->flags&CODEC_FLAG_EMU_EDGE){
1632 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1633 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1634 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1635 ptr= s->edge_emu_buffer;
1639 sx= (sx << 2) >> lowres;
1640 sy= (sy << 2) >> lowres;
1641 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr reuses the same offset and emu decision computed for Cb */
1643 ptr = ref_picture[2] + offset;
1645 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1646 ptr= s->edge_emu_buffer;
1648 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1652 * motion compensation of a single macroblock
1654 * @param dest_y luma destination pointer
1655 * @param dest_cb chroma cb/u destination pointer
1656 * @param dest_cr chroma cr/v destination pointer
1657 * @param dir direction (0->forward, 1->backward)
1658 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1659 * @param pix_op halfpel motion compensation function (average or put normally)
1660 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * Lowres counterpart of MPV_motion(): dispatches on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime-style) to the lowres MC helpers.
 * NOTE(review): the listing is elided — the switch case labels, loop
 * headers and several braces are missing between the visible lines.
 */
1662 static inline void MPV_motion_lowres(MpegEncContext *s,
1663 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1664 int dir, uint8_t **ref_picture,
1665 h264_chroma_mc_func *pix_op)
1669 const int lowres= s->avctx->lowres;
1670 const int block_s= 8>>lowres;
1675 switch(s->mv_type) {
/* single 16x16 vector */
1677 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1679 ref_picture, pix_op,
1680 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one half-pel MC per 8x8 luma sub-block, chroma done once below */
1686 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1687 ref_picture[0], 0, 0,
1688 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1689 s->width, s->height, s->linesize,
1690 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1691 block_s, block_s, pix_op,
1692 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four vectors to derive the single chroma vector */
1694 mx += s->mv[dir][i][0];
1695 my += s->mv[dir][i][1];
1698 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1699 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field MVs inside a frame picture: compensate each field separately */
1702 if (s->picture_structure == PICT_FRAME) {
1704 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1705 1, 0, s->field_select[dir][0],
1706 ref_picture, pix_op,
1707 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1709 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1710 1, 1, s->field_select[dir][1],
1711 ref_picture, pix_op,
1712 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* field picture referencing the opposite parity of the current frame */
1714 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
1715 ref_picture= s->current_picture_ptr->data;
1718 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1719 0, 0, s->field_select[dir][0],
1720 ref_picture, pix_op,
1721 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* 16x8: two vectors per field picture, possibly from different frames */
1726 uint8_t ** ref2picture;
1728 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
1729 ref2picture= ref_picture;
1731 ref2picture= s->current_picture_ptr->data;
1734 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1735 0, 0, s->field_select[dir][i],
1736 ref2picture, pix_op,
1737 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1739 dest_y += 2*block_s*s->linesize;
1740 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1741 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1745 if(s->picture_structure == PICT_FRAME){
1749 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1751 ref_picture, pix_op,
1752 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
/* after the first 'put', the remaining predictions are averaged in */
1754 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1758 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1759 0, 0, s->picture_structure != i+1,
1760 ref_picture, pix_op,
1761 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1763 // after put we make avg of the same block
1764 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1766 //opposite parity is always in the same frame if this is second field
1767 if(!s->first_field){
1768 ref_picture = s->current_picture_ptr->data;
1777 /* put block[] to dest[] */
/* Dequantizes an intra block in place and writes (overwrites) its inverse
 * DCT into dest.  i is the block index within the MB, qscale the quantizer.
 * NOTE(review): braces elided from this listing. */
1778 static inline void put_dct(MpegEncContext *s,
1779 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1781 s->dct_unquantize_intra(s, block, i, qscale);
1782 s->dsp.idct_put (dest, line_size, block);
1785 /* add block[] to dest[] */
/* Adds the inverse DCT of an (already dequantized) block onto dest; skipped
 * entirely when the block has no coded coefficients (last_index < 0).
 * NOTE(review): braces elided from this listing. */
1786 static inline void add_dct(MpegEncContext *s,
1787 DCTELEM *block, int i, uint8_t *dest, int line_size)
1789 if (s->block_last_index[i] >= 0) {
1790 s->dsp.idct_add (dest, line_size, block);
/* Inter-block variant of add_dct(): dequantizes first, then adds the
 * inverse DCT onto dest; no-op for blocks without coded coefficients.
 * NOTE(review): braces elided from this listing. */
1794 static inline void add_dequant_dct(MpegEncContext *s,
1795 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1797 if (s->block_last_index[i] >= 0) {
1798 s->dct_unquantize_inter(s, block, i, qscale);
1800 s->dsp.idct_add (dest, line_size, block);
1805 * cleans dc, ac, coded_block for the current non intra MB
/*
 * Resets the intra prediction state for the current macroblock when it is
 * coded as non-intra: DC predictors back to the neutral value 1024, AC
 * prediction arrays zeroed, MSMPEG4 coded_block flags cleared, and the
 * mbintra_table entry unset so this MB is no longer treated as intra.
 * NOTE(review): listing elided — some assignments/braces are not visible.
 */
1807 void ff_clean_intra_table_entries(MpegEncContext *s)
1809 int wrap = s->b8_stride;
1810 int xy = s->block_index[0];
/* luma DC predictors for the 2x2 group of 8x8 blocks */
1813 s->dc_val[0][xy + 1 ] =
1814 s->dc_val[0][xy + wrap] =
1815 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: two rows of two blocks, 16 coeffs each */
1817 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1818 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1819 if (s->msmpeg4_version>=3) {
1820 s->coded_block[xy ] =
1821 s->coded_block[xy + 1 ] =
1822 s->coded_block[xy + wrap] =
1823 s->coded_block[xy + 1 + wrap] = 0;
/* chroma uses the per-MB grid, not the per-8x8 grid */
1826 wrap = s->mb_stride;
1827 xy = s->mb_x + s->mb_y * wrap;
1829 s->dc_val[2][xy] = 1024;
1831 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1832 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1834 s->mbintra_table[xy]= 0;
1837 /* generic function called after a macroblock has been parsed by the
1838 decoder or after it has been encoded by the encoder.
1840 Important variables used:
1841 s->mb_intra : true if intra macroblock
1842 s->mv_dir : motion vector direction
1843 s->mv_type : motion vector type
1844 s->mv : motion vector
1845 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstructs one macroblock into the current picture: dispatches to XvMC,
 * updates intra prediction bookkeeping, handles the skipped-MB fast path,
 * runs motion compensation (lowres or full-res), then adds or puts the
 * IDCT residue per codec family (with/without per-block dequantization).
 * lowres_flag and is_mpeg12 are compile-time-ish selectors so the
 * av_always_inline body specializes per caller (see MPV_decode_mb).
 * NOTE(review): the embedded line numbers jump throughout this listing —
 * loop headers, else arms and braces are elided between the visible lines.
 */
1847 static av_always_inline
1848 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1849 int lowres_flag, int is_mpeg12)
1851 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1852 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1853 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1857 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1858 /* save DCT coefficients */
1860 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
/* store coefficients in natural order (undo the IDCT permutation) */
1863 *dct++ = block[i][s->dsp.idct_permutation[j]];
1866 s->current_picture.qscale_table[mb_xy]= s->qscale;
1868 /* update DC predictors for P macroblocks */
1870 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1871 if(s->mbintra_table[mb_xy])
1872 ff_clean_intra_table_entries(s);
1876 s->last_dc[2] = 128 << s->intra_dc_precision;
1879 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1880 s->mbintra_table[mb_xy]=1;
/* actual pixel reconstruction is skipped for RD-only encoder passes */
1882 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1883 uint8_t *dest_y, *dest_cb, *dest_cr;
1884 int dct_linesize, dct_offset;
1885 op_pixels_func (*op_pix)[4];
1886 qpel_mc_func (*op_qpix)[16];
1887 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1888 const int uvlinesize= s->current_picture.linesize[1];
1889 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1890 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1892 /* avoid copy if macroblock skipped in last frame too */
1893 /* skip only during decoding as we might trash the buffers during encoding a bit */
1895 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1896 const int age= s->current_picture.age;
1900 if (s->mb_skipped) {
1902 assert(s->pict_type!=FF_I_TYPE);
1904 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1905 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1907 /* if previous was skipped too, then nothing to do ! */
/* the aging scheme: if the MB has been skipped for >= age frames the
 * reference already holds the right pixels, so nothing is copied */
1908 if (*mbskip_ptr >= age && s->current_picture.reference){
1911 } else if(!s->current_picture.reference){
1912 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1913 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1915 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double stride, offset one line */
1919 dct_linesize = linesize << s->interlaced_dct;
1920 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1924 dest_cb= s->dest[1];
1925 dest_cr= s->dest[2];
/* unreadable destination: reconstruct into the scratchpad instead */
1927 dest_y = s->b_scratchpad;
1928 dest_cb= s->b_scratchpad+16*linesize;
1929 dest_cr= s->b_scratchpad+32*linesize;
1933 /* motion handling */
1934 /* decoding or more than one mb_type (MC was already done otherwise) */
/* lowres path: chroma-style MC functions, put then avg for bidirectional */
1937 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
1939 if (s->mv_dir & MV_DIR_FORWARD) {
1940 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
1941 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1943 if (s->mv_dir & MV_DIR_BACKWARD) {
1944 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
/* full-resolution path */
1947 op_qpix= s->me.qpel_put;
1948 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
1949 op_pix = s->dsp.put_pixels_tab;
1951 op_pix = s->dsp.put_no_rnd_pixels_tab;
1953 if (s->mv_dir & MV_DIR_FORWARD) {
1954 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1955 op_pix = s->dsp.avg_pixels_tab;
1956 op_qpix= s->me.qpel_avg;
1958 if (s->mv_dir & MV_DIR_BACKWARD) {
1959 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1964 /* skip dequant / idct if we are really late ;) */
1965 if(s->hurry_up>1) goto skip_idct;
1966 if(s->avctx->skip_idct){
1967 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
1968 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
1969 || s->avctx->skip_idct >= AVDISCARD_ALL)
1973 /* add dct residue */
/* codecs whose blocks are not yet dequantized: dequantize + add */
1974 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1975 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1976 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1977 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1978 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1979 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1981 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1982 if (s->chroma_y_shift){
1983 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1984 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
/* 4:2:2-style chroma: two blocks per plane */
1988 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1989 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1990 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1991 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* blocks already dequantized during parsing: plain IDCT-add */
1994 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
1995 add_dct(s, block[0], 0, dest_y , dct_linesize);
1996 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1997 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1998 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2000 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2001 if(s->chroma_y_shift){//Chroma420
2002 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2003 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2006 dct_linesize = uvlinesize << s->interlaced_dct;
2007 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2009 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2010 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2011 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2012 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2013 if(!s->chroma_x_shift){//Chroma444
2014 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2015 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2016 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2017 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2022 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2023 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2026 /* dct only in intra block */
/* intra macroblock: overwrite (put) instead of adding a residue */
2027 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2028 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2029 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2030 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2031 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2033 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2034 if(s->chroma_y_shift){
2035 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2036 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2040 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2041 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2042 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2043 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* already-dequantized intra blocks (MPEG-1/2 decode): direct idct_put */
2047 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2048 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2049 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2050 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2052 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2053 if(s->chroma_y_shift){
2054 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2055 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2058 dct_linesize = uvlinesize << s->interlaced_dct;
2059 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2061 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2062 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2063 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2064 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2065 if(!s->chroma_x_shift){//Chroma444
2066 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2067 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2068 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2069 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* copy the scratchpad reconstruction back into the real destination */
2077 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2078 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2079 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Decode (dequantize + IDCT + put/add) one macroblock's coefficient blocks
 * into the current picture, dispatching to the internal worker with the
 * correct (lowres, is_mpeg12) template parameters.
 * NOTE(review): lines are elided from this extract — the "} else {" between
 * the two dispatch pairs and the closing brace are not visible. Code left
 * byte-identical.
 */
2084 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2086 if(s->out_format == FMT_MPEG1) {
/* MPEG-1/2 output: last argument (1) selects the MPEG-1/2 code path */
2087 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2088 else MPV_decode_mb_internal(s, block, 0, 1);
/* non-MPEG-1/2 output path (presumably the else branch of the test above) */
2091 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2092 else MPV_decode_mb_internal(s, block, 0, 0);
/**
 * Invoke the user-supplied draw_horiz_band() callback for a decoded band.
 *
 * @param y the top row of the band, in coded (field-aware) coordinates
 * NOTE(review): lines are elided from this extract (variable declarations
 * for src/offset, several braces/else branches). Code left byte-identical.
 */
2097 * @param h is the normal height, this will be reduced automatically if needed for the last row
2099 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2100 if (s->avctx->draw_horiz_band) {
2102 const int field_pic= s->picture_structure != PICT_FRAME;
/* clip the band height so it never extends past the (field) picture bottom */
2105 h= FFMIN(h, (s->avctx->height>>field_pic) - y);
/* if the caller cannot handle per-field bands, only report full frames */
2107 if(s->field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
2110 if(s->first_field) return;
/* pick the picture to hand out: current picture when frames are emitted in
 * coded order (B-frame, low delay, or explicitly requested), otherwise the
 * previous picture so output follows display order */
2113 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2114 src= (AVFrame*)s->current_picture_ptr;
2115 else if(s->last_picture_ptr)
2116 src= (AVFrame*)s->last_picture_ptr;
2120 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band start within each plane */
2126 offset[0]= y * s->linesize;
2128 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2134 s->avctx->draw_horiz_band(s->avctx, src, offset,
2135 y, s->picture_structure, h);
/**
 * Initialize per-macroblock state: the motion-vector/coded-block indices
 * (block_index[0..5]) and the destination plane pointers (dest[0..2]) for
 * the macroblock at (s->mb_x, s->mb_y).
 * NOTE(review): lines are elided from this extract (the else between the
 * PICT_FRAME and field branches, closing braces). Code left byte-identical.
 */
2139 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2140 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2141 const int uvlinesize= s->current_picture.linesize[1];
/* log2 of the macroblock size in pixels; lowres halves it per level */
2142 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 blocks addressed on the b8 grid; "-2"/"-1" point at the
 * blocks left of the current MB (prediction neighbors) */
2144 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2145 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2146 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2147 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma blocks live after the luma area, on the mb grid */
2148 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2149 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2150 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* horizontal position of this MB within each plane (x-1 MBs to the left) */
2152 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2153 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2154 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2156 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2158 if(s->picture_structure==PICT_FRAME){
/* frame picture: advance by mb_y full macroblock rows */
2159 s->dest[0] += s->mb_y * linesize << mb_size;
2160 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2161 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture (presumably the else branch): rows are interleaved, so
 * advance by mb_y/2 rows — TODO confirm against the elided else */
2163 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2164 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2165 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2166 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush decoder state: release all internally/user allocated picture
 * buffers, drop the reference-picture pointers, and reset the bitstream
 * parse context so decoding can restart cleanly (e.g. after a seek).
 * NOTE(review): lines are elided from this extract (declaration of i,
 * early return, closing braces). Code left byte-identical.
 */
2171 void ff_mpeg_flush(AVCodecContext *avctx){
2173 MpegEncContext *s = avctx->priv_data;
2175 if(s==NULL || s->picture==NULL)
/* free every picture buffer this codec owns (internal or user-supplied) */
2178 for(i=0; i<MAX_PICTURE_COUNT; i++){
2179 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2180 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2181 free_frame_buffer(s, &s->picture[i]);
/* no valid references remain after the buffers are gone */
2183 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2185 s->mb_x= s->mb_y= 0;
/* reset the start-code parser so stale partial data is discarded */
2188 s->parse_context.state= -1;
2189 s->parse_context.frame_start_found= 0;
2190 s->parse_context.overread= 0;
2191 s->parse_context.overread_index= 0;
2192 s->parse_context.index= 0;
2193 s->parse_context.last_index= 0;
2194 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra-block dequantization (C reference implementation).
 * DC (block[0]) is scaled by the luma/chroma DC scale; AC coefficients are
 * scaled by qscale * intra matrix with a >>3 shift, then forced odd
 * ((level-1)|1) for MPEG-1 mismatch control.
 * NOTE(review): lines are elided from this extract (level fetch, sign
 * branches, saturation, closing braces). Code left byte-identical.
 */
2198 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2199 DCTELEM *block, int n, int qscale)
2201 int i, level, nCoeffs;
2202 const uint16_t *quant_matrix;
2204 nCoeffs= s->block_last_index[n];
/* DC uses a separate scale; n selects luma vs chroma (elided branch) */
2207 block[0] = block[0] * s->y_dc_scale;
2209 block[0] = block[0] * s->c_dc_scale;
2210 /* XXX: only mpeg1 */
2211 quant_matrix = s->intra_matrix;
2212 for(i=1;i<=nCoeffs;i++) {
/* walk coefficients in scan order via the permutation table */
2213 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign handling elided from this extract) */
2218 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2219 level = (level - 1) | 1; /* force odd: MPEG-1 mismatch control */
/* positive-level branch */
2222 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2223 level = (level - 1) | 1;
/**
 * MPEG-1 inter-block dequantization (C reference implementation).
 * All coefficients (including index 0) use the inter matrix with the
 * (2*level+1) * qscale * matrix >> 4 formula, then are forced odd
 * ((level-1)|1) for MPEG-1 mismatch control.
 * NOTE(review): lines are elided from this extract (level fetch, sign
 * branches, saturation, closing braces). Code left byte-identical.
 */
2230 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2231 DCTELEM *block, int n, int qscale)
2233 int i, level, nCoeffs;
2234 const uint16_t *quant_matrix;
2236 nCoeffs= s->block_last_index[n];
2238 quant_matrix = s->inter_matrix;
2239 for(i=0; i<=nCoeffs; i++) {
2240 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign handling elided from this extract) */
2245 level = (((level << 1) + 1) * qscale *
2246 ((int) (quant_matrix[j]))) >> 4;
2247 level = (level - 1) | 1; /* force odd: MPEG-1 mismatch control */
/* positive-level branch */
2250 level = (((level << 1) + 1) * qscale *
2251 ((int) (quant_matrix[j]))) >> 4;
2252 level = (level - 1) | 1;
/**
 * MPEG-2 intra-block dequantization (C reference implementation).
 * Like the MPEG-1 intra version but without the force-odd step (MPEG-2
 * uses a different mismatch-control scheme), and with all 64 coefficients
 * processed when alternate scan is in use.
 * NOTE(review): lines are elided from this extract (level fetch, sign
 * branches, closing braces). Code left byte-identical.
 */
2259 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2260 DCTELEM *block, int n, int qscale)
2262 int i, level, nCoeffs;
2263 const uint16_t *quant_matrix;
/* alternate scan reorders coefficients, so the whole block must be done */
2265 if(s->alternate_scan) nCoeffs= 63;
2266 else nCoeffs= s->block_last_index[n];
2269 block[0] = block[0] * s->y_dc_scale;
2271 block[0] = block[0] * s->c_dc_scale;
2272 quant_matrix = s->intra_matrix;
2273 for(i=1;i<=nCoeffs;i++) {
2274 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign handling elided from this extract) */
2279 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2282 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact variant of MPEG-2 intra dequantization.
 * NOTE(review): the lines that distinguish this from
 * dct_unquantize_mpeg2_intra_c are elided from this extract — presumably
 * the spec-mandated mismatch-control accumulation over all coefficients;
 * confirm against the full file. Code left byte-identical.
 */
2289 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2290 DCTELEM *block, int n, int qscale)
2292 int i, level, nCoeffs;
2293 const uint16_t *quant_matrix;
2296 if(s->alternate_scan) nCoeffs= 63;
2297 else nCoeffs= s->block_last_index[n];
2300 block[0] = block[0] * s->y_dc_scale;
2302 block[0] = block[0] * s->c_dc_scale;
2303 quant_matrix = s->intra_matrix;
2304 for(i=1;i<=nCoeffs;i++) {
2305 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign handling elided from this extract) */
2310 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
2313 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter-block dequantization (C reference implementation).
 * (2*level+1) * qscale * inter matrix >> 4, without the MPEG-1 force-odd
 * step; MPEG-2 mismatch control is handled differently (presumably in
 * elided lines — confirm against the full file).
 * NOTE(review): lines are elided from this extract (level fetch, sign
 * branches, closing braces). Code left byte-identical.
 */
2322 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2323 DCTELEM *block, int n, int qscale)
2325 int i, level, nCoeffs;
2326 const uint16_t *quant_matrix;
2329 if(s->alternate_scan) nCoeffs= 63;
2330 else nCoeffs= s->block_last_index[n];
2332 quant_matrix = s->inter_matrix;
2333 for(i=0; i<=nCoeffs; i++) {
2334 int j= s->intra_scantable.permutated[i];
/* negative-level branch (sign handling elided from this extract) */
2339 level = (((level << 1) + 1) * qscale *
2340 ((int) (quant_matrix[j]))) >> 4;
/* positive-level branch */
2343 level = (((level << 1) + 1) * qscale *
2344 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263-style intra dequantization: level' = level*qmul +/- qadd (sign
 * dependent), with qadd forced odd. DC is scaled separately.
 * NOTE(review): the qmul assignment and the nCoeffs=63 path are elided
 * from this extract — confirm against the full file. Code left
 * byte-identical.
 */
2353 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2354 DCTELEM *block, int n, int qscale)
2356 int i, level, qmul, qadd;
2359 assert(s->block_last_index[n]>=0);
2365 block[0] = block[0] * s->y_dc_scale;
2367 block[0] = block[0] * s->c_dc_scale;
/* odd reconstruction offset, per H.263 inverse quantization */
2368 qadd = (qscale - 1) | 1;
/* last nonzero coefficient in raster order bounds the loop */
2375 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2377 for(i=1; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it (sign test elided) */
2381 level = level * qmul - qadd;
2383 level = level * qmul + qadd;
/**
 * H.263-style inter dequantization: same level*qmul +/- qadd scheme as the
 * intra variant, but without DC special-casing and starting at index 0.
 * NOTE(review): the qmul assignment and sign-test lines are elided from
 * this extract — confirm against the full file. Code left byte-identical.
 */
2390 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2391 DCTELEM *block, int n, int qscale)
2393 int i, level, qmul, qadd;
2396 assert(s->block_last_index[n]>=0);
/* odd reconstruction offset, per H.263 inverse quantization */
2398 qadd = (qscale - 1) | 1;
2401 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2403 for(i=0; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it (sign test elided) */
2407 level = level * qmul - qadd;
2409 level = level * qmul + qadd;
2417 * set qscale and update qscale dependent variables.
2419 void ff_set_qscale(MpegEncContext * s, int qscale)
2423 else if (qscale > 31)
2427 s->chroma_qscale= s->chroma_qscale_table[qscale];
2429 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2430 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];