2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization routines.
 * Each operates on one 8x8 coefficient block: n is the block index within
 * the macroblock, qscale the quantizer. ff_dct_common_init() installs
 * these as function pointers on the MpegEncContext. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47                                    DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49                                    DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51                                    DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53                                    DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55                                    DCTELEM *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57                                   DCTELEM *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59                                   DCTELEM *block, int n, int qscale);
62 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale mapping: identity — chroma uses the same
 * quantizer as luma unless a codec installs its own table. */
68 static const uint8_t ff_default_chroma_qscale_table[32]={
69 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
70     0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC coefficient scale: constant 8 regardless of qscale. */
73 const uint8_t ff_mpeg1_dc_scale_table[128]={
74 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
75     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, constant 4 (selected via ff_mpeg2_dc_scale_table[1];
 * presumably corresponds to intra_dc_precision 1 — confirm at call site). */
81 static const uint8_t mpeg2_dc_scale_table1[128]={
82 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
83     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, constant 2 (entry 2 of ff_mpeg2_dc_scale_table). */
89 static const uint8_t mpeg2_dc_scale_table2[128]={
90 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
91     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, constant 1 (entry 3 of ff_mpeg2_dc_scale_table). */
97 static const uint8_t mpeg2_dc_scale_table3[128]={
98 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
99     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Table of DC scale tables, index 0..3. Entry 0 reuses the MPEG-1 table
 * (scale 8); presumably indexed by MPEG-2 intra_dc_precision — verify
 * against the header-decoding code that selects y/c_dc_scale_table. */
105 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
106     ff_mpeg1_dc_scale_table,
107     mpeg2_dc_scale_table1,
108     mpeg2_dc_scale_table2,
109     mpeg2_dc_scale_table3,
/* Pixel-format list advertised by 4:2:0 decoders (initializer elided in
 * this view). */
112 const enum PixelFormat ff_pixfmt_list_420[] = {
/* Pixel-format list for 4:2:0 decoders with hardware-acceleration
 * entries (initializer elided in this view). */
117 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start code (00 00 01 xx), carrying the last
 * four bytes seen across calls in *state so codes split between buffers
 * are still found. */
124 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
/* Shift one input byte into the rolling 32-bit state per iteration;
 * tmp == 0x100 means the previous three bytes were 00 00 01. */
132         uint32_t tmp= *state << 8;
133         *state= tmp + *(p++);
134         if(tmp == 0x100 || p==end)
/* Skip ahead as far as the bytes just read allow: if they cannot be the
 * tail of a 00 00 01 prefix, no start code can begin inside them. */
139             if     (p[-1] > 1      ) p+= 3;
140             else if(p[-2]          ) p+= 2;
141             else if(p[-3]|(p[-1]-1)) p++;
154 /* init common dct for both encoder and decoder */
155 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Install the portable C inverse-quantization routines; the
 * arch-specific MPV_common_init_* calls below may override them. */
157     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
158     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
159     s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
160     s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
161     s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
162     if(s->flags & CODEC_FLAG_BITEXACT)
163         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
164     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (each guarded by build configuration in
 * the full file). */
167         MPV_common_init_mmx(s);
169         MPV_common_init_axp(s);
171         MPV_common_init_mlib(s);
173         MPV_common_init_mmi(s);
175         MPV_common_init_arm(s);
177         MPV_common_init_altivec(s);
179         MPV_common_init_bfin(s);
182     /* load & permutate scantables
183        note: only wmv uses different ones
/* Scan tables are permuted to match the IDCT's coefficient ordering. */
185     if(s->alternate_scan){
186         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
187         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
189         ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
190         ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
192     ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193     ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copy src into dst; dst is marked TYPE_COPY so it is never
 * treated as owning the underlying buffers. */
198 void ff_copy_picture(Picture *dst, Picture *src){
200     dst->type= FF_BUFFER_TYPE_COPY;
204  * Release a frame buffer
/* Returns the pixel buffer to the application via release_buffer() and
 * frees any per-frame hwaccel private data. */
206 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
208     s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
209     av_freep(&pic->hwaccel_picture_private);
213  * Allocate a frame buffer
/* Obtains pixel data from the application's get_buffer() and validates
 * the result (age/type/data set, strides consistent with the context). */
215 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
219     if (s->avctx->hwaccel) {
220         assert(!pic->hwaccel_picture_private);
221         if (s->avctx->hwaccel->priv_data_size) {
222             pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
223             if (!pic->hwaccel_picture_private) {
224                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
230     r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
/* Reject buffers the application filled in incompletely. */
232     if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
233         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
234         av_freep(&pic->hwaccel_picture_private);
/* Strides must not change mid-stream: the context caches linesize. */
238     if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
239         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
240         free_frame_buffer(s, pic);
244     if (pic->linesize[1] != pic->linesize[2]) {
245         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
246         free_frame_buffer(s, pic);
254  * allocates a Picture
255  * The pixels are allocated/set by calling get_buffer() if shared=0
257 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
258     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
259     const int mb_array_size= s->mb_stride*s->mb_height;
260     const int b8_array_size= s->b8_stride*s->mb_height*2;
261     const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared=1: caller supplied the pixel data; just tag the picture. */
266         assert(pic->data[0]);
267         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
268         pic->type= FF_BUFFER_TYPE_SHARED;
270         assert(!pic->data[0]);
272         if (alloc_frame_buffer(s, pic) < 0)
275         s->linesize  = pic->linesize[0];
276         s->uvlinesize= pic->linesize[1];
/* First use of this Picture: allocate the per-macroblock side tables
 * (they persist across get_buffer()/release_buffer() cycles). */
279     if(pic->qscale_table==NULL){
281             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
282             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
283             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
286         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
287         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t)  , fail)
288         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
289         pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
/* H.264 stores motion vectors at 4x4 granularity (subsample_log2=2),
 * other codecs at 8x8 (subsample_log2=3). */
290         if(s->out_format == FMT_H264){
292                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
293                 pic->motion_val[i]= pic->motion_val_base[i]+4;
294                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
296             pic->motion_subsample_log2= 2;
297         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
299                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
300                 pic->motion_val[i]= pic->motion_val_base[i]+4;
301                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
303             pic->motion_subsample_log2= 3;
305         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
306             FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
308         pic->qstride= s->mb_stride;
309         FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
312     /* It might be nicer if the application would keep track of these
313      * but it would require an API change. */
314     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
315     s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
316     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
317         pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
320 fail: //for the FF_ALLOCZ_OR_GOTO macro
322         free_frame_buffer(s, pic);
327  * deallocates a picture
/* Frees the pixel buffer (unless application-owned/shared) and all the
 * per-macroblock side tables allocated in ff_alloc_picture(). */
329 static void free_picture(MpegEncContext *s, Picture *pic){
332     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
333         free_frame_buffer(s, pic);
336     av_freep(&pic->mb_var);
337     av_freep(&pic->mc_mb_var);
338     av_freep(&pic->mb_mean);
339     av_freep(&pic->mbskip_table);
340     av_freep(&pic->qscale_table);
341     av_freep(&pic->mb_type_base);
342     av_freep(&pic->dct_coeff);
343     av_freep(&pic->pan_scan);
346         av_freep(&pic->motion_val_base[i]);
347         av_freep(&pic->ref_index[i]);
350     if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, ME scratchpads, ME maps, optional
 * noise-reduction accumulators, DCT block storage and H.263 AC values.
 * Returns 0 on success, -1 on allocation failure (cleanup is deferred
 * to MPV_common_end()). */
359 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
360     int y_size = s->b8_stride * (2 * s->mb_height + 1);
361     int c_size = s->mb_stride * (s->mb_height + 1);
362     int yc_size = y_size + 2 * c_size;
365     // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
366     FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
367     s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
369      //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
370     FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* The ME/RD/B/OBMC scratchpads alias one allocation; only one of them
 * is live at a time. */
371     s->me.temp=         s->me.scratchpad;
372     s->rd_scratchpad=   s->me.scratchpad;
373     s->b_scratchpad=    s->me.scratchpad;
374     s->obmc_scratchpad= s->me.scratchpad + 16;
376         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
377         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
378         if(s->avctx->noise_reduction){
379             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
382     FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
383     s->block= s->blocks[0];
386         s->pblocks[i] = &s->block[i];
389     if (s->out_format == FMT_H263) {
391         FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
392         s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
393         s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
394         s->ac_val[2] = s->ac_val[1] + c_size;
399     return -1; //free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(). The aliased
 * scratchpad pointers are only NULLed, not freed, since they share the
 * me.scratchpad allocation. */
402 static void free_duplicate_context(MpegEncContext *s){
405     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
406     av_freep(&s->me.scratchpad);
410     s->obmc_scratchpad= NULL;
412     av_freep(&s->dct_error_sum);
413     av_freep(&s->me.map);
414     av_freep(&s->me.score_map);
415     av_freep(&s->blocks);
416     av_freep(&s->ac_val_base);
/* Copy only the per-thread fields from src into bak, so that
 * ff_update_duplicate_context() can memcpy the whole context and then
 * restore these thread-local pointers/state. */
420 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
421 #define COPY(a) bak->a= src->a
422     COPY(allocated_edge_emu_buffer);
423     COPY(edge_emu_buffer);
428     COPY(obmc_scratchpad);
435     COPY(me.map_generation);
/* Refresh a per-thread context from the master: save dst's thread-local
 * fields, overwrite dst wholesale with src, then restore the saved
 * fields and re-point pblocks into dst's own block storage. */
447 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
450     //FIXME copy only needed parts
452     backup_duplicate_context(&bak, dst);
453     memcpy(dst, src, sizeof(MpegEncContext));
454     backup_duplicate_context(dst, &bak);
456         dst->pblocks[i] = &dst->block[i];
458 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
462  * sets the given MpegEncContext to common defaults (same for encoding and decoding).
463  * the changed fields will not depend upon the prior state of the MpegEncContext.
465 void MPV_common_defaults(MpegEncContext *s){
466     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
468     s->chroma_qscale_table= ff_default_chroma_qscale_table;
469     s->progressive_frame= 1;
470     s->progressive_sequence= 1;
471     s->picture_structure= PICT_FRAME;
473     s->coded_picture_number = 0;
474     s->picture_number = 0;
475     s->input_picture_number = 0;
477     s->picture_in_gop_number = 0;
484  * sets the given MpegEncContext to defaults for decoding.
485  * the changed fields will not depend upon the prior state of the MpegEncContext.
487 void MPV_decode_defaults(MpegEncContext *s){
488     MPV_common_defaults(s);
492  * init common structure for both encoder and decoder.
493  * this assumes that some variables like width/height are already set
495 av_cold int MPV_common_init(MpegEncContext *s)
497     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two macroblock
 * rows; H.264 manages mb_height itself. */
499     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
500         s->mb_height = (s->height + 31) / 32 * 2;
501     else if (s->codec_id != CODEC_ID_H264)
502         s->mb_height = (s->height + 15) / 16;
504     if(s->avctx->pix_fmt == PIX_FMT_NONE){
505         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Slice threading needs at least one MB row per thread. */
509     if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
510         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
514     if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
517     dsputil_init(&s->dsp, s->avctx);
518     ff_dct_common_init(s);
520     s->flags= s->avctx->flags;
521     s->flags2= s->avctx->flags2;
/* Strides include one extra column so edge macroblocks have valid
 * neighbours to index. */
523     s->mb_width  = (s->width  + 15) / 16;
524     s->mb_stride = s->mb_width + 1;
525     s->b8_stride = s->mb_width*2 + 1;
526     s->b4_stride = s->mb_width*4 + 1;
527     mb_array_size= s->mb_height * s->mb_stride;
528     mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
530     /* set chroma shifts */
531     avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
532                                                     &(s->chroma_y_shift) );
534     /* set default edge pos, will be overriden in decode_header if needed */
535     s->h_edge_pos= s->mb_width*16;
536     s->v_edge_pos= s->mb_height*16;
538     s->mb_num = s->mb_width * s->mb_height;
543     s->block_wrap[3]= s->b8_stride;
545     s->block_wrap[5]= s->mb_stride;
547     y_size = s->b8_stride * (2 * s->mb_height + 1);
548     c_size = s->mb_stride * (s->mb_height + 1);
549     yc_size = y_size + 2 * c_size;
551     /* convert fourcc to upper case */
552     s->codec_tag = ff_toupper4(s->avctx->codec_tag);
554     s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
556     s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* Mapping from linear MB index to the padded-stride xy index. */
558     FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
559     for(y=0; y<s->mb_height; y++){
560         for(x=0; x<s->mb_width; x++){
561             s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
564     s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
567         /* Allocate MV tables */
568         FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
569         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
570         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
571         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
572         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
573         FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
/* Usable table pointers are offset by one row plus one column so that
 * index (-1,-1) neighbours are addressable. */
574         s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
575         s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
576         s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
577         s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
578         s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
579         s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
581         if(s->msmpeg4_version){
582             FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
584         FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
586         /* Allocate MB type table */
587         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
589         FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
591         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
592         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
593         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
594         FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
595         FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
596         FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
598         if(s->avctx->noise_reduction){
599             FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
602     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
603     for(i = 0; i < MAX_PICTURE_COUNT; i++) {
604         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
607     FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
609     if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
610         /* interlaced direct mode decoding tables */
615                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
616                     s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
618                 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
619                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
620                 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
622             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
625     if (s->out_format == FMT_H263) {
627         FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
628         s->coded_block= s->coded_block_base + s->b8_stride + 1;
630         /* cbp, ac_pred, pred_dir */
631         FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
632         FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
635     if (s->h263_pred || s->h263_plus || !s->encoding) {
637         //MN: we need these for  error resilience of intra-frames
638         FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
639         s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
640         s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
641         s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the DC predictor reset value (128 << 3). */
642         for(i=0;i<yc_size;i++)
643             s->dc_val_base[i] = 1024;
646     /* which mb is a intra block */
647     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
648     memset(s->mbintra_table, 1, mb_array_size);
650     /* init macroblock skip table */
651     FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
652     //Note the +1 is for a quicker mpeg4 slice_end detection
653     FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
655     s->parse_context.state= -1;
656     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
657        s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
658        s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
659        s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
662     s->context_initialized = 1;
/* Slice-threading setup: thread 0 is the master context itself; the
 * others are byte copies fixed up by init_duplicate_context(). */
664     s->thread_context[0]= s;
665     threads = s->avctx->thread_count;
667     for(i=1; i<threads; i++){
668         s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
669         memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
672     for(i=0; i<threads; i++){
673         if(init_duplicate_context(s->thread_context[i], s) < 0)
675         s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
676         s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
685 /* init common structure for both encoder and decoder */
/* Tear down everything MPV_common_init() allocated: per-thread
 * contexts, MV/MB side tables, all Pictures and visualization buffers.
 * Safe to call with partially-initialized state (av_freep on NULL is a
 * no-op). */
686 void MPV_common_end(MpegEncContext *s)
690     for(i=0; i<s->avctx->thread_count; i++){
691         free_duplicate_context(s->thread_context[i]);
/* Index 0 is the master context itself; only the copies are freed. */
693     for(i=1; i<s->avctx->thread_count; i++){
694         av_freep(&s->thread_context[i]);
697     av_freep(&s->parse_context.buffer);
698     s->parse_context.buffer_size=0;
700     av_freep(&s->mb_type);
701     av_freep(&s->p_mv_table_base);
702     av_freep(&s->b_forw_mv_table_base);
703     av_freep(&s->b_back_mv_table_base);
704     av_freep(&s->b_bidir_forw_mv_table_base);
705     av_freep(&s->b_bidir_back_mv_table_base);
706     av_freep(&s->b_direct_mv_table_base);
708     s->b_forw_mv_table= NULL;
709     s->b_back_mv_table= NULL;
710     s->b_bidir_forw_mv_table= NULL;
711     s->b_bidir_back_mv_table= NULL;
712     s->b_direct_mv_table= NULL;
716                 av_freep(&s->b_field_mv_table_base[i][j][k]);
717                 s->b_field_mv_table[i][j][k]=NULL;
719             av_freep(&s->b_field_select_table[i][j]);
720             av_freep(&s->p_field_mv_table_base[i][j]);
721             s->p_field_mv_table[i][j]=NULL;
723         av_freep(&s->p_field_select_table[i]);
726     av_freep(&s->dc_val_base);
727     av_freep(&s->coded_block_base);
728     av_freep(&s->mbintra_table);
729     av_freep(&s->cbp_table);
730     av_freep(&s->pred_dir_table);
732     av_freep(&s->mbskip_table);
733     av_freep(&s->prev_pict_types);
734     av_freep(&s->bitstream_buffer);
735     s->allocated_bitstream_buffer_size=0;
737     av_freep(&s->avctx->stats_out);
738     av_freep(&s->ac_stats);
739     av_freep(&s->error_status_table);
740     av_freep(&s->mb_index2xy);
741     av_freep(&s->lambda_table);
742     av_freep(&s->q_intra_matrix);
743     av_freep(&s->q_inter_matrix);
744     av_freep(&s->q_intra_matrix16);
745     av_freep(&s->q_inter_matrix16);
746     av_freep(&s->input_picture);
747     av_freep(&s->reordered_input_picture);
748     av_freep(&s->dct_offset);
751         for(i=0; i<MAX_PICTURE_COUNT; i++){
752             free_picture(s, &s->picture[i]);
755     av_freep(&s->picture);
756     s->context_initialized = 0;
759     s->current_picture_ptr= NULL;
760     s->linesize= s->uvlinesize= 0;
763         av_freep(&s->visualization_buffer[i]);
765     avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level, max_run,
 * index_run) for an RLTable, once per table. If static_store is given
 * the results live in that caller-provided storage; otherwise they are
 * av_malloc'ed. */
768 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
770     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
771     uint8_t index_run[MAX_RUN+1];
772     int last, run, level, start, end, i;
774     /* If table is static, we can quit if rl->max_level[0] is not NULL */
775     if(static_store && rl->max_level[0])
778     /* compute max_level[], max_run[] and index_run[] */
/* Two passes: last=0 for "not last coefficient" codes, last=1 for
 * "last coefficient" codes. */
779     for(last=0;last<2;last++) {
788         memset(max_level, 0, MAX_RUN + 1);
789         memset(max_run, 0, MAX_LEVEL + 1);
/* rl->n marks "no code for this run". */
790         memset(index_run, rl->n, MAX_RUN + 1);
791         for(i=start;i<end;i++) {
792             run = rl->table_run[i];
793             level = rl->table_level[i];
794             if (index_run[run] == rl->n)
796             if (level > max_level[run])
797                 max_level[run] = level;
798             if (run > max_run[level])
799                 max_run[level] = run;
/* static_store layout: [max_level | max_run | index_run] per `last`. */
802             rl->max_level[last] = static_store[last];
804             rl->max_level[last] = av_malloc(MAX_RUN + 1);
805         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
807             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
809             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
810         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
812             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
814             rl->index_run[last] = av_malloc(MAX_RUN + 1);
815         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the combined run/level/length VLC tables (rl_vlc[q]) from
 * the generic VLC table, folding the qscale multiply into the stored
 * level so the decoder avoids it per coefficient. */
819 void init_vlc_rl(RLTable *rl)
831         for(i=0; i<rl->vlc.table_size; i++){
832             int code= rl->vlc.table[i][0];
833             int len = rl->vlc.table[i][1];
836             if(len==0){ // illegal code
839             }else if(len<0){ //more bits needed
843                 if(code==rl->n){ //esc
847                     run=   rl->table_run  [code] + 1;
848                     level= rl->table_level[code] * qmul + qadd;
/* +192 flags "last coefficient" codes in the run field. */
849                     if(code >= rl->last) run+=192;
852             rl->rl_vlc[q][i].len= len;
853             rl->rl_vlc[q][i].level= level;
854             rl->rl_vlc[q][i].run= run;
/* Find a free slot in s->picture[]. shared=1 requires a completely
 * untyped slot; shared=0 prefers typed-but-released slots, then any
 * slot without pixel data. Overflow is a hard internal error: valid
 * streams can never need more than MAX_PICTURE_COUNT. */
859 int ff_find_unused_picture(MpegEncContext *s, int shared){
863         for(i=0; i<MAX_PICTURE_COUNT; i++){
864             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
867         for(i=0; i<MAX_PICTURE_COUNT; i++){
868             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
870         for(i=0; i<MAX_PICTURE_COUNT; i++){
871             if(s->picture[i].data[0]==NULL) return i;
875     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
876     /* We could return -1, but the codec would crash trying to draw into a
877      * non-existing frame anyway. This is safer than waiting for a random crash.
878      * Also the return of this is never useful, an encoder must only allocate
879      * as much as allowed in the specification. This has no relationship to how
880      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
881      * enough for such valid streams).
882      * Plus, a decoder has to check stream validity and remove frames if too
883      * many reference frames are around. Waiting for "OOM" is not correct at
884      * all. Similarly, missing reference frames have to be replaced by
885      * interpolated/MC frames, anything else is a bug in the codec ...
/* Encoder noise reduction: maintain running per-coefficient error sums
 * (halved periodically to age old statistics) and derive the DCT offset
 * table used to bias quantization. */
891 static void update_noise_reduction(MpegEncContext *s){
894     for(intra=0; intra<2; intra++){
/* Halve the accumulators once the sample count exceeds 2^16 so recent
 * frames dominate. */
895         if(s->dct_count[intra] > (1<<16)){
897                 s->dct_error_sum[intra][i] >>=1;
899             s->dct_count[intra] >>= 1;
903             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
909  * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
911 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
917     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
919     /* mark&release old frames */
920     if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
921       if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
922           free_frame_buffer(s, s->last_picture_ptr);
924         /* release forgotten pictures */
925         /* if(mpeg124/h263) */
927             for(i=0; i<MAX_PICTURE_COUNT; i++){
928                 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
929                     av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
930                     free_frame_buffer(s, &s->picture[i]);
938         /* release non reference frames */
939         for(i=0; i<MAX_PICTURE_COUNT; i++){
940             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
941                 free_frame_buffer(s, &s->picture[i]);
/* Pick the Picture for the frame being coded/decoded. */
945         if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
946             pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
948             i= ff_find_unused_picture(s, 0);
954         if (s->codec_id == CODEC_ID_H264)
955             pic->reference = s->picture_structure;
956         else if (s->pict_type != FF_B_TYPE)
960         pic->coded_picture_number= s->coded_picture_number++;
962         if(ff_alloc_picture(s, pic, 0) < 0)
965         s->current_picture_ptr= pic;
966         //FIXME use only the vars from current_pic
967         s->current_picture_ptr->top_field_first= s->top_field_first;
968         if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
969             if(s->picture_structure != PICT_FRAME)
970                 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
972         s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
975     s->current_picture_ptr->pict_type= s->pict_type;
976 //    if(s->flags && CODEC_FLAG_QSCALE)
977   //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
978     s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
980     ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Advance the reference chain: non-B frames become the new "next"
 * reference, the previous "next" becomes "last". */
982     if (s->pict_type != FF_B_TYPE) {
983         s->last_picture_ptr= s->next_picture_ptr;
985             s->next_picture_ptr= s->current_picture_ptr;
987 /*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
988         s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
989         s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
990         s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
991         s->pict_type, s->dropable);*/
/* Streams that start on a non-keyframe get dummy reference frames so
 * motion compensation has something to read from. */
993     if(s->codec_id != CODEC_ID_H264){
994         if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
995             av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
996             /* Allocate a dummy frame */
997             i= ff_find_unused_picture(s, 0);
998             s->last_picture_ptr= &s->picture[i];
999             if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1002         if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
1003             /* Allocate a dummy frame */
1004             i= ff_find_unused_picture(s, 0);
1005             s->next_picture_ptr= &s->picture[i];
1006             if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1011     if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1012     if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1014     assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* Field pictures: double the strides (and offset by one line for the
 * bottom field) so the field is addressed like a half-height frame. */
1016     if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1019             if(s->picture_structure == PICT_BOTTOM_FIELD){
1020                 s->current_picture.data[i] += s->current_picture.linesize[i];
1022             s->current_picture.linesize[i] *= 2;
1023             s->last_picture.linesize[i] *=2;
1024             s->next_picture.linesize[i] *=2;
1029     s->hurry_up= s->avctx->hurry_up;
1031     s->error_recognition= avctx->error_recognition;
1033     /* set dequantizer, we can't do it during init as it might change for mpeg4
1034        and we can't do it in the header decode as init is not called for mpeg4 there yet */
1035     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1036         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1037         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1038     }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1039         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1040         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1042         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1043         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1046     if(s->dct_error_sum){
1047         assert(s->avctx->noise_reduction && s->encoding);
1049         update_noise_reduction(s);
1052     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1053         return ff_xvmc_field_start(s, avctx);
1058 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Finalizes the current picture: pads its edges for unrestricted MV prediction,
 * records per-frame statistics, writes the working current_picture copy back
 * into the picture array, frees non-reference frames, and clears the local
 * last/next/current copies. NOTE(review): several lines (braces, declarations,
 * early returns) are elided in this excerpt. */
1059 void MPV_frame_end(MpegEncContext *s)
1062 /* draw edge for correct motion prediction if outside */
1063 //just to make sure that all data is rendered.
1064 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1065 ff_xvmc_field_end(s);
/* Edge padding is skipped for hwaccel/VDPAU surfaces (host code cannot write
 * them) and when the caller provides pre-padded buffers (EMU_EDGE). */
1066 }else if(!s->avctx->hwaccel
1067 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1068 && s->unrestricted_mv
1069 && s->current_picture.reference
1071 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
/* Pad luma with full EDGE_WIDTH, chroma planes at half resolution. */
1072 s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
1073 s->h_edge_pos , s->v_edge_pos ,
1074 EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1075 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
1076 s->h_edge_pos>>1, s->v_edge_pos>>1,
1077 EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
1078 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
1079 s->h_edge_pos>>1, s->v_edge_pos>>1,
1080 EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
/* Remember type and quality of this frame; last_lambda_for[] is indexed by
 * picture type — presumably consumed by encoder rate control (confirm). */
1084 s->last_pict_type = s->pict_type;
1085 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1086 if(s->pict_type!=FF_B_TYPE){
1087 s->last_non_b_pict_type= s->pict_type;
1090 /* copy back current_picture variables */
/* Find the slot in s->picture[] that current_picture was copied from (matched
 * by data[0] pointer) and sync the possibly-updated fields back into it. */
1091 for(i=0; i<MAX_PICTURE_COUNT; i++){
1092 if(s->picture[i].data[0] == s->current_picture.data[0]){
1093 s->picture[i]= s->current_picture;
1097 assert(i<MAX_PICTURE_COUNT);
1101 /* release non-reference frames */
1102 for(i=0; i<MAX_PICTURE_COUNT; i++){
1103 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1104 free_frame_buffer(s, &s->picture[i]);
1108 // clear copies, to avoid confusion
1110 memset(&s->last_picture, 0, sizeof(Picture));
1111 memset(&s->next_picture, 0, sizeof(Picture));
1112 memset(&s->current_picture, 0, sizeof(Picture));
/* Expose the just-finished picture to the API user. */
1114 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1118 * draws a line from (ex, ey) -> (sx, sy).
1119 * @param w width of the image
1120 * @param h height of the image
1121 * @param stride stride/linesize of the image
1122 * @param color color of the arrow
/* Anti-aliased line drawn additively into a single plane using 16.16
 * fixed-point interpolation; intensity is split between the two pixels
 * adjacent to the ideal line position. */
1124 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Clamp both endpoints into the image so all writes stay in bounds. */
1127 sx= av_clip(sx, 0, w-1);
1128 sy= av_clip(sy, 0, h-1);
1129 ex= av_clip(ex, 0, w-1);
1130 ey= av_clip(ey, 0, h-1);
1132 buf[sy*stride + sx]+= color;
/* Step along the major axis: x-major branch first, y-major below. */
1134 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* Normalize so iteration runs in increasing coordinate order. */
1136 FFSWAP(int, sx, ex);
1137 FFSWAP(int, sy, ey);
1139 buf+= sx + sy*stride;
/* f = slope in 16.16 fixed point (ex is the x extent after rebasing). */
1141 f= ((ey-sy)<<16)/ex;
1142 for(x= 0; x <= ex; x++){
/* fr = fractional part; distribute color between the two covered rows. */
1145 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1146 buf[(y+1)*stride + x]+= (color* fr )>>16;
1150 FFSWAP(int, sx, ex);
1151 FFSWAP(int, sy, ey);
1153 buf+= sx + sy*stride;
/* Guard against a zero-length vertical extent before dividing. */
1155 if(ey) f= ((ex-sx)<<16)/ey;
1157 for(y= 0; y <= ey; y++){
1160 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1161 buf[y*stride + x+1]+= (color* fr )>>16;
1167 * draws an arrow from (ex, ey) -> (sx, sy).
1168 * @param w width of the image
1169 * @param h height of the image
1170 * @param stride stride/linesize of the image
1171 * @param color color of the arrow
/* Draws the arrow shaft plus, for vectors longer than 3 pixels, two short
 * head strokes at the (sx, sy) end. Used by the motion-vector visualizer. */
1173 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Loose clamp (±100 outside the frame) keeps the later arithmetic sane
 * while still letting draw_line() do the exact in-bounds clipping. */
1176 sx= av_clip(sx, -100, w+100);
1177 sy= av_clip(sy, -100, h+100);
1178 ex= av_clip(ex, -100, w+100);
1179 ey= av_clip(ey, -100, h+100);
/* Only draw a head if the vector is longer than 3 pixels. */
1184 if(dx*dx + dy*dy > 3*3){
1187 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1189 //FIXME subpixel accuracy
/* Scale (rx, ry) to a fixed head length of ~3 pixels. */
1190 rx= ROUNDED_DIV(rx*3<<4, length);
1191 ry= ROUNDED_DIV(ry*3<<4, length);
/* Two head strokes: one along the vector, one perpendicular to it. */
1193 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1194 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1196 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1200 * prints debugging info for the given picture.
/* Two independent debug outputs, both gated on s->avctx->debug flags:
 *   1) a textual per-macroblock dump (skip count, QP, MB type) via av_log;
 *   2) an in-picture visualization (motion vectors as arrows, QP / MB type
 *      as chroma coloring) drawn into a private copy of the frame data.
 * Returns immediately for hwaccel frames or when MB metadata is absent. */
1202 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1204 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1206 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1209 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1210 switch (pict->pict_type) {
1211 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1212 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1213 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1214 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1215 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1216 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
/* One character cell per macroblock, row by row. */
1218 for(y=0; y<s->mb_height; y++){
1219 for(x=0; x<s->mb_width; x++){
1220 if(s->avctx->debug&FF_DEBUG_SKIP){
/* mbskip counts consecutive skips; clamp so it prints as one digit. */
1221 int count= s->mbskip_table[x + y*s->mb_stride];
1222 if(count>9) count=9;
1223 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1225 if(s->avctx->debug&FF_DEBUG_QP){
1226 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1228 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1229 int mb_type= pict->mb_type[x + y*s->mb_stride];
1230 //Type & MV direction
/* Single-letter legend: A=intra+ACpred, i/I=intra 4x4/16x16, d/D=direct
 * (skipped/coded), g/G=GMC (skipped/coded), S=skip, >/< = fwd/bwd only,
 * X=bi-directional, P presumably = PCM (its condition line is elided). */
1232 av_log(s->avctx, AV_LOG_DEBUG, "P");
1233 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1234 av_log(s->avctx, AV_LOG_DEBUG, "A");
1235 else if(IS_INTRA4x4(mb_type))
1236 av_log(s->avctx, AV_LOG_DEBUG, "i");
1237 else if(IS_INTRA16x16(mb_type))
1238 av_log(s->avctx, AV_LOG_DEBUG, "I");
1239 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1240 av_log(s->avctx, AV_LOG_DEBUG, "d");
1241 else if(IS_DIRECT(mb_type))
1242 av_log(s->avctx, AV_LOG_DEBUG, "D");
1243 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1244 av_log(s->avctx, AV_LOG_DEBUG, "g");
1245 else if(IS_GMC(mb_type))
1246 av_log(s->avctx, AV_LOG_DEBUG, "G");
1247 else if(IS_SKIP(mb_type))
1248 av_log(s->avctx, AV_LOG_DEBUG, "S");
1249 else if(!USES_LIST(mb_type, 1))
1250 av_log(s->avctx, AV_LOG_DEBUG, ">");
1251 else if(!USES_LIST(mb_type, 0))
1252 av_log(s->avctx, AV_LOG_DEBUG, "<");
1254 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1255 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partitioning (+ = 8x8, - = 16x8, | = 8x16). */
1260 av_log(s->avctx, AV_LOG_DEBUG, "+");
1261 else if(IS_16X8(mb_type))
1262 av_log(s->avctx, AV_LOG_DEBUG, "-");
1263 else if(IS_8X16(mb_type))
1264 av_log(s->avctx, AV_LOG_DEBUG, "|");
1265 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1266 av_log(s->avctx, AV_LOG_DEBUG, " ");
1268 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1271 if(IS_INTERLACED(mb_type))
1272 av_log(s->avctx, AV_LOG_DEBUG, "=");
1274 av_log(s->avctx, AV_LOG_DEBUG, " ");
1276 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1278 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- visual debug: draw into a private copy of the picture planes --- */
1282 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1283 const int shift= 1 + s->quarter_sample;
1287 int h_chroma_shift, v_chroma_shift, block_height;
1288 const int width = s->avctx->width;
1289 const int height= s->avctx->height;
1290 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1291 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1292 s->low_delay=0; //needed to see the vectors without trashing the buffers
1294 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* Copy planes into visualization_buffer so reference data is not trashed;
 * chroma planes are copied at subsampled height. */
1296 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1297 pict->data[i]= s->visualization_buffer[i];
1299 pict->type= FF_BUFFER_TYPE_COPY;
1301 block_height = 16>>v_chroma_shift;
1303 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1305 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1306 const int mb_index= mb_x + mb_y*s->mb_stride;
/* Motion-vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward. */
1307 if((s->avctx->debug_mv) && pict->motion_val){
1309 for(type=0; type<3; type++){
1312 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1316 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1320 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1325 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8 / 8x16, 1 for 16x16. */
1328 if(IS_8X8(pict->mb_type[mb_index])){
1331 int sx= mb_x*16 + 4 + 8*(i&1);
1332 int sy= mb_y*16 + 4 + 8*(i>>1);
1333 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1334 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1335 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1336 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1338 }else if(IS_16X8(pict->mb_type[mb_index])){
1342 int sy=mb_y*16 + 4 + 8*i;
1343 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1344 int mx=(pict->motion_val[direction][xy][0]>>shift);
1345 int my=(pict->motion_val[direction][xy][1]>>shift);
1347 if(IS_INTERLACED(pict->mb_type[mb_index]))
1350 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1352 }else if(IS_8X16(pict->mb_type[mb_index])){
1355 int sx=mb_x*16 + 4 + 8*i;
1357 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1358 int mx=(pict->motion_val[direction][xy][0]>>shift);
1359 int my=(pict->motion_val[direction][xy][1]>>shift);
1361 if(IS_INTERLACED(pict->mb_type[mb_index]))
1364 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1367 int sx= mb_x*16 + 8;
1368 int sy= mb_y*16 + 8;
1369 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1370 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1371 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1372 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: fill the MB's chroma area with a gray level
 * proportional to its qscale (scaled to 0..~128, max qscale 31). */
1376 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1377 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1379 for(y=0; y<block_height; y++){
1380 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1381 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualization: color the MB's chroma by type via COLOR(hue, sat). */
1384 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1385 int mb_type= pict->mb_type[mb_index];
1388 #define COLOR(theta, r)\
1389 u= (int)(128 + r*cos(theta*3.141592/180));\
1390 v= (int)(128 + r*sin(theta*3.141592/180));
1394 if(IS_PCM(mb_type)){
1396 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1398 }else if(IS_INTRA4x4(mb_type)){
1400 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1402 }else if(IS_DIRECT(mb_type)){
1404 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1406 }else if(IS_GMC(mb_type)){
1408 }else if(IS_SKIP(mb_type)){
1410 }else if(!USES_LIST(mb_type, 1)){
1412 }else if(!USES_LIST(mb_type, 0)){
1415 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the u/v byte across a 64-bit word for fast 8-pixel fills. */
1419 u*= 0x0101010101010101ULL;
1420 v*= 0x0101010101010101ULL;
1421 for(y=0; y<block_height; y++){
1422 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1423 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* Overlay partition boundaries in luma by XOR-ing a mid-gray pattern. */
1427 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1428 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1429 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1431 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1433 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
/* For 8x8 partitions, also mark sub-blocks whose vectors differ. */
1435 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1436 int dm= 1 << (mv_sample_log2-2);
1438 int sx= mb_x*16 + 8*(i&1);
1439 int sy= mb_y*16 + 8*(i>>1);
1440 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1442 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1443 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1445 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1446 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1447 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1451 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* Reset the skip counter so the dump reflects the next frame correctly. */
1455 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation of one w x h block in lowres decoding mode.
 * src_x/src_y are in (lowres-scaled) pixels, motion_x/motion_y in sub-pel
 * units; pix_op is an h264-chroma-style bilinear interpolation table.
 * Falls back to emulated_edge_mc when the source block would read outside
 * the padded picture edges. */
1461 static inline int hpel_motion_lowres(MpegEncContext *s,
1462 uint8_t *dest, uint8_t *src,
1463 int field_based, int field_select,
1464 int src_x, int src_y,
1465 int width, int height, int stride,
1466 int h_edge_pos, int v_edge_pos,
1467 int w, int h, h264_chroma_mc_func *pix_op,
1468 int motion_x, int motion_y)
1470 const int lowres= s->avctx->lowres;
1471 const int op_index= FFMIN(lowres, 2);
/* s_mask isolates the sub-pel fraction of a vector at this lowres level. */
1472 const int s_mask= (2<<lowres)-1;
1476 if(s->quarter_sample){
/* Split vectors into integer position (src_x/src_y) and fraction (sx/sy). */
1481 sx= motion_x & s_mask;
1482 sy= motion_y & s_mask;
1483 src_x += motion_x >> (lowres+1);
1484 src_y += motion_y >> (lowres+1);
1486 src += src_y * stride + src_x;
/* The unsigned compares also catch negative src_x/src_y. */
1488 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1489 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1490 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1491 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1492 src= s->edge_emu_buffer;
/* Rescale the fraction to the 0..7 range the chroma MC functions expect. */
1496 sx= (sx << 2) >> lowres;
1497 sy= (sy << 2) >> lowres;
1500 pix_op[op_index](dest, src, stride, h, sx, sy);
1504 /* apply one mpeg motion vector to the three components */
/* Lowres counterpart of mpeg_motion(): motion-compensates luma plus both
 * chroma planes for one MB (or one field of it). Chroma vector derivation
 * depends on out_format: H.263 keeps sub-pel info, H.261 uses full-pel
 * chroma, everything else halves the vector. */
1505 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1506 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1507 int field_based, int bottom_field, int field_select,
1508 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1509 int motion_x, int motion_y, int h, int mb_y)
1511 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1512 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1513 const int lowres= s->avctx->lowres;
1514 const int op_index= FFMIN(lowres, 2);
1515 const int block_s= 8>>lowres;
1516 const int s_mask= (2<<lowres)-1;
1517 const int h_edge_pos = s->h_edge_pos >> lowres;
1518 const int v_edge_pos = s->v_edge_pos >> lowres;
/* For field-based MC, step every other line of the frame buffer. */
1519 linesize = s->current_picture.linesize[0] << field_based;
1520 uvlinesize = s->current_picture.linesize[1] << field_based;
1522 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1528 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
/* Luma: split vector into integer source position and sub-pel fraction. */
1531 sx= motion_x & s_mask;
1532 sy= motion_y & s_mask;
1533 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1534 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1536 if (s->out_format == FMT_H263) {
1537 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1538 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1541 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1544 uvsx = (2*mx) & s_mask;
1545 uvsy = (2*my) & s_mask;
1546 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1547 uvsrc_y = mb_y*block_s + (my >> lowres);
1553 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1554 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1557 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1558 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1559 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* If the block would read past the padded edges, synthesize it. */
1561 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1562 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1563 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1564 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1565 ptr_y = s->edge_emu_buffer;
1566 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1567 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1568 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1569 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1570 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1571 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1577 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1578 dest_y += s->linesize;
1579 dest_cb+= s->uvlinesize;
1580 dest_cr+= s->uvlinesize;
1584 ptr_y += s->linesize;
1585 ptr_cb+= s->uvlinesize;
1586 ptr_cr+= s->uvlinesize;
/* Rescale fractions to the 0..7 range expected by the chroma MC table. */
1589 sx= (sx << 2) >> lowres;
1590 sy= (sy << 2) >> lowres;
1591 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1593 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1594 uvsx= (uvsx << 2) >> lowres;
1595 uvsy= (uvsy << 2) >> lowres;
1596 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1597 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1599 //FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks in lowres mode: the four
 * luma vectors are combined into a single chroma vector (H.263-style
 * rounding) and one block_s x block_s block is interpolated per plane. */
1602 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1603 uint8_t *dest_cb, uint8_t *dest_cr,
1604 uint8_t **ref_picture,
1605 h264_chroma_mc_func *pix_op,
1607 const int lowres= s->avctx->lowres;
1608 const int op_index= FFMIN(lowres, 2);
1609 const int block_s= 8>>lowres;
1610 const int s_mask= (2<<lowres)-1;
1611 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1612 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1613 int emu=0, src_x, src_y, offset, sx, sy;
1616 if(s->quarter_sample){
1621 /* In case of 8X8, we construct a single chroma motion vector
1622 with a special rounding */
1623 mx= ff_h263_round_chroma(mx);
1624 my= ff_h263_round_chroma(my);
1628 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1629 src_y = s->mb_y*block_s + (my >> (lowres+1));
/* Both chroma planes share the same offset into their buffers. */
1631 offset = src_y * s->uvlinesize + src_x;
1632 ptr = ref_picture[1] + offset;
1633 if(s->flags&CODEC_FLAG_EMU_EDGE){
1634 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1635 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1636 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1637 ptr= s->edge_emu_buffer;
/* Rescale sub-pel fraction to the range the MC functions expect. */
1641 sx= (sx << 2) >> lowres;
1642 sy= (sy << 2) >> lowres;
1643 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and emulation decision as Cb. */
1645 ptr = ref_picture[2] + offset;
1647 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1648 ptr= s->edge_emu_buffer;
1650 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1654 * motion compensation of a single macroblock
1656 * @param dest_y luma destination pointer
1657 * @param dest_cb chroma cb/u destination pointer
1658 * @param dest_cr chroma cr/v destination pointer
1659 * @param dir direction (0->forward, 1->backward)
1660 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1661 * @param pix_op halfpel motion compensation function (average or put normally)
1662 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres counterpart of MPV_motion(): dispatches per s->mv_type. Case
 * labels (MV_TYPE_16X16 / _8X8 / _FIELD / _16X8 / _DMV) are elided in this
 * excerpt, but the per-case bodies below are in that order. */
1664 static inline void MPV_motion_lowres(MpegEncContext *s,
1665 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1666 int dir, uint8_t **ref_picture,
1667 h264_chroma_mc_func *pix_op)
1671 const int lowres= s->avctx->lowres;
1672 const int block_s= 8>>lowres;
1677 switch(s->mv_type) {
/* 16x16: a single vector for the whole macroblock. */
1679 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1681 ref_picture, pix_op,
1682 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 8x8: four luma vectors; chroma uses their (rounded) average below. */
1688 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1689 ref_picture[0], 0, 0,
1690 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1691 s->width, s->height, s->linesize,
1692 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1693 block_s, block_s, pix_op,
1694 s->mv[dir][i][0], s->mv[dir][i][1]);
1696 mx += s->mv[dir][i][0];
1697 my += s->mv[dir][i][1];
1700 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1701 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* Field MC: frame pictures compensate top and bottom field separately. */
1704 if (s->picture_structure == PICT_FRAME) {
1706 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1707 1, 0, s->field_select[dir][0],
1708 ref_picture, pix_op,
1709 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1711 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1712 1, 1, s->field_select[dir][1],
1713 ref_picture, pix_op,
1714 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* Field pictures: a same-parity reference in the current frame is used
 * when predicting from the opposite field of the frame being decoded. */
1716 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
1717 ref_picture= s->current_picture_ptr->data;
1720 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1721 0, 0, s->field_select[dir][0],
1722 ref_picture, pix_op,
1723 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* 16x8: two vectors, each covering half the macroblock vertically. */
1728 uint8_t ** ref2picture;
1730 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
1731 ref2picture= ref_picture;
1733 ref2picture= s->current_picture_ptr->data;
1736 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1737 0, 0, s->field_select[dir][i],
1738 ref2picture, pix_op,
1739 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1741 dest_y += 2*block_s*s->linesize;
1742 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1743 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* Dual-prime: put first prediction, then average in the second. */
1747 if(s->picture_structure == PICT_FRAME){
1751 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1753 ref_picture, pix_op,
1754 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1756 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1760 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1761 0, 0, s->picture_structure != i+1,
1762 ref_picture, pix_op,
1763 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1765 // after put we make avg of the same block
1766 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1768 //opposite parity is always in the same frame if this is second field
1769 if(!s->first_field){
1770 ref_picture = s->current_picture_ptr->data;
1779 /* put block[] to dest[] */
/* Dequantizes intra block i with the current intra dequantizer and writes
 * (overwrites, not adds) the IDCT result to dest. */
1780 static inline void put_dct(MpegEncContext *s,
1781 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1783 s->dct_unquantize_intra(s, block, i, qscale);
1784 s->dsp.idct_put (dest, line_size, block);
1787 /* add block[] to dest[] */
/* Adds the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block has no coefficients and is skipped. */
1788 static inline void add_dct(MpegEncContext *s,
1789 DCTELEM *block, int i, uint8_t *dest, int line_size)
1791 if (s->block_last_index[i] >= 0) {
1792 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but dequantizes (inter dequantizer) first; skipped when
 * block i has no coefficients (block_last_index < 0). */
1796 static inline void add_dequant_dct(MpegEncContext *s,
1797 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1799 if (s->block_last_index[i] >= 0) {
1800 s->dct_unquantize_inter(s, block, i, qscale);
1802 s->dsp.idct_add (dest, line_size, block);
1807 * cleans dc, ac, coded_block for the current non intra MB
/* Resets intra-prediction state around the current MB so later intra MBs
 * do not predict from this (non-intra) one: luma DC predictors back to the
 * reset value 1024, AC prediction rows zeroed, MSMPEG4 coded_block flags
 * cleared, then the same for both chroma planes. */
1809 void ff_clean_intra_table_entries(MpegEncContext *s)
1811 int wrap = s->b8_stride;
1812 int xy = s->block_index[0];
/* Luma DC predictors (current 8x8 block and its right/bottom neighbours). */
1815 s->dc_val[0][xy + 1 ] =
1816 s->dc_val[0][xy + wrap] =
1817 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: two rows of 16 int16 coefficients each. */
1819 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1820 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1821 if (s->msmpeg4_version>=3) {
1822 s->coded_block[xy ] =
1823 s->coded_block[xy + 1 ] =
1824 s->coded_block[xy + wrap] =
1825 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma uses MB-granular indexing (mb_stride, one value per MB). */
1828 wrap = s->mb_stride;
1829 xy = s->mb_x + s->mb_y * wrap;
1831 s->dc_val[2][xy] = 1024;
1833 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1834 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* Mark this MB as non-intra for neighbouring prediction decisions. */
1836 s->mbintra_table[xy]= 0;
1839 /* generic function called after a macroblock has been parsed by the
1840 decoder or after it has been encoded by the encoder.
1842 Important variables used:
1843 s->mb_intra : true if intra macroblock
1844 s->mv_dir : motion vector direction
1845 s->mv_type : motion vector type
1846 s->mv : motion vector
1847 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstructs one macroblock: motion compensation (normal or lowres path)
 * for inter MBs, then dequant + IDCT of the residual/intra coefficients.
 * lowres_flag and is_mpeg12 are compile-time constants at each call site
 * (the function is av_always_inline), so dead branches are pruned. */
1849 static av_always_inline
1850 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1851 int lowres_flag, int is_mpeg12)
1853 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC does its own reconstruction; hand the MB off and presumably return
 * early (the return statement is elided in this excerpt). */
1854 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1855 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1859 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1860 /* save DCT coefficients */
1862 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1865 *dct++ = block[i][s->dsp.idct_permutation[j]];
1868 s->current_picture.qscale_table[mb_xy]= s->qscale;
1870 /* update DC predictors for P macroblocks */
1872 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1873 if(s->mbintra_table[mb_xy])
1874 ff_clean_intra_table_entries(s);
1878 s->last_dc[2] = 128 << s->intra_dc_precision;
1881 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1882 s->mbintra_table[mb_xy]=1;
/* Encoder can skip reconstruction for non-reference pictures unless PSNR
 * stats are requested. */
1884 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1885 uint8_t *dest_y, *dest_cb, *dest_cr;
1886 int dct_linesize, dct_offset;
1887 op_pixels_func (*op_pix)[4];
1888 qpel_mc_func (*op_qpix)[16];
1889 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1890 const int uvlinesize= s->current_picture.linesize[1];
1891 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1892 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1894 /* avoid copy if macroblock skipped in last frame too */
1895 /* skip only during decoding as we might trash the buffers during encoding a bit */
1897 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1898 const int age= s->current_picture.age;
1902 if (s->mb_skipped) {
1904 assert(s->pict_type!=FF_I_TYPE);
1906 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1907 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1909 /* if previous was skipped too, then nothing to do ! */
/* If the MB was also skipped in all frames since the reference was
 * written (counter >= age), the pixels are already correct. */
1910 if (*mbskip_ptr >= age && s->current_picture.reference){
1913 } else if(!s->current_picture.reference){
1914 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1915 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1917 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT interleaves the two fields: double the stride and start
 * the "second" block one line (not one block row) below. */
1921 dct_linesize = linesize << s->interlaced_dct;
1922 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1926 dest_cb= s->dest[1];
1927 dest_cr= s->dest[2];
/* Non-readable destination: reconstruct into scratch, copy out at the end. */
1929 dest_y = s->b_scratchpad;
1930 dest_cb= s->b_scratchpad+16*linesize;
1931 dest_cr= s->b_scratchpad+32*linesize;
1935 /* motion handling */
1936 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Lowres path: h264-chroma-style bilinear MC, put then avg for bidir. */
1939 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
1941 if (s->mv_dir & MV_DIR_FORWARD) {
1942 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
1943 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1945 if (s->mv_dir & MV_DIR_BACKWARD) {
1946 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
/* Full-resolution path: choose rounding per picture type. */
1949 op_qpix= s->me.qpel_put;
1950 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
1951 op_pix = s->dsp.put_pixels_tab;
1953 op_pix = s->dsp.put_no_rnd_pixels_tab;
1955 if (s->mv_dir & MV_DIR_FORWARD) {
1956 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1957 op_pix = s->dsp.avg_pixels_tab;
1958 op_qpix= s->me.qpel_avg;
1960 if (s->mv_dir & MV_DIR_BACKWARD) {
1961 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1966 /* skip dequant / idct if we are really late ;) */
1968 if(s->hurry_up>1) goto skip_idct;
1970 if(s->avctx->skip_idct){
1971 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
1972 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
1973 || s->avctx->skip_idct >= AVDISCARD_ALL)
1977 /* add dct residue */
/* Inter residual, dequant needed here (codecs that did not dequantize
 * during parsing, and always while encoding). */
1978 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1979 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1980 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1981 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1982 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1983 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1985 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1986 if (s->chroma_y_shift){
1987 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1988 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1992 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1993 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1994 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1995 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Inter residual, already dequantized during parsing. */
1998 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
1999 add_dct(s, block[0], 0, dest_y , dct_linesize);
2000 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2001 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2002 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2004 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2005 if(s->chroma_y_shift){//Chroma420
2006 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2007 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2010 dct_linesize = uvlinesize << s->interlaced_dct;
2011 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2013 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2014 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2015 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2016 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2017 if(!s->chroma_x_shift){//Chroma444
2018 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2019 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2020 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2021 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2026 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2027 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2030 /* dct only in intra block */
/* Intra MB: dequant+idct_put where dequant was deferred, plain idct_put
 * for MPEG-1/2 (already dequantized during parsing). */
2031 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2032 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2033 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2034 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2035 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2037 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2038 if(s->chroma_y_shift){
2039 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2040 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2044 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2045 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2046 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2047 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2051 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2052 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2053 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2054 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2056 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2057 if(s->chroma_y_shift){
2058 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2059 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2062 dct_linesize = uvlinesize << s->interlaced_dct;
2063 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2065 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2066 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2067 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2068 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2069 if(!s->chroma_x_shift){//Chroma444
2070 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2071 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2072 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2073 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Scratchpad path: copy the reconstructed MB into the real destination. */
2081 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2082 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2083 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2088 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2090 if(s->out_format == FMT_MPEG1) {
2091 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2092 else MPV_decode_mb_internal(s, block, 0, 1);
2095 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2096 else MPV_decode_mb_internal(s, block, 0, 0);
2101 * @param h is the normal height, this will be reduced automatically if needed for the last row
2103 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2104 if (s->avctx->draw_horiz_band) {
2106 const int field_pic= s->picture_structure != PICT_FRAME;
2109 h= FFMIN(h, (s->avctx->height>>field_pic) - y);
2111 if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
2114 if(s->first_field) return;
2117 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2118 src= (AVFrame*)s->current_picture_ptr;
2119 else if(s->last_picture_ptr)
2120 src= (AVFrame*)s->last_picture_ptr;
2124 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2130 offset[0]= y * s->linesize;
2132 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2138 s->avctx->draw_horiz_band(s->avctx, src, offset,
2139 y, s->picture_structure, h);
2143 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2144 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2145 const int uvlinesize= s->current_picture.linesize[1];
2146 const int mb_size= 4 - s->avctx->lowres;
2148 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2149 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2150 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2151 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2152 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2153 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2154 //block_index is not used by mpeg2, so it is not affected by chroma_format
2156 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2157 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2158 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2160 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2162 if(s->picture_structure==PICT_FRAME){
2163 s->dest[0] += s->mb_y * linesize << mb_size;
2164 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2165 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2167 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2168 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2169 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2170 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2175 void ff_mpeg_flush(AVCodecContext *avctx){
2177 MpegEncContext *s = avctx->priv_data;
2179 if(s==NULL || s->picture==NULL)
2182 for(i=0; i<MAX_PICTURE_COUNT; i++){
2183 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2184 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2185 free_frame_buffer(s, &s->picture[i]);
2187 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2189 s->mb_x= s->mb_y= 0;
2192 s->parse_context.state= -1;
2193 s->parse_context.frame_start_found= 0;
2194 s->parse_context.overread= 0;
2195 s->parse_context.overread_index= 0;
2196 s->parse_context.index= 0;
2197 s->parse_context.last_index= 0;
2198 s->bitstream_buffer_size=0;
2202 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2203 DCTELEM *block, int n, int qscale)
2205 int i, level, nCoeffs;
2206 const uint16_t *quant_matrix;
2208 nCoeffs= s->block_last_index[n];
2211 block[0] = block[0] * s->y_dc_scale;
2213 block[0] = block[0] * s->c_dc_scale;
2214 /* XXX: only mpeg1 */
2215 quant_matrix = s->intra_matrix;
2216 for(i=1;i<=nCoeffs;i++) {
2217 int j= s->intra_scantable.permutated[i];
2222 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2223 level = (level - 1) | 1;
2226 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2227 level = (level - 1) | 1;
2234 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2235 DCTELEM *block, int n, int qscale)
2237 int i, level, nCoeffs;
2238 const uint16_t *quant_matrix;
2240 nCoeffs= s->block_last_index[n];
2242 quant_matrix = s->inter_matrix;
2243 for(i=0; i<=nCoeffs; i++) {
2244 int j= s->intra_scantable.permutated[i];
2249 level = (((level << 1) + 1) * qscale *
2250 ((int) (quant_matrix[j]))) >> 4;
2251 level = (level - 1) | 1;
2254 level = (((level << 1) + 1) * qscale *
2255 ((int) (quant_matrix[j]))) >> 4;
2256 level = (level - 1) | 1;
2263 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2264 DCTELEM *block, int n, int qscale)
2266 int i, level, nCoeffs;
2267 const uint16_t *quant_matrix;
2269 if(s->alternate_scan) nCoeffs= 63;
2270 else nCoeffs= s->block_last_index[n];
2273 block[0] = block[0] * s->y_dc_scale;
2275 block[0] = block[0] * s->c_dc_scale;
2276 quant_matrix = s->intra_matrix;
2277 for(i=1;i<=nCoeffs;i++) {
2278 int j= s->intra_scantable.permutated[i];
2283 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2286 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2293 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2294 DCTELEM *block, int n, int qscale)
2296 int i, level, nCoeffs;
2297 const uint16_t *quant_matrix;
2300 if(s->alternate_scan) nCoeffs= 63;
2301 else nCoeffs= s->block_last_index[n];
2304 block[0] = block[0] * s->y_dc_scale;
2306 block[0] = block[0] * s->c_dc_scale;
2307 quant_matrix = s->intra_matrix;
2308 for(i=1;i<=nCoeffs;i++) {
2309 int j= s->intra_scantable.permutated[i];
2314 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2317 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2326 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2327 DCTELEM *block, int n, int qscale)
2329 int i, level, nCoeffs;
2330 const uint16_t *quant_matrix;
2333 if(s->alternate_scan) nCoeffs= 63;
2334 else nCoeffs= s->block_last_index[n];
2336 quant_matrix = s->inter_matrix;
2337 for(i=0; i<=nCoeffs; i++) {
2338 int j= s->intra_scantable.permutated[i];
2343 level = (((level << 1) + 1) * qscale *
2344 ((int) (quant_matrix[j]))) >> 4;
2347 level = (((level << 1) + 1) * qscale *
2348 ((int) (quant_matrix[j]))) >> 4;
2357 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2358 DCTELEM *block, int n, int qscale)
2360 int i, level, qmul, qadd;
2363 assert(s->block_last_index[n]>=0);
2369 block[0] = block[0] * s->y_dc_scale;
2371 block[0] = block[0] * s->c_dc_scale;
2372 qadd = (qscale - 1) | 1;
2379 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2381 for(i=1; i<=nCoeffs; i++) {
2385 level = level * qmul - qadd;
2387 level = level * qmul + qadd;
2394 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2395 DCTELEM *block, int n, int qscale)
2397 int i, level, qmul, qadd;
2400 assert(s->block_last_index[n]>=0);
2402 qadd = (qscale - 1) | 1;
2405 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2407 for(i=0; i<=nCoeffs; i++) {
2411 level = level * qmul - qadd;
2413 level = level * qmul + qadd;
2421 * set qscale and update qscale dependent variables.
2423 void ff_set_qscale(MpegEncContext * s, int qscale)
2427 else if (qscale > 31)
2431 s->chroma_qscale= s->chroma_qscale_table[qscale];
2433 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2434 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];