2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
34 #include "mpegvideo.h"
35 #include "mpegvideo_common.h"
39 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantization routines.
 * ff_dct_common_init() installs these into the MpegEncContext function
 * pointers; arch-specific init may later override them with SIMD versions. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity chroma qscale mapping: chroma uses the same quantizer as luma.
 * Used as the default in MPV_common_defaults(); codecs with a nonlinear
 * chroma qscale (e.g. MPEG-4 via mpeg_quant paths) install their own table. */
67 static const uint8_t ff_default_chroma_qscale_table[32]={
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* DC coefficient scale indexed by qscale: constant 8 for all quantizers
 * (MPEG-1 fixes the intra DC scaler to 8 regardless of qscale). */
72 const uint8_t ff_mpeg1_dc_scale_table[128]={
73 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 for every qscale.
 * Selected via ff_mpeg2_dc_scale_table[] — presumably index 1 corresponds
 * to intra_dc_precision 1 (9-bit DC); confirm against the MPEG-2 spec. */
80 static const uint8_t mpeg2_dc_scale_table1[128]={
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 for every qscale (see table1 above). */
88 static const uint8_t mpeg2_dc_scale_table2[128]={
89 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
90 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 for every qscale (see table1 above). */
96 static const uint8_t mpeg2_dc_scale_table3[128]={
97 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC-scale tables above (scales 8, 4, 2, 1).
 * NOTE(review): index appears to be the MPEG-2 intra_dc_precision field
 * (0..3) — confirm against the decoder that dereferences this array. */
104 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Pixel-format negotiation lists exported for 4:2:0 codecs
 * (initializer contents not visible in this excerpt). */
111 const enum PixelFormat ff_pixfmt_list_420[] = {
/* Variant that additionally advertises hardware-accelerated formats. */
116 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (0x000001xx).
 * *state is a rolling 32-bit history of the last bytes seen, allowing a
 * start code split across successive calls/buffers to be detected; the
 * caller must initialize it (typically to -1) and preserve it between calls.
 * The p[-1]/p[-2]/p[-3] tests below skip ahead past bytes that provably
 * cannot begin a start code, instead of advancing one byte at a time.
 */
123 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
131 uint32_t tmp= *state << 8;
132 *state= tmp + *(p++);
133 if(tmp == 0x100 || p==end)
138 if (p[-1] > 1 ) p+= 3;
139 else if(p[-2] ) p+= 2;
140 else if(p[-3]|(p[-1]-1)) p++;
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Install the C reference dequantizers; arch-specific init below may
 * replace them with optimized versions. */
156 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
157 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
158 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
159 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
160 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact mode uses a variant whose rounding matches the spec exactly. */
161 if(s->flags & CODEC_FLAG_BITEXACT)
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
163 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (each guarded by build-time config). */
166 MPV_common_init_mmx(s);
168 MPV_common_init_axp(s);
170 MPV_common_init_mlib(s);
172 MPV_common_init_mmi(s);
174 MPV_common_init_arm(s);
176 MPV_common_init_altivec(s);
178 MPV_common_init_bfin(s);
181 /* load & permute scantables
182 note: only wmv uses different ones
/* Scan order depends on alternate_scan; tables are permuted to match the
 * IDCT's coefficient permutation so (de)zigzag and IDCT compose correctly. */
184 if(s->alternate_scan){
185 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
186 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
189 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copy a Picture and mark the copy as FF_BUFFER_TYPE_COPY so
 * buffer-release logic knows dst does not own the underlying planes. */
197 void ff_copy_picture(Picture *dst, Picture *src){
199 dst->type= FF_BUFFER_TYPE_COPY;
203 * Release a frame buffer: hand the planes back to the application via
 * release_buffer() and free any hwaccel private data attached to it.
205 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
207 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
208 av_freep(&pic->hwaccel_picture_private);
212 * Allocate a frame buffer via the application's get_buffer() callback,
 * plus per-picture hwaccel private data if a hwaccel is active.
 * Validates the returned buffer (age/type/data, stable strides, matching
 * U/V strides) and cleans up on any failure.
214 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
218 if (s->avctx->hwaccel) {
219 assert(!pic->hwaccel_picture_private);
220 if (s->avctx->hwaccel->priv_data_size) {
221 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
222 if (!pic->hwaccel_picture_private) {
223 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
229 r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
/* Reject buffers the application filled in incorrectly. */
231 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
232 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
233 av_freep(&pic->hwaccel_picture_private);
/* Strides must not change between frames once s->linesize is established. */
237 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
238 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
239 free_frame_buffer(s, pic);
/* U and V planes must share a stride (code elsewhere assumes this). */
243 if (pic->linesize[1] != pic->linesize[2]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
245 free_frame_buffer(s, pic);
253 * allocates a Picture
254 * The pixels are allocated/set by calling get_buffer() if shared=0
 * (shared=1 means the caller already provided pic->data[]).
 * Also allocates all per-picture side tables (qscale, mb_type, motion
 * vectors, etc.) on first use; on any failure everything is released
 * via the fail label and -1 is returned (implied by the FF_ALLOCZ_OR_GOTO
 * pattern; return statements are elided in this excerpt).
256 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
257 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
258 const int mb_array_size= s->mb_stride*s->mb_height;
259 const int b8_array_size= s->b8_stride*s->mb_height*2;
260 const int b4_array_size= s->b4_stride*s->mb_height*4;
265 assert(pic->data[0]);
266 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
267 pic->type= FF_BUFFER_TYPE_SHARED;
269 assert(!pic->data[0]);
271 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides returned by get_buffer() for the whole sequence. */
274 s->linesize = pic->linesize[0];
275 s->uvlinesize= pic->linesize[1];
/* Side tables are allocated once per Picture slot and reused afterwards. */
278 if(pic->qscale_table==NULL){
280 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
281 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
285 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
286 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
288 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
/* H.264 stores motion vectors at 4x4 granularity; H.263-family and the
 * encoder/debug paths use 8x8 granularity. */
289 if(s->out_format == FMT_H264){
291 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
292 pic->motion_val[i]= pic->motion_val_base[i]+4;
293 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
295 pic->motion_subsample_log2= 2;
296 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
298 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
299 pic->motion_val[i]= pic->motion_val_base[i]+4;
300 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
302 pic->motion_subsample_log2= 3;
304 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
305 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
307 pic->qstride= s->mb_stride;
308 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
311 /* It might be nicer if the application would keep track of these
312 * but it would require an API change. */
313 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
314 s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
315 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
316 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
319 fail: //for the FF_ALLOCZ_OR_GOTO macro
321 free_frame_buffer(s, pic);
326 * deallocates a picture
328 static void free_picture(MpegEncContext *s, Picture *pic){
331 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
332 free_frame_buffer(s, pic);
335 av_freep(&pic->mb_var);
336 av_freep(&pic->mc_mb_var);
337 av_freep(&pic->mb_mean);
338 av_freep(&pic->mbskip_table);
339 av_freep(&pic->qscale_table);
340 av_freep(&pic->mb_type_base);
341 av_freep(&pic->dct_coeff);
342 av_freep(&pic->pan_scan);
345 av_freep(&pic->motion_val_base[i]);
346 av_freep(&pic->ref_index[i]);
349 if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocate the per-thread scratch state of an MpegEncContext: edge
 * emulation buffer, ME scratchpads/maps, DCT block storage and (for
 * H.263-family) AC prediction values. Returns 0 on success, -1 on
 * allocation failure (freed later through MPV_common_end()). */
358 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
359 int y_size = s->b8_stride * (2 * s->mb_height + 1);
360 int c_size = s->mb_stride * (s->mb_height + 1);
361 int yc_size = y_size + 2 * c_size;
364 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
365 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
366 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
368 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
369 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* The ME/RD/B/OBMC scratchpads intentionally alias one buffer; they are
 * never needed simultaneously. */
370 s->me.temp= s->me.scratchpad;
371 s->rd_scratchpad= s->me.scratchpad;
372 s->b_scratchpad= s->me.scratchpad;
373 s->obmc_scratchpad= s->me.scratchpad + 16;
375 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
376 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
377 if(s->avctx->noise_reduction){
378 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
381 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
382 s->block= s->blocks[0];
385 s->pblocks[i] = &s->block[i];
388 if (s->out_format == FMT_H263) {
390 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
/* +b8_stride+1 / +mb_stride+1 skip the guard row/column above-left of
 * the frame used by the prediction code. */
391 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
392 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
393 s->ac_val[2] = s->ac_val[1] + c_size;
398 return -1; //free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(); the aliasing
 * scratchpad pointers are only NULLed, not freed, since me.scratchpad
 * owns the storage. */
401 static void free_duplicate_context(MpegEncContext *s){
404 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
405 av_freep(&s->me.scratchpad);
409 s->obmc_scratchpad= NULL;
411 av_freep(&s->dct_error_sum);
412 av_freep(&s->me.map);
413 av_freep(&s->me.score_map);
414 av_freep(&s->blocks);
415 av_freep(&s->ac_val_base);
/* Copy the per-thread ("duplicate") fields from src into bak; used by
 * ff_update_duplicate_context() to preserve a thread's private pointers
 * across a wholesale memcpy of the context. */
419 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
420 #define COPY(a) bak->a= src->a
421 COPY(allocated_edge_emu_buffer);
422 COPY(edge_emu_buffer);
427 COPY(obmc_scratchpad);
434 COPY(me.map_generation);
/* Refresh a worker-thread context from the master: save dst's private
 * fields, bulk-copy src over dst, restore the private fields, then
 * re-point pblocks[] at dst's own block storage (the memcpy left them
 * pointing into src). */
446 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
449 //FIXME copy only needed parts
451 backup_duplicate_context(&bak, dst);
452 memcpy(dst, src, sizeof(MpegEncContext));
453 backup_duplicate_context(dst, &bak);
455 dst->pblocks[i] = &dst->block[i];
457 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
461 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
462 * the changed fields will not depend upon the prior state of the MpegEncContext.
464 void MPV_common_defaults(MpegEncContext *s){
466 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
467 s->chroma_qscale_table= ff_default_chroma_qscale_table;
/* Assume progressive full-frame coding until a header says otherwise. */
468 s->progressive_frame= 1;
469 s->progressive_sequence= 1;
470 s->picture_structure= PICT_FRAME;
472 s->coded_picture_number = 0;
473 s->picture_number = 0;
474 s->input_picture_number = 0;
476 s->picture_in_gop_number = 0;
483 * sets the given MpegEncContext to defaults for decoding.
484 * the changed fields will not depend upon the prior state of the MpegEncContext.
486 void MPV_decode_defaults(MpegEncContext *s){
487 MPV_common_defaults(s);
491 * init common structure for both encoder and decoder.
492 * this assumes that some variables like width/height are already set
494 av_cold int MPV_common_init(MpegEncContext *s)
496 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
/* Interlaced MPEG-2 needs mb_height rounded up to a multiple of two
 * macroblock rows (field macroblocks come in pairs). */
498 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
499 s->mb_height = (s->height + 31) / 32 * 2;
501 s->mb_height = (s->height + 15) / 16;
503 if(s->avctx->pix_fmt == PIX_FMT_NONE){
504 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Slice threading splits by macroblock row, so more threads than rows is useless. */
508 if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
509 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
513 if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
516 dsputil_init(&s->dsp, s->avctx);
517 ff_dct_common_init(s);
519 s->flags= s->avctx->flags;
520 s->flags2= s->avctx->flags2;
/* Strides include one extra column as guard space for prediction. */
522 s->mb_width = (s->width + 15) / 16;
523 s->mb_stride = s->mb_width + 1;
524 s->b8_stride = s->mb_width*2 + 1;
525 s->b4_stride = s->mb_width*4 + 1;
526 mb_array_size= s->mb_height * s->mb_stride;
527 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
529 /* set chroma shifts */
530 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
531 &(s->chroma_y_shift) );
533 /* set default edge pos, will be overridden in decode_header if needed */
534 s->h_edge_pos= s->mb_width*16;
535 s->v_edge_pos= s->mb_height*16;
537 s->mb_num = s->mb_width * s->mb_height;
542 s->block_wrap[3]= s->b8_stride;
544 s->block_wrap[5]= s->mb_stride;
546 y_size = s->b8_stride * (2 * s->mb_height + 1);
547 c_size = s->mb_stride * (s->mb_height + 1);
548 yc_size = y_size + 2 * c_size;
550 /* convert fourcc to upper case */
551 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
553 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
555 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
557 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error resilience code looks cleaner with this
558 for(y=0; y<s->mb_height; y++){
559 for(x=0; x<s->mb_width; x++){
560 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
563 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
566 /* Allocate MV tables */
567 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
568 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
570 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
571 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
572 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* The +mb_stride+1 offset skips the guard row/column so index 0 maps to
 * the top-left macroblock. */
573 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
574 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
575 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
576 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
577 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
578 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
580 if(s->msmpeg4_version){
581 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
583 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
585 /* Allocate MB type table */
586 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
588 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
590 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
591 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
592 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
593 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
594 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
595 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
597 if(s->avctx->noise_reduction){
598 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
601 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
602 for(i = 0; i < MAX_PICTURE_COUNT; i++) {
603 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
606 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
608 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
609 /* interlaced direct mode decoding tables */
614 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
615 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
617 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
618 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
619 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
621 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
624 if (s->out_format == FMT_H263) {
626 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
627 s->coded_block= s->coded_block_base + s->b8_stride + 1;
629 /* cbp, ac_pred, pred_dir */
630 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
631 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
634 if (s->h263_pred || s->h263_plus || !s->encoding) {
636 //MN: we need these for error resilience of intra-frames
637 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
638 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
639 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
640 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the DC predictor reset value (128 << 3). */
641 for(i=0;i<yc_size;i++)
642 s->dc_val_base[i] = 1024;
645 /* which MBs are intra blocks */
646 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
647 memset(s->mbintra_table, 1, mb_array_size);
649 /* init macroblock skip table */
650 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
651 //Note the +1 is for a quicker mpeg4 slice_end detection
652 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
654 s->parse_context.state= -1;
655 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
656 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
657 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
658 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
661 s->context_initialized = 1;
/* Thread 0 is the master context itself; workers get copies. */
663 s->thread_context[0]= s;
664 threads = s->avctx->thread_count;
666 for(i=1; i<threads; i++){
667 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
668 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
671 for(i=0; i<threads; i++){
672 if(init_duplicate_context(s->thread_context[i], s) < 0)
/* Divide macroblock rows between threads, rounding to balance the load. */
674 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
675 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
684 /* Free everything allocated by MPV_common_init(); safe to call on a
 * partially initialized context because av_freep() tolerates NULL. */
685 void MPV_common_end(MpegEncContext *s)
/* Free per-thread scratch state first; thread_context[0] is s itself,
 * so only indices >= 1 are heap-allocated contexts. */
689 for(i=0; i<s->avctx->thread_count; i++){
690 free_duplicate_context(s->thread_context[i]);
692 for(i=1; i<s->avctx->thread_count; i++){
693 av_freep(&s->thread_context[i]);
696 av_freep(&s->parse_context.buffer);
697 s->parse_context.buffer_size=0;
699 av_freep(&s->mb_type);
/* MV tables: free the _base pointers (the public pointers are offsets
 * into them), then NULL the derived pointers. */
700 av_freep(&s->p_mv_table_base);
701 av_freep(&s->b_forw_mv_table_base);
702 av_freep(&s->b_back_mv_table_base);
703 av_freep(&s->b_bidir_forw_mv_table_base);
704 av_freep(&s->b_bidir_back_mv_table_base);
705 av_freep(&s->b_direct_mv_table_base);
707 s->b_forw_mv_table= NULL;
708 s->b_back_mv_table= NULL;
709 s->b_bidir_forw_mv_table= NULL;
710 s->b_bidir_back_mv_table= NULL;
711 s->b_direct_mv_table= NULL;
715 av_freep(&s->b_field_mv_table_base[i][j][k]);
716 s->b_field_mv_table[i][j][k]=NULL;
718 av_freep(&s->b_field_select_table[i][j]);
719 av_freep(&s->p_field_mv_table_base[i][j]);
720 s->p_field_mv_table[i][j]=NULL;
722 av_freep(&s->p_field_select_table[i]);
725 av_freep(&s->dc_val_base);
726 av_freep(&s->coded_block_base);
727 av_freep(&s->mbintra_table);
728 av_freep(&s->cbp_table);
729 av_freep(&s->pred_dir_table);
731 av_freep(&s->mbskip_table);
732 av_freep(&s->prev_pict_types);
733 av_freep(&s->bitstream_buffer);
734 s->allocated_bitstream_buffer_size=0;
736 av_freep(&s->avctx->stats_out);
737 av_freep(&s->ac_stats);
738 av_freep(&s->error_status_table);
739 av_freep(&s->mb_index2xy);
740 av_freep(&s->lambda_table);
741 av_freep(&s->q_intra_matrix);
742 av_freep(&s->q_inter_matrix);
743 av_freep(&s->q_intra_matrix16);
744 av_freep(&s->q_inter_matrix16);
745 av_freep(&s->input_picture);
746 av_freep(&s->reordered_input_picture);
747 av_freep(&s->dct_offset);
/* Release every picture's side tables and buffers before the array itself. */
750 for(i=0; i<MAX_PICTURE_COUNT; i++){
751 free_picture(s, &s->picture[i]);
754 av_freep(&s->picture);
755 s->context_initialized = 0;
758 s->current_picture_ptr= NULL;
759 s->linesize= s->uvlinesize= 0;
762 av_freep(&s->visualization_buffer[i]);
764 avcodec_default_free_buffers(s->avctx);
/**
 * Build the derived lookup tables (max_level[], max_run[], index_run[])
 * of an RLTable from its table_run/table_level arrays, for both the
 * non-last (last=0) and last (last=1) coefficient classes.
 * If static_store is non-NULL the tables live in that caller-provided
 * static buffer (and the call is a no-op when already initialized);
 * otherwise they are av_malloc()ed.
 */
767 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
769 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
770 uint8_t index_run[MAX_RUN+1];
771 int last, run, level, start, end, i;
773 /* If table is static, we can quit if rl->max_level[0] is not NULL */
774 if(static_store && rl->max_level[0])
777 /* compute max_level[], max_run[] and index_run[] */
778 for(last=0;last<2;last++) {
/* rl->n is the "no entry" sentinel for index_run[]. */
787 memset(max_level, 0, MAX_RUN + 1);
788 memset(max_run, 0, MAX_LEVEL + 1);
789 memset(index_run, rl->n, MAX_RUN + 1);
790 for(i=start;i<end;i++) {
791 run = rl->table_run[i];
792 level = rl->table_level[i];
/* Record only the first table index for each run value. */
793 if (index_run[run] == rl->n)
795 if (level > max_level[run])
796 max_level[run] = level;
797 if (run > max_run[level])
798 max_run[level] = run;
/* Static storage carves three consecutive sub-arrays out of
 * static_store[last]; the heap path allocates each separately. */
801 rl->max_level[last] = static_store[last];
803 rl->max_level[last] = av_malloc(MAX_RUN + 1);
804 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
806 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
808 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
809 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
811 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
813 rl->index_run[last] = av_malloc(MAX_RUN + 1);
814 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute rl->rl_vlc[q]: for each VLC table entry, bake the decoded
 * (run, level, len) triple — with level pre-scaled by the quantizer
 * (qmul/qadd) — so the decoder can fetch everything with one lookup. */
818 void init_vlc_rl(RLTable *rl)
830 for(i=0; i<rl->vlc.table_size; i++){
831 int code= rl->vlc.table[i][0];
832 int len = rl->vlc.table[i][1];
835 if(len==0){ // illegal code
838 }else if(len<0){ //more bits needed
842 if(code==rl->n){ //esc
/* run is stored off-by-one in table_run; +192 flags a "last" coefficient. */
846 run= rl->table_run [code] + 1;
847 level= rl->table_level[code] * qmul + qadd;
848 if(code >= rl->last) run+=192;
851 rl->rl_vlc[q][i].len= len;
852 rl->rl_vlc[q][i].level= level;
853 rl->rl_vlc[q][i].run= run;
/* Return the index of a free slot in s->picture[]. shared!=0 requires a
 * completely untouched slot (data==NULL && type==0); otherwise slots are
 * preferred in order: untyped free slot, typed free slot, any free slot.
 * Running out of slots is treated as an internal error (see comment). */
858 int ff_find_unused_picture(MpegEncContext *s, int shared){
862 for(i=0; i<MAX_PICTURE_COUNT; i++){
863 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
866 for(i=0; i<MAX_PICTURE_COUNT; i++){
867 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
869 for(i=0; i<MAX_PICTURE_COUNT; i++){
870 if(s->picture[i].data[0]==NULL) return i;
874 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
875 /* We could return -1, but the codec would crash trying to draw into a
876 * non-existing frame anyway. This is safer than waiting for a random crash.
877 * Also the return of this is never useful, an encoder must only allocate
878 * as much as allowed in the specification. This has no relationship to how
879 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
880 * enough for such valid streams).
881 * Plus, a decoder has to check stream validity and remove frames if too
882 * many reference frames are around. Waiting for "OOM" is not correct at
883 * all. Similarly, missing reference frames have to be replaced by
884 * interpolated/MC frames, anything else is a bug in the codec ...
/* Recompute the per-coefficient DCT offsets used for noise reduction from
 * the accumulated error sums, separately for intra and inter blocks.
 * Counts/sums are halved once dct_count exceeds 2^16 to keep a decaying
 * average and avoid overflow. */
890 static void update_noise_reduction(MpegEncContext *s){
893 for(intra=0; intra<2; intra++){
894 if(s->dct_count[intra] > (1<<16)){
896 s->dct_error_sum[intra][i] >>=1;
898 s->dct_count[intra] >>= 1;
/* +1 in the divisor guards against division by zero. */
902 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
908 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
 * Manages reference-picture rotation (last/next/current), allocates the
 * current picture (and dummy references for streams starting on a non-key
 * frame), sets field offsets for field pictures, and selects the
 * dequantizer for the frame's codec. Returns <0 on failure.
910 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
916 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
918 /* mark&release old frames */
919 if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
920 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
921 free_frame_buffer(s, s->last_picture_ptr);
923 /* release forgotten pictures */
924 /* if(mpeg124/h263) */
926 for(i=0; i<MAX_PICTURE_COUNT; i++){
927 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
928 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
929 free_frame_buffer(s, &s->picture[i]);
937 /* release non reference frames */
938 for(i=0; i<MAX_PICTURE_COUNT; i++){
939 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
940 free_frame_buffer(s, &s->picture[i]);
944 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
945 pic= s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
947 i= ff_find_unused_picture(s, 0);
/* H.264 references individual fields; other codecs reference whole
 * frames except for B-pictures. */
953 if (s->codec_id == CODEC_ID_H264)
954 pic->reference = s->picture_structure;
955 else if (s->pict_type != FF_B_TYPE)
959 pic->coded_picture_number= s->coded_picture_number++;
961 if(ff_alloc_picture(s, pic, 0) < 0)
964 s->current_picture_ptr= pic;
965 s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
966 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
969 s->current_picture_ptr->pict_type= s->pict_type;
970 // if(s->flags && CODEC_FLAG_QSCALE)
971 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
972 s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
974 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Rotate references: non-B pictures become the new "next" reference. */
976 if (s->pict_type != FF_B_TYPE) {
977 s->last_picture_ptr= s->next_picture_ptr;
979 s->next_picture_ptr= s->current_picture_ptr;
981 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
982 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
983 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
984 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
985 s->pict_type, s->dropable);*/
/* Streams that start on a P/B frame get dummy reference pictures so
 * motion compensation has something (garbage) to predict from. */
987 if(s->codec_id != CODEC_ID_H264){
988 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
989 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
990 /* Allocate a dummy frame */
991 i= ff_find_unused_picture(s, 0);
992 s->last_picture_ptr= &s->picture[i];
993 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
996 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
997 /* Allocate a dummy frame */
998 i= ff_find_unused_picture(s, 0);
999 s->next_picture_ptr= &s->picture[i];
1000 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1005 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1006 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1008 assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* Field pictures: offset data pointers to the selected field and double
 * the strides so the field is addressed like a frame. */
1010 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1013 if(s->picture_structure == PICT_BOTTOM_FIELD){
1014 s->current_picture.data[i] += s->current_picture.linesize[i];
1016 s->current_picture.linesize[i] *= 2;
1017 s->last_picture.linesize[i] *=2;
1018 s->next_picture.linesize[i] *=2;
1022 s->hurry_up= s->avctx->hurry_up;
1023 s->error_recognition= avctx->error_recognition;
1025 /* set dequantizer, we can't do it during init as it might change for mpeg4
1026 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1027 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1028 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1029 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1030 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1031 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1032 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1034 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1035 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1038 if(s->dct_error_sum){
1039 assert(s->avctx->noise_reduction && s->encoding);
1041 update_noise_reduction(s);
1044 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1045 return ff_xvmc_field_start(s, avctx);
/*
 * NOTE(review): this chunk is an elided listing (gaps in the embedded line
 * numbering); braces and statements missing below exist in the full file.
 * Only visible statements are documented here.
 */
1050 /* generic function for encode/decode called after a frame has been coded/decoded */
1051 void MPV_frame_end(MpegEncContext *s)
/* Pads the reconstructed reference frame's borders (for unrestricted MV
 * prediction), updates last-picture bookkeeping, releases non-reference
 * frame buffers and clears the local Picture copies. */
1054 /* draw edge for correct motion prediction if outside */
1055 //just to make sure that all data is rendered.
1056 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1057 ff_xvmc_field_end(s);
/* Edge drawing is skipped for hwaccel/VDPAU since the pixels live in
 * accelerator-owned surfaces, and when CODEC_FLAG_EMU_EDGE is set. */
1058 }else if(!s->avctx->hwaccel
1059 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1060 && s->unrestricted_mv
1061 && s->current_picture.reference
1063 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1064 s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
1065 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1066 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1070 s->last_pict_type = s->pict_type;
1071 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1072 if(s->pict_type!=FF_B_TYPE){
1073 s->last_non_b_pict_type= s->pict_type;
1076 /* copy back current_picture variables */
1077 for(i=0; i<MAX_PICTURE_COUNT; i++){
1078 if(s->picture[i].data[0] == s->current_picture.data[0]){
1079 s->picture[i]= s->current_picture;
1083 assert(i<MAX_PICTURE_COUNT);
1087 /* release non-reference frames */
1088 for(i=0; i<MAX_PICTURE_COUNT; i++){
1089 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1090 free_frame_buffer(s, &s->picture[i]);
1094 // clear copies, to avoid confusion
1096 memset(&s->last_picture, 0, sizeof(Picture));
1097 memset(&s->next_picture, 0, sizeof(Picture));
1098 memset(&s->current_picture, 0, sizeof(Picture));
1100 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/*
 * NOTE(review): elided listing — some branch bodies/braces are not visible.
 * Fixed-point (16.16) line drawing: the fractional part fr weights the color
 * between two adjacent pixels, and color is ADDED to the buffer, not assigned.
 */
1104 * draws a line from (ex, ey) -> (sx, sy).
1105 * @param w width of the image
1106 * @param h height of the image
1107 * @param stride stride/linesize of the image
1108 * @param color color of the line, added to the existing pixel values
1110 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Clamp both endpoints into the image so all writes stay in bounds. */
1113 sx= av_clip(sx, 0, w-1);
1114 sy= av_clip(sy, 0, h-1);
1115 ex= av_clip(ex, 0, w-1);
1116 ey= av_clip(ey, 0, h-1);
1118 buf[sy*stride + sx]+= color;
/* Choose the major axis: step along x when the line is more horizontal. */
1120 if(FFABS(ex - sx) > FFABS(ey - sy)){
1122 FFSWAP(int, sx, ex);
1123 FFSWAP(int, sy, ey);
1125 buf+= sx + sy*stride;
1127 f= ((ey-sy)<<16)/ex;
1128 for(x= 0; x <= ex; x++){
1131 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1132 buf[(y+1)*stride + x]+= (color* fr )>>16;
1136 FFSWAP(int, sx, ex);
1137 FFSWAP(int, sy, ey);
1139 buf+= sx + sy*stride;
/* Guard against division by zero for a degenerate (single-point) line. */
1141 if(ey) f= ((ex-sx)<<16)/ey;
1143 for(y= 0; y <= ey; y++){
1146 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1147 buf[y*stride + x+1]+= (color* fr )>>16;
/*
 * NOTE(review): elided listing — the declarations of dx/dy/rx/ry are on
 * elided lines; only the visible statements are documented.
 */
1153 * draws an arrow from (ex, ey) -> (sx, sy).
1154 * @param w width of the image
1155 * @param h height of the image
1156 * @param stride stride/linesize of the image
1157 * @param color color of the arrow
1159 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Loose clamp (±100 px outside the image) so arrows that start off-screen
 * still get clipped by draw_line(), which clamps hard to the image. */
1162 sx= av_clip(sx, -100, w+100);
1163 sy= av_clip(sy, -100, h+100);
1164 ex= av_clip(ex, -100, w+100);
1165 ey= av_clip(ey, -100, h+100);
/* Only draw the arrow head when the vector is longer than 3 pixels. */
1170 if(dx*dx + dy*dy > 3*3){
1173 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1175 //FIXME subpixel accuracy
1176 rx= ROUNDED_DIV(rx*3<<4, length);
1177 ry= ROUNDED_DIV(ry*3<<4, length);
/* Two short strokes rotated ±90° around (rx,ry) form the arrow head. */
1179 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1180 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1182 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/*
 * NOTE(review): elided listing — many interior lines (brace closures, some
 * switch labels, variable declarations like ptr/x/y/i) are not visible here.
 * The function has two parts: (1) textual per-MB dump to the log when
 * FF_DEBUG_SKIP/QP/MB_TYPE are set, (2) visual overlays (motion vectors,
 * QP shading, MB-type coloring) painted into a copy of the frame when
 * FF_DEBUG_VIS_* or debug_mv are set.
 */
1186 * prints debugging info for the given picture.
1188 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
/* Nothing to inspect for hwaccel frames (pixels not in system memory). */
1190 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1192 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1195 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1196 switch (pict->pict_type) {
1197 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1198 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1199 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1200 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1201 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1202 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
/* One log character (or digit) per macroblock, row by row. */
1204 for(y=0; y<s->mb_height; y++){
1205 for(x=0; x<s->mb_width; x++){
1206 if(s->avctx->debug&FF_DEBUG_SKIP){
1207 int count= s->mbskip_table[x + y*s->mb_stride];
1208 if(count>9) count=9;
1209 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1211 if(s->avctx->debug&FF_DEBUG_QP){
1212 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1214 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1215 int mb_type= pict->mb_type[x + y*s->mb_stride];
1216 //Type & MV direction
1218 av_log(s->avctx, AV_LOG_DEBUG, "P");
1219 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1220 av_log(s->avctx, AV_LOG_DEBUG, "A");
1221 else if(IS_INTRA4x4(mb_type))
1222 av_log(s->avctx, AV_LOG_DEBUG, "i");
1223 else if(IS_INTRA16x16(mb_type))
1224 av_log(s->avctx, AV_LOG_DEBUG, "I");
1225 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1226 av_log(s->avctx, AV_LOG_DEBUG, "d");
1227 else if(IS_DIRECT(mb_type))
1228 av_log(s->avctx, AV_LOG_DEBUG, "D");
1229 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1230 av_log(s->avctx, AV_LOG_DEBUG, "g");
1231 else if(IS_GMC(mb_type))
1232 av_log(s->avctx, AV_LOG_DEBUG, "G");
1233 else if(IS_SKIP(mb_type))
1234 av_log(s->avctx, AV_LOG_DEBUG, "S");
1235 else if(!USES_LIST(mb_type, 1))
1236 av_log(s->avctx, AV_LOG_DEBUG, ">");
1237 else if(!USES_LIST(mb_type, 0))
1238 av_log(s->avctx, AV_LOG_DEBUG, "<");
1240 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1241 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape of the macroblock. */
1246 av_log(s->avctx, AV_LOG_DEBUG, "+");
1247 else if(IS_16X8(mb_type))
1248 av_log(s->avctx, AV_LOG_DEBUG, "-");
1249 else if(IS_8X16(mb_type))
1250 av_log(s->avctx, AV_LOG_DEBUG, "|");
1251 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1252 av_log(s->avctx, AV_LOG_DEBUG, " ");
1254 av_log(s->avctx, AV_LOG_DEBUG, "?");
1257 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1258 av_log(s->avctx, AV_LOG_DEBUG, "=");
1260 av_log(s->avctx, AV_LOG_DEBUG, " ");
1262 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1264 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlay section ---- */
1268 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1269 const int shift= 1 + s->quarter_sample;
1273 int h_chroma_shift, v_chroma_shift, block_height;
1274 const int width = s->avctx->width;
1275 const int height= s->avctx->height;
1276 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1277 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1278 s->low_delay=0; //needed to see the vectors without trashing the buffers
1280 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* Work on a private copy so the real reference planes are not modified. */
1282 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1283 pict->data[i]= s->visualization_buffer[i];
1285 pict->type= FF_BUFFER_TYPE_COPY;
1287 block_height = 16>>v_chroma_shift;
1289 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1291 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1292 const int mb_index= mb_x + mb_y*s->mb_stride;
1293 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0/1/2 = P-forward, B-forward, B-backward vector overlays. */
1295 for(type=0; type<3; type++){
1298 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1302 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1306 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1311 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8/8x16, 1 for 16x16. */
1314 if(IS_8X8(pict->mb_type[mb_index])){
1317 int sx= mb_x*16 + 4 + 8*(i&1);
1318 int sy= mb_y*16 + 4 + 8*(i>>1);
1319 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1320 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1321 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1322 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1324 }else if(IS_16X8(pict->mb_type[mb_index])){
1328 int sy=mb_y*16 + 4 + 8*i;
1329 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1330 int mx=(pict->motion_val[direction][xy][0]>>shift);
1331 int my=(pict->motion_val[direction][xy][1]>>shift);
1333 if(IS_INTERLACED(pict->mb_type[mb_index]))
1336 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1338 }else if(IS_8X16(pict->mb_type[mb_index])){
1341 int sx=mb_x*16 + 4 + 8*i;
1343 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1344 int mx=(pict->motion_val[direction][xy][0]>>shift);
1345 int my=(pict->motion_val[direction][xy][1]>>shift);
1347 if(IS_INTERLACED(pict->mb_type[mb_index]))
1350 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1353 int sx= mb_x*16 + 8;
1354 int sy= mb_y*16 + 8;
1355 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1356 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1357 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1358 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: shade chroma planes proportionally to the MB qscale. */
1362 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1363 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1365 for(y=0; y<block_height; y++){
1366 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1367 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
1370 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1371 int mb_type= pict->mb_type[mb_index];
/* COLOR maps an angle on the U/V color wheel plus a radius to (u,v). */
1374 #define COLOR(theta, r)\
1375 u= (int)(128 + r*cos(theta*3.141592/180));\
1376 v= (int)(128 + r*sin(theta*3.141592/180));
1380 if(IS_PCM(mb_type)){
1382 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1384 }else if(IS_INTRA4x4(mb_type)){
1386 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1388 }else if(IS_DIRECT(mb_type)){
1390 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1392 }else if(IS_GMC(mb_type)){
1394 }else if(IS_SKIP(mb_type)){
1396 }else if(!USES_LIST(mb_type, 1)){
1398 }else if(!USES_LIST(mb_type, 0)){
1401 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1405 u*= 0x0101010101010101ULL;
1406 v*= 0x0101010101010101ULL;
1407 for(y=0; y<block_height; y++){
1408 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1409 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* Partition boundaries are marked by XOR-ing 0x80 (inverts the MSB). */
1413 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1414 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1415 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1417 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1419 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1421 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1422 int dm= 1 << (mv_sample_log2-2);
1424 int sx= mb_x*16 + 8*(i&1);
1425 int sy= mb_y*16 + 8*(i>>1);
1426 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1428 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
/* Only draw sub-partition lines where neighboring MVs actually differ. */
1429 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1431 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1432 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1433 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1437 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1441 s->mbskip_table[mb_index]=0;
/*
 * Half-pel motion compensation for one block in lowres (downscaled) decoding.
 * NOTE(review): elided listing — the quarter_sample branch body and some
 * declarations (sx, sy, emu) are on elided lines.
 */
1447 static inline int hpel_motion_lowres(MpegEncContext *s,
1448 uint8_t *dest, uint8_t *src,
1449 int field_based, int field_select,
1450 int src_x, int src_y,
1451 int width, int height, int stride,
1452 int h_edge_pos, int v_edge_pos,
1453 int w, int h, h264_chroma_mc_func *pix_op,
1454 int motion_x, int motion_y)
1456 const int lowres= s->avctx->lowres;
1457 const int op_index= FFMIN(lowres, 2);
1458 const int s_mask= (2<<lowres)-1;
1462 if(s->quarter_sample){
/* Split the MV into a subpel fraction (sx/sy) and an integer offset. */
1467 sx= motion_x & s_mask;
1468 sy= motion_y & s_mask;
1469 src_x += motion_x >> (lowres+1);
1470 src_y += motion_y >> (lowres+1);
1472 src += src_y * stride + src_x;
/* Unsigned compare catches both negative and too-large positions; fall
 * back to edge emulation when the read would leave the padded area. */
1474 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1475 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1476 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1477 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1478 src= s->edge_emu_buffer;
/* Rescale the subpel fraction to the 1/8-pel grid the chroma MC expects. */
1482 sx= (sx << 2) >> lowres;
1483 sy= (sy << 2) >> lowres;
1486 pix_op[op_index](dest, src, stride, h, sx, sy);
/*
 * NOTE(review): elided listing — several branch bodies (quarter_sample
 * handling, H.263/H.261 chroma setup, the "else" chroma path) have lines
 * missing here; only visible statements are annotated.
 */
1490 /* apply one mpeg motion vector to the three components */
1491 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1492 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1493 int field_based, int bottom_field, int field_select,
1494 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1495 int motion_x, int motion_y, int h, int mb_y)
1497 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1498 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1499 const int lowres= s->avctx->lowres;
1500 const int op_index= FFMIN(lowres, 2);
1501 const int block_s= 8>>lowres;
1502 const int s_mask= (2<<lowres)-1;
1503 const int h_edge_pos = s->h_edge_pos >> lowres;
1504 const int v_edge_pos = s->v_edge_pos >> lowres;
/* Doubled linesize for field-based pictures interleaved in one frame. */
1505 linesize = s->current_picture.linesize[0] << field_based;
1506 uvlinesize = s->current_picture.linesize[1] << field_based;
1508 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
1514 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1517 sx= motion_x & s_mask;
1518 sy= motion_y & s_mask;
1519 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1520 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1522 if (s->out_format == FMT_H263) {
1523 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1524 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1527 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1530 uvsx = (2*mx) & s_mask;
1531 uvsy = (2*my) & s_mask;
1532 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1533 uvsrc_y = mb_y*block_s + (my >> lowres);
1539 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1540 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1543 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1544 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1545 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Out-of-picture reads are redirected through the edge emulation buffer. */
1547 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1548 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1549 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1550 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1551 ptr_y = s->edge_emu_buffer;
1552 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1553 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1554 ff_emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1555 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1556 ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1557 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1563 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1564 dest_y += s->linesize;
1565 dest_cb+= s->uvlinesize;
1566 dest_cr+= s->uvlinesize;
1570 ptr_y += s->linesize;
1571 ptr_cb+= s->uvlinesize;
1572 ptr_cr+= s->uvlinesize;
1575 sx= (sx << 2) >> lowres;
1576 sy= (sy << 2) >> lowres;
1577 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1579 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1580 uvsx= (uvsx << 2) >> lowres;
1581 uvsy= (uvsy << 2) >> lowres;
1582 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1583 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1585 //FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV (four luma vectors) macroblocks in
 * lowres mode: one averaged chroma vector is derived from the four luma MVs.
 * NOTE(review): elided listing — the mx/my parameters and declarations of
 * ptr/sx/sy, plus the quarter_sample branch body, are on elided lines.
 */
1588 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1589 uint8_t *dest_cb, uint8_t *dest_cr,
1590 uint8_t **ref_picture,
1591 h264_chroma_mc_func *pix_op,
1593 const int lowres= s->avctx->lowres;
1594 const int op_index= FFMIN(lowres, 2);
1595 const int block_s= 8>>lowres;
1596 const int s_mask= (2<<lowres)-1;
1597 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1598 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1599 int emu=0, src_x, src_y, offset, sx, sy;
1602 if(s->quarter_sample){
1607 /* In case of 8X8, we construct a single chroma motion vector
1608 with a special rounding */
1609 mx= ff_h263_round_chroma(mx);
1610 my= ff_h263_round_chroma(my);
1614 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1615 src_y = s->mb_y*block_s + (my >> (lowres+1));
1617 offset = src_y * s->uvlinesize + src_x;
1618 ptr = ref_picture[1] + offset;
1619 if(s->flags&CODEC_FLAG_EMU_EDGE){
1620 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1621 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1622 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1623 ptr= s->edge_emu_buffer;
1627 sx= (sx << 2) >> lowres;
1628 sy= (sy << 2) >> lowres;
1629 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr reuses the same offset/subpel position computed for Cb. */
1631 ptr = ref_picture[2] + offset;
1633 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1634 ptr= s->edge_emu_buffer;
1636 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/*
 * NOTE(review): elided listing — the switch case labels (MV_TYPE_16X16,
 * MV_TYPE_8X8, MV_TYPE_FIELD, MV_TYPE_16X8, MV_TYPE_DMV presumably) and
 * several loop headers/braces are on elided lines; only visible calls are
 * annotated.
 */
1640 * motion compensation of a single macroblock
1642 * @param dest_y luma destination pointer
1643 * @param dest_cb chroma cb/u destination pointer
1644 * @param dest_cr chroma cr/v destination pointer
1645 * @param dir direction (0->forward, 1->backward)
1646 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1647 * @param pix_op halfpel motion compensation function (average or put normally)
1648 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1650 static inline void MPV_motion_lowres(MpegEncContext *s,
1651 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1652 int dir, uint8_t **ref_picture,
1653 h264_chroma_mc_func *pix_op)
1657 const int lowres= s->avctx->lowres;
1658 const int block_s= 8>>lowres;
1663 switch(s->mv_type) {
/* Single 16x16 vector: one full-MB call. */
1665 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1667 ref_picture, pix_op,
1668 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one 8x8 luma call per quadrant, chroma handled once afterwards. */
1674 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1675 ref_picture[0], 0, 0,
1676 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1677 s->width, s->height, s->linesize,
1678 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1679 block_s, block_s, pix_op,
1680 s->mv[dir][i][0], s->mv[dir][i][1]);
/* Accumulate the four luma MVs for the averaged chroma vector. */
1682 mx += s->mv[dir][i][0];
1683 my += s->mv[dir][i][1];
1686 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1687 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
1690 if (s->picture_structure == PICT_FRAME) {
/* Frame picture with field MVs: compensate top then bottom field. */
1692 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1693 1, 0, s->field_select[dir][0],
1694 ref_picture, pix_op,
1695 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1697 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1698 1, 1, s->field_select[dir][1],
1699 ref_picture, pix_op,
1700 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* Field picture: a same-parity reference in the current frame is used
 * when referencing the opposite field of the frame being decoded. */
1702 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
1703 ref_picture= s->current_picture_ptr->data;
1706 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1707 0, 0, s->field_select[dir][0],
1708 ref_picture, pix_op,
1709 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1714 uint8_t ** ref2picture;
1716 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
1717 ref2picture= ref_picture;
1719 ref2picture= s->current_picture_ptr->data;
1722 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1723 0, 0, s->field_select[dir][i],
1724 ref2picture, pix_op,
1725 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1727 dest_y += 2*block_s*s->linesize;
1728 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1729 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1733 if(s->picture_structure == PICT_FRAME){
1737 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1739 ref_picture, pix_op,
1740 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1742 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1746 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1747 0, 0, s->picture_structure != i+1,
1748 ref_picture, pix_op,
1749 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1751 // after put we make avg of the same block
1752 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1754 //opposite parity is always in the same frame if this is second field
1755 if(!s->first_field){
1756 ref_picture = s->current_picture_ptr->data;
/* Dequantize an intra block with the currently selected unquantizer and
 * write (not add) the inverse-DCT result into dest. */
1765 /* put block[] to dest[] */
1766 static inline void put_dct(MpegEncContext *s,
1767 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1769 s->dct_unquantize_intra(s, block, i, qscale);
1770 s->dsp.idct_put (dest, line_size, block);
/* Add the inverse-DCT of an already-dequantized block to dest; a negative
 * block_last_index means the block has no coded coefficients, so skip it. */
1773 /* add block[] to dest[] */
1774 static inline void add_dct(MpegEncContext *s,
1775 DCTELEM *block, int i, uint8_t *dest, int line_size)
1777 if (s->block_last_index[i] >= 0) {
1778 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, then add its inverse-DCT residue to dest;
 * skipped entirely when the block has no coded coefficients. */
1782 static inline void add_dequant_dct(MpegEncContext *s,
1783 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1785 if (s->block_last_index[i] >= 0) {
1786 s->dct_unquantize_inter(s, block, i, qscale);
1788 s->dsp.idct_add (dest, line_size, block);
/*
 * NOTE(review): elided listing — some assignments (e.g. dc_val[0][xy],
 * dc_val[1]) are on elided lines.
 */
1793 * cleans dc, ac, coded_block for the current non intra MB
1795 void ff_clean_intra_table_entries(MpegEncContext *s)
1797 int wrap = s->b8_stride;
1798 int xy = s->block_index[0];
/* Reset luma DC predictors to the neutral value 1024. */
1801 s->dc_val[0][xy + 1 ] =
1802 s->dc_val[0][xy + wrap] =
1803 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Clear luma AC prediction buffers (two rows of 8x8 blocks). */
1805 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1806 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* coded_block flags exist only for MSMPEG4 v3 and later. */
1807 if (s->msmpeg4_version>=3) {
1808 s->coded_block[xy ] =
1809 s->coded_block[xy + 1 ] =
1810 s->coded_block[xy + wrap] =
1811 s->coded_block[xy + 1 + wrap] = 0;
/* Switch to macroblock-granularity indexing for the chroma tables. */
1814 wrap = s->mb_stride;
1815 xy = s->mb_x + s->mb_y * wrap;
1817 s->dc_val[2][xy] = 1024;
1819 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1820 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1822 s->mbintra_table[xy]= 0;
/*
 * NOTE(review): elided listing — numerous interior lines (returns, loop
 * headers, else-branches, skip_idct label, closing braces) are missing from
 * this view; only the visible statements are annotated.
 */
1825 /* generic function called after a macroblock has been parsed by the
1826 decoder or after it has been encoded by the encoder.
1828 Important variables used:
1829 s->mb_intra : true if intra macroblock
1830 s->mv_dir : motion vector direction
1831 s->mv_type : motion vector type
1832 s->mv : motion vector
1833 s->interlaced_dct : true if interlaced dct used (mpeg2)
1835 static av_always_inline
1836 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1837 int lowres_flag, int is_mpeg12)
1839 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1840 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1841 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1845 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1846 /* save DCT coefficients */
1848 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1851 *dct++ = block[i][s->dsp.idct_permutation[j]];
1854 s->current_picture.qscale_table[mb_xy]= s->qscale;
1856 /* update DC predictors for P macroblocks */
1858 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1859 if(s->mbintra_table[mb_xy])
1860 ff_clean_intra_table_entries(s);
1864 s->last_dc[2] = 128 << s->intra_dc_precision;
1867 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1868 s->mbintra_table[mb_xy]=1;
/* Reconstruction is skipped for encoder-internal passes that never need
 * the pixels (non-RD B-frame/intra-only encoding without PSNR). */
1870 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1871 uint8_t *dest_y, *dest_cb, *dest_cr;
1872 int dct_linesize, dct_offset;
1873 op_pixels_func (*op_pix)[4];
1874 qpel_mc_func (*op_qpix)[16];
1875 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1876 const int uvlinesize= s->current_picture.linesize[1];
1877 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1878 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1880 /* avoid copy if macroblock skipped in last frame too */
1881 /* skip only during decoding as we might trash the buffers during encoding a bit */
1883 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1884 const int age= s->current_picture.age;
1888 if (s->mb_skipped) {
1890 assert(s->pict_type!=FF_I_TYPE);
/* mbskip counter saturates at 99 to stay within its uint8_t table. */
1892 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1893 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1895 /* if previous was skipped too, then nothing to do ! */
1896 if (*mbskip_ptr >= age && s->current_picture.reference){
1899 } else if(!s->current_picture.reference){
1900 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1901 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1903 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT interleaves the two fields: stride doubles and the
 * second field starts one line (not one block row) below. */
1907 dct_linesize = linesize << s->interlaced_dct;
1908 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1912 dest_cb= s->dest[1];
1913 dest_cr= s->dest[2];
/* Unreadable destination: reconstruct into the scratchpad and copy later. */
1915 dest_y = s->b_scratchpad;
1916 dest_cb= s->b_scratchpad+16*linesize;
1917 dest_cr= s->b_scratchpad+32*linesize;
1921 /* motion handling */
1922 /* decoding or more than one mb_type (MC was already done otherwise) */
1925 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
/* put for the first direction, then avg for bidirectional blending. */
1927 if (s->mv_dir & MV_DIR_FORWARD) {
1928 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
1929 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1931 if (s->mv_dir & MV_DIR_BACKWARD) {
1932 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
1935 op_qpix= s->me.qpel_put;
1936 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
1937 op_pix = s->dsp.put_pixels_tab;
1939 op_pix = s->dsp.put_no_rnd_pixels_tab;
1941 if (s->mv_dir & MV_DIR_FORWARD) {
1942 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1943 op_pix = s->dsp.avg_pixels_tab;
1944 op_qpix= s->me.qpel_avg;
1946 if (s->mv_dir & MV_DIR_BACKWARD) {
1947 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1952 /* skip dequant / idct if we are really late ;) */
1953 if(s->hurry_up>1) goto skip_idct;
1954 if(s->avctx->skip_idct){
1955 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
1956 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
1957 || s->avctx->skip_idct >= AVDISCARD_ALL)
1961 /* add dct residue */
/* Path 1: codecs whose blocks still need dequantization here. */
1962 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1963 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1964 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1965 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1966 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1967 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1969 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1970 if (s->chroma_y_shift){
1971 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1972 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1976 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1977 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1978 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1979 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path 2: already-dequantized blocks — just add the IDCT residue. */
1982 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
1983 add_dct(s, block[0], 0, dest_y , dct_linesize);
1984 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1985 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1986 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1988 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1989 if(s->chroma_y_shift){//Chroma420
1990 add_dct(s, block[4], 4, dest_cb, uvlinesize);
1991 add_dct(s, block[5], 5, dest_cr, uvlinesize);
1994 dct_linesize = uvlinesize << s->interlaced_dct;
1995 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
1997 add_dct(s, block[4], 4, dest_cb, dct_linesize);
1998 add_dct(s, block[5], 5, dest_cr, dct_linesize);
1999 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2000 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2001 if(!s->chroma_x_shift){//Chroma444
2002 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2003 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2004 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2005 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2010 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2011 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2014 /* dct only in intra block */
2015 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2016 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2017 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2018 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2019 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2021 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2022 if(s->chroma_y_shift){
2023 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2024 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2028 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2029 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2030 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2031 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2035 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2036 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2037 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2038 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2040 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2041 if(s->chroma_y_shift){
2042 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2043 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2046 dct_linesize = uvlinesize << s->interlaced_dct;
2047 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2049 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2050 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2051 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2052 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2053 if(!s->chroma_x_shift){//Chroma444
2054 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2055 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2056 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2057 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Copy scratchpad reconstruction back to the real (unreadable) planes. */
2065 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2066 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2067 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2072 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2074 if(s->out_format == FMT_MPEG1) {
2075 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2076 else MPV_decode_mb_internal(s, block, 0, 1);
2079 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2080 else MPV_decode_mb_internal(s, block, 0, 0);
2085 * @param h is the normal height, this will be reduced automatically if needed for the last row
2087 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2088 if (s->avctx->draw_horiz_band) {
2090 const int field_pic= s->picture_structure != PICT_FRAME;
2093 h= FFMIN(h, (s->avctx->height>>field_pic) - y);
2095 if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
2098 if(s->first_field) return;
2101 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2102 src= (AVFrame*)s->current_picture_ptr;
2103 else if(s->last_picture_ptr)
2104 src= (AVFrame*)s->last_picture_ptr;
2108 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2114 offset[0]= y * s->linesize;
2116 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2122 s->avctx->draw_horiz_band(s->avctx, src, offset,
2123 y, s->picture_structure, h);
2127 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2128 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2129 const int uvlinesize= s->current_picture.linesize[1];
2130 const int mb_size= 4 - s->avctx->lowres;
2132 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2133 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2134 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2135 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2136 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2137 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2138 //block_index is not used by mpeg2, so it is not affected by chroma_format
2140 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2141 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2142 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2144 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2146 if(s->picture_structure==PICT_FRAME){
2147 s->dest[0] += s->mb_y * linesize << mb_size;
2148 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2149 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2151 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2152 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2153 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2154 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2159 void ff_mpeg_flush(AVCodecContext *avctx){
2161 MpegEncContext *s = avctx->priv_data;
2163 if(s==NULL || s->picture==NULL)
2166 for(i=0; i<MAX_PICTURE_COUNT; i++){
2167 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2168 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2169 free_frame_buffer(s, &s->picture[i]);
2171 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2173 s->mb_x= s->mb_y= 0;
2176 s->parse_context.state= -1;
2177 s->parse_context.frame_start_found= 0;
2178 s->parse_context.overread= 0;
2179 s->parse_context.overread_index= 0;
2180 s->parse_context.index= 0;
2181 s->parse_context.last_index= 0;
2182 s->bitstream_buffer_size=0;
2186 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2187 DCTELEM *block, int n, int qscale)
2189 int i, level, nCoeffs;
2190 const uint16_t *quant_matrix;
2192 nCoeffs= s->block_last_index[n];
2195 block[0] = block[0] * s->y_dc_scale;
2197 block[0] = block[0] * s->c_dc_scale;
2198 /* XXX: only mpeg1 */
2199 quant_matrix = s->intra_matrix;
2200 for(i=1;i<=nCoeffs;i++) {
2201 int j= s->intra_scantable.permutated[i];
2206 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2207 level = (level - 1) | 1;
2210 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2211 level = (level - 1) | 1;
2218 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2219 DCTELEM *block, int n, int qscale)
2221 int i, level, nCoeffs;
2222 const uint16_t *quant_matrix;
2224 nCoeffs= s->block_last_index[n];
2226 quant_matrix = s->inter_matrix;
2227 for(i=0; i<=nCoeffs; i++) {
2228 int j= s->intra_scantable.permutated[i];
2233 level = (((level << 1) + 1) * qscale *
2234 ((int) (quant_matrix[j]))) >> 4;
2235 level = (level - 1) | 1;
2238 level = (((level << 1) + 1) * qscale *
2239 ((int) (quant_matrix[j]))) >> 4;
2240 level = (level - 1) | 1;
2247 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2248 DCTELEM *block, int n, int qscale)
2250 int i, level, nCoeffs;
2251 const uint16_t *quant_matrix;
2253 if(s->alternate_scan) nCoeffs= 63;
2254 else nCoeffs= s->block_last_index[n];
2257 block[0] = block[0] * s->y_dc_scale;
2259 block[0] = block[0] * s->c_dc_scale;
2260 quant_matrix = s->intra_matrix;
2261 for(i=1;i<=nCoeffs;i++) {
2262 int j= s->intra_scantable.permutated[i];
2267 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2270 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2277 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2278 DCTELEM *block, int n, int qscale)
2280 int i, level, nCoeffs;
2281 const uint16_t *quant_matrix;
2284 if(s->alternate_scan) nCoeffs= 63;
2285 else nCoeffs= s->block_last_index[n];
2288 block[0] = block[0] * s->y_dc_scale;
2290 block[0] = block[0] * s->c_dc_scale;
2291 quant_matrix = s->intra_matrix;
2292 for(i=1;i<=nCoeffs;i++) {
2293 int j= s->intra_scantable.permutated[i];
2298 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2301 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2310 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2311 DCTELEM *block, int n, int qscale)
2313 int i, level, nCoeffs;
2314 const uint16_t *quant_matrix;
2317 if(s->alternate_scan) nCoeffs= 63;
2318 else nCoeffs= s->block_last_index[n];
2320 quant_matrix = s->inter_matrix;
2321 for(i=0; i<=nCoeffs; i++) {
2322 int j= s->intra_scantable.permutated[i];
2327 level = (((level << 1) + 1) * qscale *
2328 ((int) (quant_matrix[j]))) >> 4;
2331 level = (((level << 1) + 1) * qscale *
2332 ((int) (quant_matrix[j]))) >> 4;
2341 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2342 DCTELEM *block, int n, int qscale)
2344 int i, level, qmul, qadd;
2347 assert(s->block_last_index[n]>=0);
2353 block[0] = block[0] * s->y_dc_scale;
2355 block[0] = block[0] * s->c_dc_scale;
2356 qadd = (qscale - 1) | 1;
2363 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2365 for(i=1; i<=nCoeffs; i++) {
2369 level = level * qmul - qadd;
2371 level = level * qmul + qadd;
2378 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2379 DCTELEM *block, int n, int qscale)
2381 int i, level, qmul, qadd;
2384 assert(s->block_last_index[n]>=0);
2386 qadd = (qscale - 1) | 1;
2389 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2391 for(i=0; i<=nCoeffs; i++) {
2395 level = level * qmul - qadd;
2397 level = level * qmul + qadd;
2405 * set qscale and update qscale dependent variables.
2407 void ff_set_qscale(MpegEncContext * s, int qscale)
2411 else if (qscale > 31)
2415 s->chroma_qscale= s->chroma_qscale_table[qscale];
2417 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2418 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];