2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
34 #include "mpegvideo.h"
35 #include "mpegvideo_common.h"
39 #include "xvmc_internal.h"
/* Forward declarations: per-standard inverse-quantization routines.
 * ff_dct_common_init() below installs these as function pointers on
 * MpegEncContext (s->dct_unquantize_*), and MPV_frame_start() selects
 * between them per codec at frame-start time. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
/* bitexact variant: substituted for the mpeg2 intra routine when
 * CODEC_FLAG_BITEXACT is set (see ff_dct_common_init). */
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale mapping: the identity (chroma qscale == luma
 * qscale) for indices 0..31. Installed by MPV_common_defaults(); codecs
 * with a non-trivial mapping replace s->chroma_qscale_table. */
67 static const uint8_t ff_default_chroma_qscale_table[32]={
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* DC coefficient scale table, indexed by qscale (128 entries):
 * a constant 8 for MPEG-1. Also used as entry 0 of
 * ff_mpeg2_dc_scale_table[] below. */
72 const uint8_t ff_mpeg1_dc_scale_table[128]={
73 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant-4 DC scale table; entry 1 of ff_mpeg2_dc_scale_table[] below. */
80 static const uint8_t mpeg2_dc_scale_table1[128]={
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant-2 DC scale table; entry 2 of ff_mpeg2_dc_scale_table[] below. */
88 static const uint8_t mpeg2_dc_scale_table2[128]={
89 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
90 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant-1 DC scale table; entry 3 of ff_mpeg2_dc_scale_table[] below. */
96 static const uint8_t mpeg2_dc_scale_table3[128]={
97 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Four DC scale tables with scales 8/4/2/1. NOTE(review): the index is
 * presumably the MPEG-2 intra_dc_precision field (0..3) -- the selecting
 * caller is not visible in this excerpt; confirm before relying on it. */
104 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Exported pixel-format lists for 4:2:0 decoding (plain and hwaccel).
 * NOTE(review): the initializer entries are not visible in this excerpt. */
111 const enum PixelFormat ff_pixfmt_list_420[] = {
116 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p,end) for an MPEG start code prefix (0x000001xx), carrying
 * partial-match state across calls in *state so a prefix split over two
 * buffers is still found. On a hit it steps back to just after the code:
 * the three p[-N] tests below decide how many of the matched bytes were
 * consumed beyond the prefix. NOTE(review): the surrounding loop and the
 * return statements are not visible in this excerpt. */
123 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
131 uint32_t tmp= *state << 8;
132 *state= tmp + *(p++);
133 if(tmp == 0x100 || p==end)
138 if (p[-1] > 1 ) p+= 3;
139 else if(p[-2] ) p+= 2;
140 else if(p[-3]|(p[-1]-1)) p++;
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Install the C reference inverse-quantization routines ... */
156 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
157 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
158 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
159 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
160 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* ... but force the bit-exact mpeg2 intra variant when requested. */
161 if(s->flags & CODEC_FLAG_BITEXACT)
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
163 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides; each call may replace the C routines
 * above with optimized versions. NOTE(review): the per-arch #if/HAVE_*
 * guards around these calls are not visible in this excerpt. */
166 MPV_common_init_mmx(s);
168 MPV_common_init_axp(s);
170 MPV_common_init_mlib(s);
172 MPV_common_init_mmi(s);
174 MPV_common_init_arm(s);
176 MPV_common_init_altivec(s);
178 MPV_common_init_bfin(s);
181 /* load & permutate scantables
182 note: only wmv uses different ones
/* Scantables are permuted to match the IDCT's coefficient order. */
184 if(s->alternate_scan){
185 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
186 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
189 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copy a Picture and mark the destination as a copy so the buffer
 * release logic does not double-free it. NOTE(review): the actual struct
 * copy line is not visible in this excerpt. */
197 void ff_copy_picture(Picture *dst, Picture *src){
199 dst->type= FF_BUFFER_TYPE_COPY;
203 * Release a frame buffer
/* Hands the frame's pixel buffers back to the application via the
 * release_buffer callback, then frees the hwaccel private data. */
205 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
207 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
208 av_freep(&pic->hwaccel_picture_private);
212 * Allocate a frame buffer
/* Obtains pixel buffers from the application (get_buffer callback) and
 * validates them; on any failure it logs, cleans up and fails the call.
 * NOTE(review): return statements are not visible in this excerpt. */
214 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* hwaccel decoding may need per-picture private state before get_buffer. */
218 if (s->avctx->hwaccel) {
219 assert(!pic->hwaccel_picture_private);
220 if (s->avctx->hwaccel->priv_data_size) {
221 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
222 if (!pic->hwaccel_picture_private) {
223 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
229 r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
/* Sanity-check what the application handed back. */
231 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
232 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
233 av_freep(&pic->hwaccel_picture_private);
/* Strides must stay constant across frames once established. */
237 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
238 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
239 free_frame_buffer(s, pic);
/* Both chroma planes must share one stride. */
243 if (pic->linesize[1] != pic->linesize[2]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
245 free_frame_buffer(s, pic);
253 * allocates a Picture
254 * The pixels are allocated/set by calling get_buffer() if shared=0
256 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
257 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
258 const int mb_array_size= s->mb_stride*s->mb_height;
259 const int b8_array_size= s->b8_stride*s->mb_height*2;
260 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared: caller supplied the pixels already, just tag the Picture. */
265 assert(pic->data[0]);
266 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
267 pic->type= FF_BUFFER_TYPE_SHARED;
269 assert(!pic->data[0]);
271 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides of the newly obtained buffer on the context. */
274 s->linesize = pic->linesize[0];
275 s->uvlinesize= pic->linesize[1];
/* First use of this Picture slot: allocate all per-picture side tables. */
278 if(pic->qscale_table==NULL){
280 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
281 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
285 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
286 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
288 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
/* H.264 uses 4x4 motion granularity, everything else 8x8. */
289 if(s->out_format == FMT_H264){
291 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
292 pic->motion_val[i]= pic->motion_val_base[i]+4;
293 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
295 pic->motion_subsample_log2= 2;
296 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
298 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
299 pic->motion_val[i]= pic->motion_val_base[i]+4;
300 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
302 pic->motion_subsample_log2= 3;
304 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
305 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
307 pic->qstride= s->mb_stride;
308 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
311 /* It might be nicer if the application would keep track of these
312 * but it would require an API change. */
313 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
314 s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
315 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
316 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
319 fail: //for the FF_ALLOCZ_OR_GOTO macro
321 free_frame_buffer(s, pic);
326 * deallocates a picture
/* Frees the frame buffer (unless the pixels are application-owned/shared)
 * and every per-picture side table allocated by ff_alloc_picture(). */
328 static void free_picture(MpegEncContext *s, Picture *pic){
331 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
332 free_frame_buffer(s, pic);
335 av_freep(&pic->mb_var);
336 av_freep(&pic->mc_mb_var);
337 av_freep(&pic->mb_mean);
338 av_freep(&pic->mbskip_table);
339 av_freep(&pic->qscale_table);
340 av_freep(&pic->mb_type_base);
341 av_freep(&pic->dct_coeff);
342 av_freep(&pic->pan_scan);
/* NOTE(review): the loop header over i for the two lines below is not
 * visible in this excerpt. */
345 av_freep(&pic->motion_val_base[i]);
346 av_freep(&pic->ref_index[i]);
349 if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocate the per-thread (per-slice-context) scratch buffers: edge
 * emulation, motion-estimation scratchpads/maps, noise-reduction error
 * sums and the DCT block arrays. Returns 0 on success, -1 on failure
 * (partial allocations are released later by MPV_common_end()). */
358 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
361 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
362 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
363 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
365 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
366 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* The scratchpads all alias one allocation; obmc is offset by 16 bytes. */
367 s->me.temp= s->me.scratchpad;
368 s->rd_scratchpad= s->me.scratchpad;
369 s->b_scratchpad= s->me.scratchpad;
370 s->obmc_scratchpad= s->me.scratchpad + 16;
372 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
373 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
374 if(s->avctx->noise_reduction){
375 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
378 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
379 s->block= s->blocks[0];
/* NOTE(review): the loop header over i for the pblocks line is not
 * visible in this excerpt. */
382 s->pblocks[i] = &s->block[i];
386 return -1; //free() through MPV_common_end()
/* Free everything init_duplicate_context() allocated for one thread
 * context; safe to call on a partially initialized context. */
389 static void free_duplicate_context(MpegEncContext *s){
392 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
393 av_freep(&s->me.scratchpad);
/* The rd/b/obmc scratchpads alias me.scratchpad, so only clear pointers. */
397 s->obmc_scratchpad= NULL;
399 av_freep(&s->dct_error_sum);
400 av_freep(&s->me.map);
401 av_freep(&s->me.score_map);
402 av_freep(&s->blocks);
/* Save the per-thread fields of src into bak so that a wholesale memcpy
 * of the context (see ff_update_duplicate_context) can be undone for
 * fields each thread must keep private. */
406 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
407 #define COPY(a) bak->a= src->a
408 COPY(allocated_edge_emu_buffer);
409 COPY(edge_emu_buffer);
414 COPY(obmc_scratchpad);
421 COPY(me.map_generation);
/* Refresh a slave thread context from the master: copy the whole struct,
 * then restore dst's own per-thread buffers/state (backed up first) and
 * re-point pblocks into dst's own block array. */
429 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
432 //FIXME copy only needed parts
434 backup_duplicate_context(&bak, dst);
435 memcpy(dst, src, sizeof(MpegEncContext));
436 backup_duplicate_context(dst, &bak);
438 dst->pblocks[i] = &dst->block[i];
440 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
444 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
445 * the changed fields will not depend upon the prior state of the MpegEncContext.
447 void MPV_common_defaults(MpegEncContext *s){
449 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
450 s->chroma_qscale_table= ff_default_chroma_qscale_table;
/* Assume progressive full-frame content until headers say otherwise. */
451 s->progressive_frame= 1;
452 s->progressive_sequence= 1;
453 s->picture_structure= PICT_FRAME;
455 s->coded_picture_number = 0;
456 s->picture_number = 0;
457 s->input_picture_number = 0;
459 s->picture_in_gop_number = 0;
466 * sets the given MpegEncContext to defaults for decoding.
467 * the changed fields will not depend upon the prior state of the MpegEncContext.
469 void MPV_decode_defaults(MpegEncContext *s){
470 MPV_common_defaults(s);
474 * init common structure for both encoder and decoder.
475 * this assumes that some variables like width/height are already set
477 av_cold int MPV_common_init(MpegEncContext *s)
479 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
/* Interlaced MPEG-2 needs mb_height rounded to a multiple of two MB rows. */
481 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
482 s->mb_height = (s->height + 31) / 32 * 2;
484 s->mb_height = (s->height + 15) / 16;
486 if(s->avctx->pix_fmt == PIX_FMT_NONE){
487 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Slice threading cannot use more threads than MB rows. */
491 if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
492 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
496 if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
499 dsputil_init(&s->dsp, s->avctx);
500 ff_dct_common_init(s);
502 s->flags= s->avctx->flags;
503 s->flags2= s->avctx->flags2;
/* Derive macroblock/sub-block grid geometry; the +1 on strides gives a
 * guard column for edge-safe addressing. */
505 s->mb_width = (s->width + 15) / 16;
506 s->mb_stride = s->mb_width + 1;
507 s->b8_stride = s->mb_width*2 + 1;
508 s->b4_stride = s->mb_width*4 + 1;
509 mb_array_size= s->mb_height * s->mb_stride;
510 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
512 /* set chroma shifts */
513 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
514 &(s->chroma_y_shift) );
516 /* set default edge pos, will be overriden in decode_header if needed */
517 s->h_edge_pos= s->mb_width*16;
518 s->v_edge_pos= s->mb_height*16;
520 s->mb_num = s->mb_width * s->mb_height;
525 s->block_wrap[3]= s->b8_stride;
527 s->block_wrap[5]= s->mb_stride;
529 y_size = s->b8_stride * (2 * s->mb_height + 1);
530 c_size = s->mb_stride * (s->mb_height + 1);
531 yc_size = y_size + 2 * c_size;
533 /* convert fourcc to upper case */
534 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
536 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
538 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* mb_index2xy maps a raster MB index to its xy position in the padded grid. */
540 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
541 for(y=0; y<s->mb_height; y++){
542 for(x=0; x<s->mb_width; x++){
543 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
546 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
549 /* Allocate MV tables */
550 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
551 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
552 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
553 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
555 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the one-row/one-column guard border. */
556 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
557 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
558 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
559 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
560 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
561 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
563 if(s->msmpeg4_version){
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
566 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
568 /* Allocate MB type table */
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
571 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
574 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
575 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
576 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
577 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
578 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
580 if(s->avctx->noise_reduction){
581 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
584 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
585 for(i = 0; i < MAX_PICTURE_COUNT; i++) {
586 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
589 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
591 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
592 /* interlaced direct mode decoding tables */
/* NOTE(review): the nested i/j/k loop headers for the field MV tables
 * below are not visible in this excerpt. */
597 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
598 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
600 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
601 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
602 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
604 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
607 if (s->out_format == FMT_H263) {
609 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
610 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
611 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
612 s->ac_val[2] = s->ac_val[1] + c_size;
615 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
616 s->coded_block= s->coded_block_base + s->b8_stride + 1;
618 /* cbp, ac_pred, pred_dir */
619 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
620 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
623 if (s->h263_pred || s->h263_plus || !s->encoding) {
625 //MN: we need these for error resilience of intra-frames
626 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
627 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
628 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
629 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value (128 * 8). */
630 for(i=0;i<yc_size;i++)
631 s->dc_val_base[i] = 1024;
634 /* which mb is a intra block */
635 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
636 memset(s->mbintra_table, 1, mb_array_size);
638 /* init macroblock skip table */
639 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
640 //Note the +1 is for a quicker mpeg4 slice_end detection
641 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
643 s->parse_context.state= -1;
644 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
645 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
646 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
647 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
650 s->context_initialized = 1;
/* Thread 0 reuses the main context; each extra thread gets a memcpy'd
 * clone plus its own scratch buffers and an MB-row work range. */
652 s->thread_context[0]= s;
653 threads = s->avctx->thread_count;
655 for(i=1; i<threads; i++){
656 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
657 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
660 for(i=0; i<threads; i++){
661 if(init_duplicate_context(s->thread_context[i], s) < 0)
663 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
664 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
673 /* init common structure for both encoder and decoder */
/* Tear down everything MPV_common_init() (and the per-thread contexts)
 * allocated; safe on a partially initialized context since av_freep()
 * handles NULL. */
674 void MPV_common_end(MpegEncContext *s)
678 for(i=0; i<s->avctx->thread_count; i++){
679 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only indices >= 1 are freed. */
681 for(i=1; i<s->avctx->thread_count; i++){
682 av_freep(&s->thread_context[i]);
685 av_freep(&s->parse_context.buffer);
686 s->parse_context.buffer_size=0;
688 av_freep(&s->mb_type);
689 av_freep(&s->p_mv_table_base);
690 av_freep(&s->b_forw_mv_table_base);
691 av_freep(&s->b_back_mv_table_base);
692 av_freep(&s->b_bidir_forw_mv_table_base);
693 av_freep(&s->b_bidir_back_mv_table_base);
694 av_freep(&s->b_direct_mv_table_base);
/* The offset working pointers alias the freed bases; just clear them. */
696 s->b_forw_mv_table= NULL;
697 s->b_back_mv_table= NULL;
698 s->b_bidir_forw_mv_table= NULL;
699 s->b_bidir_back_mv_table= NULL;
700 s->b_direct_mv_table= NULL;
/* NOTE(review): the nested i/j/k loop headers for the field tables below
 * are not visible in this excerpt. */
704 av_freep(&s->b_field_mv_table_base[i][j][k]);
705 s->b_field_mv_table[i][j][k]=NULL;
707 av_freep(&s->b_field_select_table[i][j]);
708 av_freep(&s->p_field_mv_table_base[i][j]);
709 s->p_field_mv_table[i][j]=NULL;
711 av_freep(&s->p_field_select_table[i]);
714 av_freep(&s->dc_val_base);
715 av_freep(&s->ac_val_base);
716 av_freep(&s->coded_block_base);
717 av_freep(&s->mbintra_table);
718 av_freep(&s->cbp_table);
719 av_freep(&s->pred_dir_table);
721 av_freep(&s->mbskip_table);
722 av_freep(&s->prev_pict_types);
723 av_freep(&s->bitstream_buffer);
724 s->allocated_bitstream_buffer_size=0;
726 av_freep(&s->avctx->stats_out);
727 av_freep(&s->ac_stats);
728 av_freep(&s->error_status_table);
729 av_freep(&s->mb_index2xy);
730 av_freep(&s->lambda_table);
731 av_freep(&s->q_intra_matrix);
732 av_freep(&s->q_inter_matrix);
733 av_freep(&s->q_intra_matrix16);
734 av_freep(&s->q_inter_matrix16);
735 av_freep(&s->input_picture);
736 av_freep(&s->reordered_input_picture);
737 av_freep(&s->dct_offset);
740 for(i=0; i<MAX_PICTURE_COUNT; i++){
741 free_picture(s, &s->picture[i]);
744 av_freep(&s->picture);
745 s->context_initialized = 0;
748 s->current_picture_ptr= NULL;
749 s->linesize= s->uvlinesize= 0;
752 av_freep(&s->visualization_buffer[i]);
754 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) of an RLTable from its raw table_run/table_level arrays.
 * If static_store is non-NULL the results live in the caller-provided
 * static buffer (and work is skipped if already done); otherwise they
 * are av_malloc'ed. */
757 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
759 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
760 uint8_t index_run[MAX_RUN+1];
761 int last, run, level, start, end, i;
763 /* If table is static, we can quit if rl->max_level[0] is not NULL */
764 if(static_store && rl->max_level[0])
767 /* compute max_level[], max_run[] and index_run[] */
/* last==0: codes with "last" flag clear; last==1: codes that end a block.
 * NOTE(review): the start/end range computation inside the loop is not
 * visible in this excerpt. */
768 for(last=0;last<2;last++) {
777 memset(max_level, 0, MAX_RUN + 1);
778 memset(max_run, 0, MAX_LEVEL + 1);
779 memset(index_run, rl->n, MAX_RUN + 1);
780 for(i=start;i<end;i++) {
781 run = rl->table_run[i];
782 level = rl->table_level[i];
/* index_run records the first table index seen for each run value. */
783 if (index_run[run] == rl->n)
785 if (level > max_level[run])
786 max_level[run] = level;
787 if (run > max_run[level])
788 max_run[level] = run;
/* Store results: carve three sub-arrays out of static_store, or allocate. */
791 rl->max_level[last] = static_store[last];
793 rl->max_level[last] = av_malloc(MAX_RUN + 1);
794 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
796 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
798 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
799 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
801 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
803 rl->index_run[last] = av_malloc(MAX_RUN + 1);
804 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the combined rl_vlc[] tables of an RLTable: for every VLC
 * table entry, resolve code/len into a ready-to-use (len, level, run)
 * triple per qscale q, applying qmul/qadd dequantization to the level.
 * NOTE(review): the q loop and the illegal/escape/more-bits branch bodies
 * are not visible in this excerpt. */
808 void init_vlc_rl(RLTable *rl)
820 for(i=0; i<rl->vlc.table_size; i++){
821 int code= rl->vlc.table[i][0];
822 int len = rl->vlc.table[i][1];
825 if(len==0){ // illegal code
828 }else if(len<0){ //more bits needed
832 if(code==rl->n){ //esc
836 run= rl->table_run [code] + 1;
837 level= rl->table_level[code] * qmul + qadd;
/* codes at or past rl->last carry the "last coefficient" marker (+192). */
838 if(code >= rl->last) run+=192;
841 rl->rl_vlc[q][i].len= len;
842 rl->rl_vlc[q][i].level= level;
843 rl->rl_vlc[q][i].run= run;
/* Return the index of a free slot in s->picture[]. For shared pictures
 * only untyped empty slots qualify; otherwise typed-but-unreferenced
 * slots are preferred over untyped ones. Aborts (below) on overflow
 * rather than returning -1. */
848 int ff_find_unused_picture(MpegEncContext *s, int shared){
852 for(i=0; i<MAX_PICTURE_COUNT; i++){
853 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
856 for(i=0; i<MAX_PICTURE_COUNT; i++){
857 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
859 for(i=0; i<MAX_PICTURE_COUNT; i++){
860 if(s->picture[i].data[0]==NULL) return i;
864 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
865 /* We could return -1, but the codec would crash trying to draw into a
866 * non-existing frame anyway. This is safer than waiting for a random crash.
867 * Also the return of this is never useful, an encoder must only allocate
868 * as much as allowed in the specification. This has no relationship to how
869 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
870 * enough for such valid streams).
871 * Plus, a decoder has to check stream validity and remove frames if too
872 * many reference frames are around. Waiting for "OOM" is not correct at
873 * all. Similarly, missing reference frames have to be replaced by
874 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the per-coefficient noise-reduction offsets from the running
 * error statistics, halving the accumulators once the sample count
 * exceeds 2^16 to keep a decaying average. NOTE(review): the inner
 * 64-coefficient loop headers are not visible in this excerpt. */
880 static void update_noise_reduction(MpegEncContext *s){
883 for(intra=0; intra<2; intra++){
884 if(s->dct_count[intra] > (1<<16)){
886 s->dct_error_sum[intra][i] >>=1;
888 s->dct_count[intra] >>= 1;
892 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
898 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
900 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
906 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
908 /* mark&release old frames */
909 if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
910 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
911 free_frame_buffer(s, s->last_picture_ptr);
913 /* release forgotten pictures */
914 /* if(mpeg124/h263) */
916 for(i=0; i<MAX_PICTURE_COUNT; i++){
917 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
918 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
919 free_frame_buffer(s, &s->picture[i]);
927 /* release non reference frames */
928 for(i=0; i<MAX_PICTURE_COUNT; i++){
929 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
930 free_frame_buffer(s, &s->picture[i]);
/* Pick the Picture to decode/encode into: reuse an already-chosen empty
 * one, or grab a free slot. */
934 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
935 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
937 i= ff_find_unused_picture(s, 0);
/* Reference marking: H.264 tracks field/frame structure, everything else
 * marks all non-B pictures as references. */
943 if (s->codec_id == CODEC_ID_H264)
944 pic->reference = s->picture_structure;
945 else if (s->pict_type != FF_B_TYPE)
949 pic->coded_picture_number= s->coded_picture_number++;
951 if(ff_alloc_picture(s, pic, 0) < 0)
954 s->current_picture_ptr= pic;
955 s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
956 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
959 s->current_picture_ptr->pict_type= s->pict_type;
960 // if(s->flags && CODEC_FLAG_QSCALE)
961 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
962 s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;
964 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Advance the reference chain: next becomes last, current becomes next. */
966 if (s->pict_type != FF_B_TYPE) {
967 s->last_picture_ptr= s->next_picture_ptr;
969 s->next_picture_ptr= s->current_picture_ptr;
971 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
972 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
973 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
974 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
975 s->pict_type, s->dropable);*/
/* Streams that start on a non-keyframe get dummy reference frames so
 * prediction has something to read from. */
977 if(s->codec_id != CODEC_ID_H264){
978 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
979 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
980 /* Allocate a dummy frame */
981 i= ff_find_unused_picture(s, 0);
982 s->last_picture_ptr= &s->picture[i];
983 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
986 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
987 /* Allocate a dummy frame */
988 i= ff_find_unused_picture(s, 0);
989 s->next_picture_ptr= &s->picture[i];
990 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
995 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
996 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
998 assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* Field pictures: address the chosen field by offsetting the data
 * pointers and doubling the strides. NOTE(review): the plane loop header
 * is not visible in this excerpt. */
1000 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1003 if(s->picture_structure == PICT_BOTTOM_FIELD){
1004 s->current_picture.data[i] += s->current_picture.linesize[i];
1006 s->current_picture.linesize[i] *= 2;
1007 s->last_picture.linesize[i] *=2;
1008 s->next_picture.linesize[i] *=2;
1012 s->hurry_up= s->avctx->hurry_up;
1013 s->error_recognition= avctx->error_recognition;
1015 /* set dequantizer, we can't do it during init as it might change for mpeg4
1016 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1017 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1018 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1019 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1020 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1021 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1022 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1024 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1025 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1028 if(s->dct_error_sum){
1029 assert(s->avctx->noise_reduction && s->encoding);
1031 update_noise_reduction(s);
1034 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1035 return ff_xvmc_field_start(s, avctx);
1040 /* generic function for encode/decode called after a frame has been coded/decoded */
1041 void MPV_frame_end(MpegEncContext *s)
/* NOTE(review): this extract is missing several original lines (the function's
 * braces, an `int i` declaration, etc.); visible code kept byte-identical. */
1044 /* draw edge for correct motion prediction if outside */
1045 //just to make sure that all data is rendered.
1046 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1047 ff_xvmc_field_end(s);
1048 }else if(!s->avctx->hwaccel
1049 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1050 && s->unrestricted_mv
1051 && s->current_picture.reference
1053 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
/* pad the borders of the reconstructed reference frame so that unrestricted
 * motion vectors can read pixels outside the picture area; chroma planes get
 * half the edge width (4:2:0 subsampling assumed here — note >>1 on positions) */
1054 s->dsp.draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
1055 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1056 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
/* remember per-picture-type state for the next frame's rate control / coding */
1060 s->last_pict_type = s->pict_type;
1061 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1062 if(s->pict_type!=FF_B_TYPE){
1063 s->last_non_b_pict_type= s->pict_type;
/* copy back current_picture variables into the picture pool entry it aliases */
1066 /* copy back current_picture variables */
1067 for(i=0; i<MAX_PICTURE_COUNT; i++){
1068 if(s->picture[i].data[0] == s->current_picture.data[0]){
1069 s->picture[i]= s->current_picture;
1073 assert(i<MAX_PICTURE_COUNT);
1077 /* release non-reference frames */
1078 for(i=0; i<MAX_PICTURE_COUNT; i++){
1079 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1080 free_frame_buffer(s, &s->picture[i]);
1084 // clear copies, to avoid confusion
1086 memset(&s->last_picture, 0, sizeof(Picture));
1087 memset(&s->next_picture, 0, sizeof(Picture));
1088 memset(&s->current_picture, 0, sizeof(Picture));
/* expose the just-finished picture to the API user */
1090 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1094 * draws a line from (ex, ey) -> (sx, sy).
1095 * @param w width of the image
1096 * @param h height of the image
1097 * @param stride stride/linesize of the image
1098 * @param color color of the line
/* NOTE(review): extract is missing the local declarations (x, y, f, fr) and
 * the else branch/braces; visible code kept byte-identical. */
1100 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clip both endpoints into the picture so the writes below stay in bounds */
1103 sx= av_clip(sx, 0, w-1);
1104 sy= av_clip(sy, 0, h-1);
1105 ex= av_clip(ex, 0, w-1);
1106 ey= av_clip(ey, 0, h-1);
1108 buf[sy*stride + sx]+= color;
/* step along the major axis; the minor coordinate is interpolated in 16.16
 * fixed point and the color is split between the two neighboring pixels */
1110 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* ensure sx <= ex so the loop runs forward */
1112 FFSWAP(int, sx, ex);
1113 FFSWAP(int, sy, ey);
1115 buf+= sx + sy*stride;
1117 f= ((ey-sy)<<16)/ex;
1118 for(x= 0; x <= ex; x++){
1121 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1122 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* y-major case: same scheme with the axes exchanged */
1126 FFSWAP(int, sx, ex);
1127 FFSWAP(int, sy, ey);
1129 buf+= sx + sy*stride;
1131 if(ey) f= ((ex-sx)<<16)/ey;
1133 for(y= 0; y <= ey; y++){
1136 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1137 buf[y*stride + x+1]+= (color* fr )>>16;
1143 * draws an arrow from (ex, ey) -> (sx, sy).
1144 * @param w width of the image
1145 * @param h height of the image
1146 * @param stride stride/linesize of the image
1147 * @param color color of the arrow
/* NOTE(review): extract is missing the dx/dy/rx/ry declarations and some
 * braces; visible code kept byte-identical. */
1149 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* loose clip: endpoints may lie slightly outside, draw_line clips exactly */
1152 sx= av_clip(sx, -100, w+100);
1153 sy= av_clip(sy, -100, h+100);
1154 ex= av_clip(ex, -100, w+100);
1155 ey= av_clip(ey, -100, h+100);
/* only draw the two head strokes when the arrow is longer than 3 pixels */
1160 if(dx*dx + dy*dy > 3*3){
1163 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1165 //FIXME subpixel accuracy
1166 rx= ROUNDED_DIV(rx*3<<4, length);
1167 ry= ROUNDED_DIV(ry*3<<4, length);
/* the two short strokes of the arrow head, perpendicular-ish to the shaft */
1169 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1170 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
/* the shaft itself */
1172 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1176 * prints debugging info for the given picture.
/* NOTE(review): large portions of this function (loop variable declarations,
 * braces, else branches, the COLOR(...) invocations per mb type) are missing
 * from this extract; visible code kept byte-identical. */
1178 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1180 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* --- textual dump: one character per macroblock on the log --- */
1182 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1185 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1186 switch (pict->pict_type) {
1187 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1188 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1189 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1190 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1191 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1192 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1194 for(y=0; y<s->mb_height; y++){
1195 for(x=0; x<s->mb_width; x++){
1196 if(s->avctx->debug&FF_DEBUG_SKIP){
/* skip count, clamped to a single digit */
1197 int count= s->mbskip_table[x + y*s->mb_stride];
1198 if(count>9) count=9;
1199 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1201 if(s->avctx->debug&FF_DEBUG_QP){
1202 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1204 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1205 int mb_type= pict->mb_type[x + y*s->mb_stride];
1206 //Type & MV direction
1208 av_log(s->avctx, AV_LOG_DEBUG, "P");
1209 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1210 av_log(s->avctx, AV_LOG_DEBUG, "A");
1211 else if(IS_INTRA4x4(mb_type))
1212 av_log(s->avctx, AV_LOG_DEBUG, "i");
1213 else if(IS_INTRA16x16(mb_type))
1214 av_log(s->avctx, AV_LOG_DEBUG, "I");
1215 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1216 av_log(s->avctx, AV_LOG_DEBUG, "d");
1217 else if(IS_DIRECT(mb_type))
1218 av_log(s->avctx, AV_LOG_DEBUG, "D");
1219 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1220 av_log(s->avctx, AV_LOG_DEBUG, "g");
1221 else if(IS_GMC(mb_type))
1222 av_log(s->avctx, AV_LOG_DEBUG, "G");
1223 else if(IS_SKIP(mb_type))
1224 av_log(s->avctx, AV_LOG_DEBUG, "S");
1225 else if(!USES_LIST(mb_type, 1))
1226 av_log(s->avctx, AV_LOG_DEBUG, ">");
1227 else if(!USES_LIST(mb_type, 0))
1228 av_log(s->avctx, AV_LOG_DEBUG, "<");
/* bi-predicted: must reference both lists */
1230 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1231 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning (8x8/16x8/8x16/16x16) */
1236 av_log(s->avctx, AV_LOG_DEBUG, "+");
1237 else if(IS_16X8(mb_type))
1238 av_log(s->avctx, AV_LOG_DEBUG, "-");
1239 else if(IS_8X16(mb_type))
1240 av_log(s->avctx, AV_LOG_DEBUG, "|");
1241 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1242 av_log(s->avctx, AV_LOG_DEBUG, " ");
1244 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: interlacing */
1247 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1248 av_log(s->avctx, AV_LOG_DEBUG, "=");
1250 av_log(s->avctx, AV_LOG_DEBUG, " ");
1252 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1254 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- visual overlays drawn into a private copy of the picture --- */
1258 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1259 const int shift= 1 + s->quarter_sample;
1263 int h_chroma_shift, v_chroma_shift, block_height;
1264 const int width = s->avctx->width;
1265 const int height= s->avctx->height;
1266 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1267 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1268 s->low_delay=0; //needed to see the vectors without trashing the buffers
/* copy the planes into visualization_buffer so we never scribble on a
 * reference frame; pict->data[] is repointed at the copies below */
1270 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1272 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1273 pict->data[i]= s->visualization_buffer[i];
1275 pict->type= FF_BUFFER_TYPE_COPY;
1277 block_height = 16>>v_chroma_shift;
1279 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1281 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1282 const int mb_index= mb_x + mb_y*s->mb_stride;
/* motion-vector arrows; type selects P-forward / B-forward / B-backward */
1283 if((s->avctx->debug_mv) && pict->motion_val){
1285 for(type=0; type<3; type++){
1288 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1292 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1296 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1301 if(!USES_LIST(pict->mb_type[mb_index], direction))
1304 if(IS_8X8(pict->mb_type[mb_index])){
/* one arrow per 8x8 sub-block, anchored at the sub-block center */
1307 int sx= mb_x*16 + 4 + 8*(i&1);
1308 int sy= mb_y*16 + 4 + 8*(i>>1);
1309 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1310 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1311 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1312 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1314 }else if(IS_16X8(pict->mb_type[mb_index])){
1318 int sy=mb_y*16 + 4 + 8*i;
1319 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1320 int mx=(pict->motion_val[direction][xy][0]>>shift);
1321 int my=(pict->motion_val[direction][xy][1]>>shift);
1323 if(IS_INTERLACED(pict->mb_type[mb_index]))
1326 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1328 }else if(IS_8X16(pict->mb_type[mb_index])){
1331 int sx=mb_x*16 + 4 + 8*i;
1333 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1334 int mx=(pict->motion_val[direction][xy][0]>>shift);
1335 int my=(pict->motion_val[direction][xy][1]>>shift);
1337 if(IS_INTERLACED(pict->mb_type[mb_index]))
1340 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* default: single 16x16 vector anchored at macroblock center */
1343 int sx= mb_x*16 + 8;
1344 int sy= mb_y*16 + 8;
1345 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1346 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1347 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1348 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: tint the chroma planes proportionally to qscale */
1352 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1353 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1355 for(y=0; y<block_height; y++){
1356 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1357 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* mb-type visualization: color-code each macroblock via U/V */
1360 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1361 int mb_type= pict->mb_type[mb_index];
1364 #define COLOR(theta, r)\
1365 u= (int)(128 + r*cos(theta*3.141592/180));\
1366 v= (int)(128 + r*sin(theta*3.141592/180));
1370 if(IS_PCM(mb_type)){
1372 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1374 }else if(IS_INTRA4x4(mb_type)){
1376 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1378 }else if(IS_DIRECT(mb_type)){
1380 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1382 }else if(IS_GMC(mb_type)){
1384 }else if(IS_SKIP(mb_type)){
1386 }else if(!USES_LIST(mb_type, 1)){
1388 }else if(!USES_LIST(mb_type, 0)){
1391 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the chosen U/V byte across a whole 8-byte row */
1395 u*= 0x0101010101010101ULL;
1396 v*= 0x0101010101010101ULL;
1397 for(y=0; y<block_height; y++){
1398 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1399 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* overlay partition boundaries on luma by XOR-ing 0x80 */
1403 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1404 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1405 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1407 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1409 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1411 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1412 int dm= 1 << (mv_sample_log2-2);
1414 int sx= mb_x*16 + 8*(i&1);
1415 int sy= mb_y*16 + 8*(i>>1);
1416 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* mark sub-block splits only where neighboring vectors actually differ */
1418 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1419 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1421 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1422 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1423 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1427 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* reset the skip counter — the overlay invalidated the stored pixels */
1431 s->mbskip_table[mb_index]=0;
/* Half-pel motion compensation for one block in lowres decoding mode.
 * NOTE(review): extract is missing the function braces, the sx/sy
 * declarations, the quarter_sample body and the return statement;
 * visible code kept byte-identical. */
1437 static inline int hpel_motion_lowres(MpegEncContext *s,
1438 uint8_t *dest, uint8_t *src,
1439 int field_based, int field_select,
1440 int src_x, int src_y,
1441 int width, int height, int stride,
1442 int h_edge_pos, int v_edge_pos,
1443 int w, int h, h264_chroma_mc_func *pix_op,
1444 int motion_x, int motion_y)
1446 const int lowres= s->avctx->lowres;
1447 const int op_index= FFMIN(lowres, 2);
/* s_mask isolates the sub-pel fraction of the (downscaled) vector */
1448 const int s_mask= (2<<lowres)-1;
1452 if(s->quarter_sample){
1457 sx= motion_x & s_mask;
1458 sy= motion_y & s_mask;
/* integer part of the motion, scaled down by the lowres factor */
1459 src_x += motion_x >> (lowres+1);
1460 src_y += motion_y >> (lowres+1);
1462 src += src_y * stride + src_x;
/* fall back to edge emulation when the read would leave the padded area */
1464 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1465 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1466 ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1467 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1468 src= s->edge_emu_buffer;
/* convert the sub-pel fraction into the 1/8-pel weights pix_op expects */
1472 sx= (sx << 2) >> lowres;
1473 sy= (sy << 2) >> lowres;
1476 pix_op[op_index](dest, src, stride, h, sx, sy);
1480 /* apply one mpeg motion vector to the three components */
/* NOTE(review): extract is missing braces, the quarter_sample body, several
 * else branches and the H263/H261 chroma branches' surrounding lines;
 * visible code kept byte-identical. */
1481 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1482 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1483 int field_based, int bottom_field, int field_select,
1484 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1485 int motion_x, int motion_y, int h, int mb_y)
1487 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1488 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1489 const int lowres= s->avctx->lowres;
1490 const int op_index= FFMIN(lowres, 2);
1491 const int block_s= 8>>lowres;
1492 const int s_mask= (2<<lowres)-1;
1493 const int h_edge_pos = s->h_edge_pos >> lowres;
1494 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field pictures double the effective stride */
1495 linesize = s->current_picture.linesize[0] << field_based;
1496 uvlinesize = s->current_picture.linesize[1] << field_based;
1498 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate vertical offset between top/bottom fields at reduced resolution */
1504 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
/* split motion into sub-pel fraction (sx/sy) and integer source position */
1507 sx= motion_x & s_mask;
1508 sy= motion_y & s_mask;
1509 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1510 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* derive the chroma vector per output format's rounding rules */
1512 if (s->out_format == FMT_H263) {
1513 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1514 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1517 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1520 uvsx = (2*mx) & s_mask;
1521 uvsy = (2*my) & s_mask;
1522 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1523 uvsrc_y = mb_y*block_s + (my >> lowres);
1529 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1530 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1533 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1534 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1535 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* edge emulation when the source block reaches outside the padded frame */
1537 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1538 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1539 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1540 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1541 ptr_y = s->edge_emu_buffer;
1542 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1543 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1544 ff_emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1545 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1546 ff_emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1547 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1553 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1554 dest_y += s->linesize;
1555 dest_cb+= s->uvlinesize;
1556 dest_cr+= s->uvlinesize;
1560 ptr_y += s->linesize;
1561 ptr_cb+= s->uvlinesize;
1562 ptr_cr+= s->uvlinesize;
/* map sub-pel fractions to the 1/8-pel weights the chroma MC functions use */
1565 sx= (sx << 2) >> lowres;
1566 sy= (sy << 2) >> lowres;
1567 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1569 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1570 uvsx= (uvsx << 2) >> lowres;
1571 uvsy= (uvsy << 2) >> lowres;
1572 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1573 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1575 //FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks in lowres mode: the four
 * luma vectors are averaged (with H.263 rounding) into one chroma vector.
 * NOTE(review): extract is missing the mx/my parameters, braces and some
 * surrounding lines; visible code kept byte-identical. */
1578 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1579 uint8_t *dest_cb, uint8_t *dest_cr,
1580 uint8_t **ref_picture,
1581 h264_chroma_mc_func *pix_op,
1583 const int lowres= s->avctx->lowres;
1584 const int op_index= FFMIN(lowres, 2);
1585 const int block_s= 8>>lowres;
1586 const int s_mask= (2<<lowres)-1;
1587 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1588 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1589 int emu=0, src_x, src_y, offset, sx, sy;
1592 if(s->quarter_sample){
1597 /* In case of 8X8, we construct a single chroma motion vector
1598 with a special rounding */
1599 mx= ff_h263_round_chroma(mx);
1600 my= ff_h263_round_chroma(my);
1604 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1605 src_y = s->mb_y*block_s + (my >> (lowres+1));
1607 offset = src_y * s->uvlinesize + src_x;
1608 ptr = ref_picture[1] + offset;
1609 if(s->flags&CODEC_FLAG_EMU_EDGE){
1610 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1611 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1612 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1613 ptr= s->edge_emu_buffer;
1617 sx= (sx << 2) >> lowres;
1618 sy= (sy << 2) >> lowres;
1619 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and (if set above) same edge emulation as Cb */
1621 ptr = ref_picture[2] + offset;
1623 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1624 ptr= s->edge_emu_buffer;
1626 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1630 * motion compensation of a single macroblock
1632 * @param dest_y luma destination pointer
1633 * @param dest_cb chroma cb/u destination pointer
1634 * @param dest_cr chroma cr/v destination pointer
1635 * @param dir direction (0->forward, 1->backward)
1636 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1637 * @param pix_op halfpel motion compensation function (average or put normally)
1638 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* NOTE(review): extract is missing the case labels (MV_TYPE_16X16 etc.),
 * several loop headers, braces and declarations (mb_x, mb_y, i, mx, my);
 * visible code kept byte-identical. */
1640 static inline void MPV_motion_lowres(MpegEncContext *s,
1641 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1642 int dir, uint8_t **ref_picture,
1643 h264_chroma_mc_func *pix_op)
1647 const int lowres= s->avctx->lowres;
1648 const int block_s= 8>>lowres;
/* dispatch on the macroblock's motion vector type */
1653 switch(s->mv_type) {
/* single 16x16 vector */
1655 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1657 ref_picture, pix_op,
1658 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one half-pel MC call per 8x8 luma block; mx/my accumulate the four
 * vectors for the combined chroma compensation below */
1664 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1665 ref_picture[0], 0, 0,
1666 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1667 s->width, s->height, s->linesize,
1668 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1669 block_s, block_s, pix_op,
1670 s->mv[dir][i][0], s->mv[dir][i][1]);
1672 mx += s->mv[dir][i][0];
1673 my += s->mv[dir][i][1];
1676 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1677 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* field prediction */
1680 if (s->picture_structure == PICT_FRAME) {
/* frame picture: top field then bottom field, each half height */
1682 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1683 1, 0, s->field_select[dir][0],
1684 ref_picture, pix_op,
1685 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1687 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1688 1, 1, s->field_select[dir][1],
1689 ref_picture, pix_op,
1690 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* field picture: same-parity reference may live in the current frame */
1692 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
1693 ref_picture= s->current_picture_ptr->data;
1696 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1697 0, 0, s->field_select[dir][0],
1698 ref_picture, pix_op,
1699 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* 16x8: two vectors per field picture macroblock */
1704 uint8_t ** ref2picture;
1706 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
1707 ref2picture= ref_picture;
1709 ref2picture= s->current_picture_ptr->data;
1712 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1713 0, 0, s->field_select[dir][i],
1714 ref2picture, pix_op,
1715 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1717 dest_y += 2*block_s*s->linesize;
1718 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1719 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* dual prime */
1723 if(s->picture_structure == PICT_FRAME){
1727 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1729 ref_picture, pix_op,
1730 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1732 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1736 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1737 0, 0, s->picture_structure != i+1,
1738 ref_picture, pix_op,
1739 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1741 // after put we make avg of the same block
1742 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1744 //opposite parity is always in the same frame if this is second field
1745 if(!s->first_field){
1746 ref_picture = s->current_picture_ptr->data;
1755 /* put block[] to dest[] */
1756 static inline void put_dct(MpegEncContext *s,
1757 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1759 s->dct_unquantize_intra(s, block, i, qscale);
1760 s->dsp.idct_put (dest, line_size, block);
1763 /* add block[] to dest[] */
1764 static inline void add_dct(MpegEncContext *s,
1765 DCTELEM *block, int i, uint8_t *dest, int line_size)
1767 if (s->block_last_index[i] >= 0) {
1768 s->dsp.idct_add (dest, line_size, block);
1772 static inline void add_dequant_dct(MpegEncContext *s,
1773 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1775 if (s->block_last_index[i] >= 0) {
1776 s->dct_unquantize_inter(s, block, i, qscale);
1778 s->dsp.idct_add (dest, line_size, block);
1783 * cleans dc, ac, coded_block for the current non intra MB
1785 void ff_clean_intra_table_entries(MpegEncContext *s)
1787 int wrap = s->b8_stride;
1788 int xy = s->block_index[0];
1791 s->dc_val[0][xy + 1 ] =
1792 s->dc_val[0][xy + wrap] =
1793 s->dc_val[0][xy + 1 + wrap] = 1024;
1795 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1796 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1797 if (s->msmpeg4_version>=3) {
1798 s->coded_block[xy ] =
1799 s->coded_block[xy + 1 ] =
1800 s->coded_block[xy + wrap] =
1801 s->coded_block[xy + 1 + wrap] = 0;
1804 wrap = s->mb_stride;
1805 xy = s->mb_x + s->mb_y * wrap;
1807 s->dc_val[2][xy] = 1024;
1809 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1810 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1812 s->mbintra_table[xy]= 0;
1815 /* generic function called after a macroblock has been parsed by the
1816 decoder or after it has been encoded by the encoder.
1818 Important variables used:
1819 s->mb_intra : true if intra macroblock
1820 s->mv_dir : motion vector direction
1821 s->mv_type : motion vector type
1822 s->mv : motion vector
1823 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): this extract is missing many original lines (braces, loop
 * headers, else branches, the `return` after xvmc, the skip_idct goto label);
 * visible code kept byte-identical. */
1825 static av_always_inline
1826 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1827 int lowres_flag, int is_mpeg12)
1829 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware path takes over completely */
1830 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1831 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1835 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1836 /* save DCT coefficients */
1838 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1841 *dct++ = block[i][s->dsp.idct_permutation[j]];
1844 s->current_picture.qscale_table[mb_xy]= s->qscale;
1846 /* update DC predictors for P macroblocks */
1848 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1849 if(s->mbintra_table[mb_xy])
1850 ff_clean_intra_table_entries(s);
1854 s->last_dc[2] = 128 << s->intra_dc_precision;
1857 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1858 s->mbintra_table[mb_xy]=1;
/* reconstruct pixels unless encoding a non-reference pass that never needs them */
1860 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1861 uint8_t *dest_y, *dest_cb, *dest_cr;
1862 int dct_linesize, dct_offset;
1863 op_pixels_func (*op_pix)[4];
1864 qpel_mc_func (*op_qpix)[16];
1865 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1866 const int uvlinesize= s->current_picture.linesize[1];
1867 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1868 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1870 /* avoid copy if macroblock skipped in last frame too */
1871 /* skip only during decoding as we might trash the buffers during encoding a bit */
1873 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1874 const int age= s->current_picture.age;
1878 if (s->mb_skipped) {
1880 assert(s->pict_type!=FF_I_TYPE);
1882 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1883 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1885 /* if previous was skipped too, then nothing to do ! */
1886 if (*mbskip_ptr >= age && s->current_picture.reference){
1889 } else if(!s->current_picture.reference){
1890 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1891 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1893 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT doubles the row stride and offsets the second pair of blocks */
1897 dct_linesize = linesize << s->interlaced_dct;
1898 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1902 dest_cb= s->dest[1];
1903 dest_cr= s->dest[2];
/* not readable (e.g. B frames drawn during decode): render into scratchpad */
1905 dest_y = s->b_scratchpad;
1906 dest_cb= s->b_scratchpad+16*linesize;
1907 dest_cr= s->b_scratchpad+32*linesize;
1911 /* motion handling */
1912 /* decoding or more than one mb_type (MC was already done otherwise) */
/* lowres path: chroma-style MC functions do all the work */
1915 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
1917 if (s->mv_dir & MV_DIR_FORWARD) {
1918 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
/* a backward pass after forward averages into the same block */
1919 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1921 if (s->mv_dir & MV_DIR_BACKWARD) {
1922 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
/* full resolution path */
1925 op_qpix= s->me.qpel_put;
1926 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
1927 op_pix = s->dsp.put_pixels_tab;
1929 op_pix = s->dsp.put_no_rnd_pixels_tab;
1931 if (s->mv_dir & MV_DIR_FORWARD) {
1932 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1933 op_pix = s->dsp.avg_pixels_tab;
1934 op_qpix= s->me.qpel_avg;
1936 if (s->mv_dir & MV_DIR_BACKWARD) {
1937 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1942 /* skip dequant / idct if we are really late ;) */
1943 if(s->hurry_up>1) goto skip_idct;
1944 if(s->avctx->skip_idct){
1945 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
1946 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
1947 || s->avctx->skip_idct >= AVDISCARD_ALL)
1951 /* add dct residue */
/* inter path with in-loop dequantization (codecs whose unquantize is not
 * folded into coefficient decoding) */
1952 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1953 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1954 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1955 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1956 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1957 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1959 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1960 if (s->chroma_y_shift){
1961 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1962 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1966 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1967 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1968 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1969 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* inter path, residue already dequantized */
1972 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
1973 add_dct(s, block[0], 0, dest_y , dct_linesize);
1974 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1975 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1976 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1978 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1979 if(s->chroma_y_shift){//Chroma420
1980 add_dct(s, block[4], 4, dest_cb, uvlinesize);
1981 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 4:2:2 / 4:4:4: chroma uses its own interlace-aware stride/offset */
1984 dct_linesize = uvlinesize << s->interlaced_dct;
1985 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
1987 add_dct(s, block[4], 4, dest_cb, dct_linesize);
1988 add_dct(s, block[5], 5, dest_cr, dct_linesize);
1989 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1990 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1991 if(!s->chroma_x_shift){//Chroma444
1992 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
1993 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
1994 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
1995 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2000 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2001 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2004 /* dct only in intra block */
/* intra path with in-loop dequantization */
2005 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2006 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2007 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2008 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2009 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2011 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2012 if(s->chroma_y_shift){
2013 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2014 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2018 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2019 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2020 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2021 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra path, residue already dequantized: plain idct_put */
2025 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2026 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2027 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2028 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2030 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2031 if(s->chroma_y_shift){
2032 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2033 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2036 dct_linesize = uvlinesize << s->interlaced_dct;
2037 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2039 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2040 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2041 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2042 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2043 if(!s->chroma_x_shift){//Chroma444
2044 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2045 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2046 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2047 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* when rendered into the scratchpad, copy the finished MB back to s->dest */
2055 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2056 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2057 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch macroblock reconstruction to the right MPV_decode_mb_internal
 * specialization (lowres yes/no, mpeg1/2 yes/no) so the always-inline body
 * is compiled with constant flags.
 * NOTE(review): extract is missing lines between the two if/else pairs —
 * presumably braces and/or a build-configuration guard; visible code kept
 * byte-identical. */
2062 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2064 if(s->out_format == FMT_MPEG1) {
2065 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2066 else MPV_decode_mb_internal(s, block, 0, 1);
2069 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2070 else MPV_decode_mb_internal(s, block, 0, 0);
2075 * @param h is the normal height, this will be reduced automatically if needed for the last row
2077 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2078 if (s->avctx->draw_horiz_band) {
2080 const int field_pic= s->picture_structure != PICT_FRAME;
2083 h= FFMIN(h, (s->avctx->height>>field_pic) - y);
2085 if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
2088 if(s->first_field) return;
2091 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2092 src= (AVFrame*)s->current_picture_ptr;
2093 else if(s->last_picture_ptr)
2094 src= (AVFrame*)s->last_picture_ptr;
2098 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2104 offset[0]= y * s->linesize;
2106 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2112 s->avctx->draw_horiz_band(s->avctx, src, offset,
2113 y, s->picture_structure, h);
2117 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2118 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2119 const int uvlinesize= s->current_picture.linesize[1];
2120 const int mb_size= 4 - s->avctx->lowres;
2122 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2123 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2124 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2125 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2126 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2127 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2128 //block_index is not used by mpeg2, so it is not affected by chroma_format
2130 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2131 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2132 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2134 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2136 if(s->picture_structure==PICT_FRAME){
2137 s->dest[0] += s->mb_y * linesize << mb_size;
2138 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2139 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2141 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2142 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2143 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2144 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2149 void ff_mpeg_flush(AVCodecContext *avctx){
2151 MpegEncContext *s = avctx->priv_data;
2153 if(s==NULL || s->picture==NULL)
2156 for(i=0; i<MAX_PICTURE_COUNT; i++){
2157 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2158 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2159 free_frame_buffer(s, &s->picture[i]);
2161 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2163 s->mb_x= s->mb_y= 0;
2166 s->parse_context.state= -1;
2167 s->parse_context.frame_start_found= 0;
2168 s->parse_context.overread= 0;
2169 s->parse_context.overread_index= 0;
2170 s->parse_context.index= 0;
2171 s->parse_context.last_index= 0;
2172 s->bitstream_buffer_size=0;
2176 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2177 DCTELEM *block, int n, int qscale)
2179 int i, level, nCoeffs;
2180 const uint16_t *quant_matrix;
2182 nCoeffs= s->block_last_index[n];
2185 block[0] = block[0] * s->y_dc_scale;
2187 block[0] = block[0] * s->c_dc_scale;
2188 /* XXX: only mpeg1 */
2189 quant_matrix = s->intra_matrix;
2190 for(i=1;i<=nCoeffs;i++) {
2191 int j= s->intra_scantable.permutated[i];
2196 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2197 level = (level - 1) | 1;
2200 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2201 level = (level - 1) | 1;
2208 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2209 DCTELEM *block, int n, int qscale)
2211 int i, level, nCoeffs;
2212 const uint16_t *quant_matrix;
2214 nCoeffs= s->block_last_index[n];
2216 quant_matrix = s->inter_matrix;
2217 for(i=0; i<=nCoeffs; i++) {
2218 int j= s->intra_scantable.permutated[i];
2223 level = (((level << 1) + 1) * qscale *
2224 ((int) (quant_matrix[j]))) >> 4;
2225 level = (level - 1) | 1;
2228 level = (((level << 1) + 1) * qscale *
2229 ((int) (quant_matrix[j]))) >> 4;
2230 level = (level - 1) | 1;
2237 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2238 DCTELEM *block, int n, int qscale)
2240 int i, level, nCoeffs;
2241 const uint16_t *quant_matrix;
2243 if(s->alternate_scan) nCoeffs= 63;
2244 else nCoeffs= s->block_last_index[n];
2247 block[0] = block[0] * s->y_dc_scale;
2249 block[0] = block[0] * s->c_dc_scale;
2250 quant_matrix = s->intra_matrix;
2251 for(i=1;i<=nCoeffs;i++) {
2252 int j= s->intra_scantable.permutated[i];
2257 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2260 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2267 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2268 DCTELEM *block, int n, int qscale)
2270 int i, level, nCoeffs;
2271 const uint16_t *quant_matrix;
2274 if(s->alternate_scan) nCoeffs= 63;
2275 else nCoeffs= s->block_last_index[n];
2278 block[0] = block[0] * s->y_dc_scale;
2280 block[0] = block[0] * s->c_dc_scale;
2281 quant_matrix = s->intra_matrix;
2282 for(i=1;i<=nCoeffs;i++) {
2283 int j= s->intra_scantable.permutated[i];
2288 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2291 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2300 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2301 DCTELEM *block, int n, int qscale)
2303 int i, level, nCoeffs;
2304 const uint16_t *quant_matrix;
2307 if(s->alternate_scan) nCoeffs= 63;
2308 else nCoeffs= s->block_last_index[n];
2310 quant_matrix = s->inter_matrix;
2311 for(i=0; i<=nCoeffs; i++) {
2312 int j= s->intra_scantable.permutated[i];
2317 level = (((level << 1) + 1) * qscale *
2318 ((int) (quant_matrix[j]))) >> 4;
2321 level = (((level << 1) + 1) * qscale *
2322 ((int) (quant_matrix[j]))) >> 4;
2331 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2332 DCTELEM *block, int n, int qscale)
2334 int i, level, qmul, qadd;
2337 assert(s->block_last_index[n]>=0);
2343 block[0] = block[0] * s->y_dc_scale;
2345 block[0] = block[0] * s->c_dc_scale;
2346 qadd = (qscale - 1) | 1;
2353 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2355 for(i=1; i<=nCoeffs; i++) {
2359 level = level * qmul - qadd;
2361 level = level * qmul + qadd;
2368 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2369 DCTELEM *block, int n, int qscale)
2371 int i, level, qmul, qadd;
2374 assert(s->block_last_index[n]>=0);
2376 qadd = (qscale - 1) | 1;
2379 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2381 for(i=0; i<=nCoeffs; i++) {
2385 level = level * qmul - qadd;
2387 level = level * qmul + qadd;
2395 * set qscale and update qscale dependent variables.
2397 void ff_set_qscale(MpegEncContext * s, int qscale)
2401 else if (qscale > 31)
2405 s->chroma_qscale= s->chroma_qscale_table[qscale];
2407 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2408 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];