2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
24 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "mpegvideo.h"
34 #include "fastmemcpy.h"
/* Forward declarations. Encoder-only symbols are compiled out when
   CONFIG_ENCODERS is not defined. */
40 #ifdef CONFIG_ENCODERS
41 static void encode_picture(MpegEncContext *s, int picture_number);
42 #endif //CONFIG_ENCODERS
/* Reference (plain C) dequantizers for the three quantizer flavours;
   arch-specific init code may replace the corresponding function pointers. */
43 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
44 DCTELEM *block, int n, int qscale);
45 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_h263_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
50 #ifdef CONFIG_ENCODERS
51 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
52 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
53 static int sse_mb(MpegEncContext *s);
54 #endif //CONFIG_ENCODERS
/* XvMC hardware-acceleration hooks, implemented in a separate file. */
57 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
58 extern void XVMC_field_end(MpegEncContext *s);
59 extern void XVMC_decode_mb(MpegEncContext *s);
/* Overridable function pointer; defaults to the C implementation below. */
62 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
65 /* enable all paranoid tests for rounding, overflows, etc... */
71 /* for jpeg fast DCT */
/* AAN (Arai/Agui/Nakajima) fast-DCT post-scale factors, fixed point.
   Indexed in raster order; used by convert_matrix() for the ifast DCT path. */
74 static const uint16_t aanscales[64] = {
75 /* precomputed values scaled up by 14 bits */
76 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
77 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
78 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
79 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
80 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
81 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
82 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446,
83 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247
/* Rounding table for H.263 chroma motion vectors (maps a 1/16-pel
   remainder to a half-pel rounding decision). */
86 static const uint8_t h263_chroma_roundtab[16] = {
87 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
88 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
91 #ifdef CONFIG_ENCODERS
/* Shared encoder defaults: MV cost penalty table (lazily allocated in
   MPV_encode_init) and the default fcode lookup table. */
92 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
93 static uint8_t default_fcode_tab[MAX_MV*2+1];
95 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/*
 * Builds per-qscale quantization multiplier tables from a quant matrix.
 * qmat   : 32-bit multipliers (one 64-entry table per qscale in [qmin,qmax]).
 * qmat16 : 16-bit multiplier ([0]) and rounding-bias ([1]) pairs for the
 *          MMX/SIMD quantizer path.
 * The scaling depends on which forward DCT is in use: the islow (and, with
 * FAAN_POSTSCALE, the FAAN) DCT needs no extra prescale, while the ifast
 * DCT folds the AAN scale factors (aanscales[], 14-bit) into the divisors.
 */
97 static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
98 const uint16_t *quant_matrix, int bias, int qmin, int qmax)
102 for(qscale=qmin; qscale<=qmax; qscale++){
104 if (dsp->fdct == ff_jpeg_fdct_islow
105 #ifdef FAAN_POSTSCALE
106 || dsp->fdct == ff_faandct
/* j follows the IDCT coefficient permutation so the table matches the
   layout the (possibly SIMD) IDCT expects. */
110 const int j= dsp->idct_permutation[i];
111 /* 16 <= qscale * quant_matrix[i] <= 7905 */
112 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
113 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
114 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
116 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
117 (qscale * quant_matrix[j]));
119 } else if (dsp->fdct == fdct_ifast
120 #ifndef FAAN_POSTSCALE
121 || dsp->fdct == ff_faandct
125 const int j= dsp->idct_permutation[i];
126 /* 16 <= qscale * quant_matrix[i] <= 7905 */
127 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
128 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
129 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
/* Extra +14 bits of headroom compensate for the 14-bit aanscales factor. */
131 qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
132 (aanscales[i] * qscale * quant_matrix[j]));
136 const int j= dsp->idct_permutation[i];
137 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138 So 16 <= qscale * quant_matrix[i] <= 7905
139 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
140 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
142 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
143 // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
144 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
/* Clamp to avoid a 0 or 0x8000 multiplier, which the 16-bit SIMD
   quantizer cannot represent safely. */
146 if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
147 qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
/* Derives the integer quantizer from the rate-distortion lambda (fixed-point
   scale by ~139/128 with rounding), clamps it to the user qmin/qmax range,
   and keeps lambda2 (lambda squared, FF_LAMBDA_SHIFT fixed point) in sync. */
153 static inline void update_qscale(MpegEncContext *s){
154 s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
155 s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
157 s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
159 #endif //CONFIG_ENCODERS
/* Initializes a ScanTable from a canonical scan order: stores the source
   scan, applies the IDCT coefficient permutation to build st->permutated,
   and records per-position raster-scan end markers in st->raster_end. */
161 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
165 st->scantable= src_scantable;
169 j = src_scantable[i];
170 st->permutated[i] = permutation[j];
179 j = st->permutated[i];
181 st->raster_end[i]= end;
185 #ifdef CONFIG_ENCODERS
/* Writes a 64-entry quant matrix to the bitstream, 8 bits per coefficient,
   in zigzag order as the MPEG bitstream syntax requires. */
186 void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
192 put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
197 #endif //CONFIG_ENCODERS
199 /* init common dct for both encoder and decoder */
200 int DCT_common_init(MpegEncContext *s)
/* Install the C reference implementations first ... */
202 s->dct_unquantize_h263 = dct_unquantize_h263_c;
203 s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
204 s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
206 #ifdef CONFIG_ENCODERS
207 s->dct_quantize= dct_quantize_c;
/* ... then let arch-specific init override them where available. */
211 MPV_common_init_mmx(s);
214 MPV_common_init_axp(s);
217 MPV_common_init_mlib(s);
220 MPV_common_init_mmi(s);
223 MPV_common_init_armv4l(s);
226 MPV_common_init_ppc(s);
229 #ifdef CONFIG_ENCODERS
/* Keep the (possibly SIMD) quantizer around as the fast variant before
   optionally switching the main pointer to the trellis quantizer. */
230 s->fast_dct_quantize= s->dct_quantize;
232 if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
233 s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
236 #endif //CONFIG_ENCODERS
238 /* load & permutate scantables
239 note: only wmv uses different ones
241 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
242 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
243 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
244 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
246 s->picture_structure= PICT_FRAME;
/* Shallow-copies a Picture and marks the destination as a copy so the
   buffer-release logic does not free the shared data twice. */
251 static void copy_picture(Picture *dst, Picture *src){
253 dst->type= FF_BUFFER_TYPE_COPY;
257 * allocates a Picture
258 * The pixels are allocated/set by calling get_buffer() if shared=0
260 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
261 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
262 const int mb_array_size= s->mb_stride*s->mb_height;
/* Shared path: the caller already owns pixel data; just tag the type. */
266 assert(pic->data[0]);
267 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
268 pic->type= FF_BUFFER_TYPE_SHARED;
/* Non-shared path: obtain pixels from the application via get_buffer(). */
272 assert(!pic->data[0]);
274 r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
276 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
277 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
/* The codec relies on constant strides across frames; reject a change. */
281 if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
282 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
286 if(pic->linesize[1] != pic->linesize[2]){
287 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n");
291 s->linesize = pic->linesize[0];
292 s->uvlinesize= pic->linesize[1];
/* Lazily allocate the per-macroblock side tables on first use. */
295 if(pic->qscale_table==NULL){
297 CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
298 CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
299 CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
300 CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
303 CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
304 CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
305 CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(int))
306 pic->mb_type= pic->mb_type_base + s->mb_stride+1;
307 if(s->out_format == FMT_H264){
309 CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
310 CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
313 pic->qstride= s->mb_stride;
314 CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
317 //it might be nicer if the application would keep track of these but it would require an API change
318 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
319 s->prev_pict_types[0]= s->pict_type;
320 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
321 pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and it's a bit tricky to skip them anyway
324 fail: //for the CHECKED_ALLOCZ macro
329 * deallocates a picture
/* Releases the application buffer (unless shared) and frees all lazily
   allocated per-macroblock side tables. Safe to call on a partially
   initialized Picture: av_freep() tolerates NULL pointers. */
331 static void free_picture(MpegEncContext *s, Picture *pic){
334 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
335 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
338 av_freep(&pic->mb_var);
339 av_freep(&pic->mc_mb_var);
340 av_freep(&pic->mb_mean);
341 av_freep(&pic->mb_cmp_score);
342 av_freep(&pic->mbskip_table);
343 av_freep(&pic->qscale_table);
344 av_freep(&pic->mb_type_base);
345 av_freep(&pic->pan_scan);
348 av_freep(&pic->motion_val[i]);
349 av_freep(&pic->ref_index[i]);
352 if(pic->type == FF_BUFFER_TYPE_SHARED){
361 /* init common structure for both encoder and decoder */
/* Sizes all macroblock-granularity tables from the picture dimensions and
   allocates everything the shared encode/decode core needs. Returns <0 on
   allocation failure (via the CHECKED_ALLOCZ fail label, not fully visible
   here). */
362 int MPV_common_init(MpegEncContext *s)
364 int y_size, c_size, yc_size, i, mb_array_size, x, y;
366 dsputil_init(&s->dsp, s->avctx);
369 s->flags= s->avctx->flags;
/* Dimensions rounded up to whole 16x16 macroblocks; stride has one spare
   column for edge handling. */
371 s->mb_width = (s->width + 15) / 16;
372 s->mb_height = (s->height + 15) / 16;
373 s->mb_stride = s->mb_width + 1;
374 mb_array_size= s->mb_height * s->mb_stride;
376 /* set default edge pos, will be overridden in decode_header if needed */
377 s->h_edge_pos= s->mb_width*16;
378 s->v_edge_pos= s->mb_height*16;
380 s->mb_num = s->mb_width * s->mb_height;
385 s->block_wrap[3]= s->mb_width*2 + 2;
387 s->block_wrap[5]= s->mb_width + 2;
/* Block-level table sizes: luma at 2x MB resolution, chroma at 1x, each
   padded by a one-block border on every side. */
389 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
390 c_size = (s->mb_width + 2) * (s->mb_height + 2);
391 yc_size = y_size + 2 * c_size;
393 /* convert fourcc to upper case */
394 s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
395 + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
396 + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
397 + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
399 s->avctx->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
400 + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
401 + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
402 + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
404 CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
405 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
407 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
409 CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
410 for(y=0; y<s->mb_height; y++){
411 for(x=0; x<s->mb_width; x++){
412 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
415 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
418 int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;
420 /* Allocate MV tables */
421 CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
422 CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
423 CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
424 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
425 CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
426 CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
/* Working pointers skip the one-row/one-column border of each base table. */
427 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
428 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
429 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
430 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
431 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
432 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
434 //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
435 CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
437 CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t))
438 CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
440 if(s->codec_id==CODEC_ID_MPEG4){
441 CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
442 CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE);
445 if(s->msmpeg4_version){
446 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
448 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
450 /* Allocate MB type table */
451 CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint8_t)) //needed for encoding
453 CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
/* Quant matrices: 64 coefficients x 32 qscales (x2 tables for the 16-bit
   SIMD variant). */
455 CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
456 CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
457 CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
458 CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
459 CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
460 CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
462 if(s->avctx->noise_reduction){
463 CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
464 CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
467 CHECKED_ALLOCZ(s->blocks, 64*6*2 * sizeof(DCTELEM))
469 CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))
471 CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
473 if (s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_VIS_MV)) {
477 size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
478 CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));
481 if(s->codec_id==CODEC_ID_MPEG4){
482 /* interlaced direct mode decoding tables */
483 CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
484 CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
486 if (s->out_format == FMT_H263) {
/* AC prediction values: one shared buffer partitioned Y / Cb / Cr. */
488 CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
489 s->ac_val[1] = s->ac_val[0] + y_size;
490 s->ac_val[2] = s->ac_val[1] + c_size;
493 CHECKED_ALLOCZ(s->coded_block, y_size);
495 /* divx501 bitstream reorder buffer */
496 CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
498 /* cbp, ac_pred, pred_dir */
499 CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
500 CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
503 if (s->h263_pred || s->h263_plus || !s->encoding) {
505 //MN: we need these for error resilience of intra-frames
/* DC prediction values, initialized to the reset value 1024. */
506 CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
507 s->dc_val[1] = s->dc_val[0] + y_size;
508 s->dc_val[2] = s->dc_val[1] + c_size;
509 for(i=0;i<yc_size;i++)
510 s->dc_val[0][i] = 1024;
513 /* which mb is an intra block */
514 CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
515 memset(s->mbintra_table, 1, mb_array_size);
517 /* default structure is frame */
518 s->picture_structure = PICT_FRAME;
520 /* init macroblock skip table */
521 CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
522 //Note the +1 is for a quicker mpeg4 slice_end detection
523 CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
525 s->block= s->blocks[0];
528 s->pblocks[i] = (short *)(&s->block[i]);
531 s->parse_context.state= -1;
533 s->context_initialized = 1;
543 /* init common structure for both encoder and decoder */
/* Tears down everything MPV_common_init() allocated. av_freep() NULLs each
   pointer, so a partially initialized context can be freed safely. */
544 void MPV_common_end(MpegEncContext *s)
548 av_freep(&s->parse_context.buffer);
549 s->parse_context.buffer_size=0;
551 av_freep(&s->mb_type);
552 av_freep(&s->p_mv_table_base);
553 av_freep(&s->b_forw_mv_table_base);
554 av_freep(&s->b_back_mv_table_base);
555 av_freep(&s->b_bidir_forw_mv_table_base);
556 av_freep(&s->b_bidir_back_mv_table_base);
557 av_freep(&s->b_direct_mv_table_base);
/* The derived pointers offset into the freed base tables; clear them too. */
559 s->b_forw_mv_table= NULL;
560 s->b_back_mv_table= NULL;
561 s->b_bidir_forw_mv_table= NULL;
562 s->b_bidir_back_mv_table= NULL;
563 s->b_direct_mv_table= NULL;
565 av_freep(&s->motion_val);
566 av_freep(&s->dc_val[0]);
567 av_freep(&s->ac_val[0]);
568 av_freep(&s->coded_block);
569 av_freep(&s->mbintra_table);
570 av_freep(&s->cbp_table);
571 av_freep(&s->pred_dir_table);
572 av_freep(&s->me.scratchpad);
573 av_freep(&s->me.map);
574 av_freep(&s->me.score_map);
576 av_freep(&s->mbskip_table);
577 av_freep(&s->prev_pict_types);
578 av_freep(&s->bitstream_buffer);
579 av_freep(&s->tex_pb_buffer);
580 av_freep(&s->pb2_buffer);
581 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
582 av_freep(&s->field_mv_table);
583 av_freep(&s->field_select_table);
584 av_freep(&s->avctx->stats_out);
585 av_freep(&s->ac_stats);
586 av_freep(&s->error_status_table);
587 av_freep(&s->mb_index2xy);
588 av_freep(&s->lambda_table);
589 av_freep(&s->q_intra_matrix);
590 av_freep(&s->q_inter_matrix);
591 av_freep(&s->q_intra_matrix16);
592 av_freep(&s->q_inter_matrix16);
593 av_freep(&s->blocks);
594 av_freep(&s->input_picture);
595 av_freep(&s->reordered_input_picture);
596 av_freep(&s->dct_error_sum);
597 av_freep(&s->dct_offset);
/* Free each Picture's side tables before the picture array itself. */
600 for(i=0; i<MAX_PICTURE_COUNT; i++){
601 free_picture(s, &s->picture[i]);
604 av_freep(&s->picture);
605 avcodec_default_free_buffers(s->avctx);
606 s->context_initialized = 0;
609 s->current_picture_ptr= NULL;
612 #ifdef CONFIG_ENCODERS
614 /* init video encoder */
/* Copies user settings from AVCodecContext, validates flag/codec
   combinations, configures per-codec output format and quantizer biases,
   then allocates via MPV_common_init() and initializes rate control.
   Returns 0 on success, negative on invalid parameters or alloc failure
   (error paths not fully visible in this excerpt). */
615 int MPV_encode_init(AVCodecContext *avctx)
617 MpegEncContext *s = avctx->priv_data;
619 int chroma_h_shift, chroma_v_shift;
621 avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
/* --- copy user parameters --- */
623 s->bit_rate = avctx->bit_rate;
624 s->bit_rate_tolerance = avctx->bit_rate_tolerance;
625 s->width = avctx->width;
626 s->height = avctx->height;
627 if(avctx->gop_size > 600){
628 av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
631 s->gop_size = avctx->gop_size;
632 s->rtp_mode = avctx->rtp_mode;
633 s->rtp_payload_size = avctx->rtp_payload_size;
634 if (avctx->rtp_callback)
635 s->rtp_callback = avctx->rtp_callback;
636 s->max_qdiff= avctx->max_qdiff;
637 s->qcompress= avctx->qcompress;
638 s->qblur= avctx->qblur;
640 s->flags= avctx->flags;
641 s->max_b_frames= avctx->max_b_frames;
642 s->b_frame_strategy= avctx->b_frame_strategy;
643 s->codec_id= avctx->codec->id;
644 s->luma_elim_threshold = avctx->luma_elim_threshold;
645 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
646 s->strict_std_compliance= avctx->strict_std_compliance;
647 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
648 s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
649 s->mpeg_quant= avctx->mpeg_quant;
651 if (s->gop_size <= 1) {
658 s->me_method = avctx->me_method;
661 s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is needed whenever any masking model or QP-RD
   is enabled. */
663 s->adaptive_quant= ( s->avctx->lumi_masking
664 || s->avctx->dark_masking
665 || s->avctx->temporal_cplx_masking
666 || s->avctx->spatial_cplx_masking
667 || s->avctx->p_masking
668 || (s->flags&CODEC_FLAG_QP_RD))
671 s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
/* --- reject flag/codec combinations the codecs cannot encode --- */
673 if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
674 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
678 if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
679 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
683 if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
684 av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
688 if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
689 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
693 if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
694 av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n");
698 if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
699 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
703 if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
704 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
/* --- default quantizer rounding biases, per codec family --- */
708 if(s->codec_id==CODEC_ID_MJPEG){
709 s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
710 s->inter_quant_bias= 0;
711 }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
712 s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
713 s->inter_quant_bias= 0;
715 s->intra_quant_bias=0;
716 s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
/* Explicit user-supplied biases override the defaults. */
719 if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
720 s->intra_quant_bias= avctx->intra_quant_bias;
721 if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
722 s->inter_quant_bias= avctx->inter_quant_bias;
724 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
/* Reduce the frame rate to a <=16-bit time base for MPEG-4 timestamps. */
726 av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
727 s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
/* --- per-codec output format setup --- */
729 switch(avctx->codec->id) {
730 case CODEC_ID_MPEG1VIDEO:
731 s->out_format = FMT_MPEG1;
732 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
733 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
735 case CODEC_ID_MPEG2VIDEO:
736 s->out_format = FMT_MPEG1;
737 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
738 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
739 s->rtp_mode= 1; // mpeg2 must have slices
740 if(s->rtp_payload_size == 0) s->rtp_payload_size= 256*256*256;
744 s->out_format = FMT_MJPEG;
745 s->intra_only = 1; /* force intra only for jpeg */
746 s->mjpeg_write_tables = 1; /* write all tables */
747 s->mjpeg_data_only_frames = 0; /* write all the needed headers */
748 s->mjpeg_vsample[0] = 1<<chroma_v_shift;
749 s->mjpeg_vsample[1] = 1;
750 s->mjpeg_vsample[2] = 1;
751 s->mjpeg_hsample[0] = 1<<chroma_h_shift;
752 s->mjpeg_hsample[1] = 1;
753 s->mjpeg_hsample[2] = 1;
754 if (mjpeg_init(s) < 0)
761 if (h263_get_picture_format(s->width, s->height) == 7) {
762 av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n");
765 s->out_format = FMT_H263;
770 s->out_format = FMT_H263;
773 s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
774 s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
776 /* These are just to be sure */
782 s->out_format = FMT_H263;
783 s->h263_flv = 2; /* format = 1; 11-bit codes */
784 s->unrestricted_mv = 1;
785 s->rtp_mode=0; /* don't allow GOB */
790 s->out_format = FMT_H263;
796 s->out_format = FMT_H263;
798 s->unrestricted_mv = 1;
799 s->low_delay= s->max_b_frames ? 0 : 1;
800 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
802 case CODEC_ID_MSMPEG4V1:
803 s->out_format = FMT_H263;
806 s->unrestricted_mv = 1;
807 s->msmpeg4_version= 1;
811 case CODEC_ID_MSMPEG4V2:
812 s->out_format = FMT_H263;
815 s->unrestricted_mv = 1;
816 s->msmpeg4_version= 2;
820 case CODEC_ID_MSMPEG4V3:
821 s->out_format = FMT_H263;
824 s->unrestricted_mv = 1;
825 s->msmpeg4_version= 3;
826 s->flipflop_rounding=1;
831 s->out_format = FMT_H263;
834 s->unrestricted_mv = 1;
835 s->msmpeg4_version= 4;
836 s->flipflop_rounding=1;
841 s->out_format = FMT_H263;
844 s->unrestricted_mv = 1;
845 s->msmpeg4_version= 5;
846 s->flipflop_rounding=1;
855 { /* set up some save defaults, some codecs might override them later */
861 default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
862 memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
863 memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
865 for(i=-16; i<16; i++){
866 default_fcode_tab[i + MAX_MV]= 1;
870 s->me.mv_penalty= default_mv_penalty;
871 s->fcode_tab= default_fcode_tab;
873 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
875 /* don't use mv_penalty table for crap MV as it would be confused */
876 //FIXME remove after fixing / removing old ME
877 if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
882 if (MPV_common_init(s) < 0)
887 #ifdef CONFIG_ENCODERS
889 if (s->out_format == FMT_H263)
891 if(s->msmpeg4_version)
892 ff_msmpeg4_encode_init(s);
894 if (s->out_format == FMT_MPEG1)
895 ff_mpeg1_encode_init(s);
898 /* init default q matrix */
900 int j= s->dsp.idct_permutation[i];
902 if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
903 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
904 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
905 }else if(s->out_format == FMT_H263){
907 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
911 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
912 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* User-supplied matrices take precedence over the codec defaults. */
914 if(s->avctx->intra_matrix)
915 s->intra_matrix[j] = s->avctx->intra_matrix[i];
916 if(s->avctx->inter_matrix)
917 s->inter_matrix[j] = s->avctx->inter_matrix[i];
920 /* precompute matrix */
921 /* for mjpeg, we do include qscale in the matrix */
922 if (s->out_format != FMT_MJPEG) {
923 convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
924 s->intra_matrix, s->intra_quant_bias, 1, 31);
925 convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
926 s->inter_matrix, s->inter_quant_bias, 1, 31);
929 if(ff_rate_control_init(s) < 0)
932 s->picture_number = 0;
933 s->picture_in_gop_number = 0;
934 s->fake_picture_number = 0;
935 /* motion detector init */
/* Encoder teardown: releases rate control, codec-specific state (MJPEG),
   and the extradata buffer. Counterpart of MPV_encode_init(). */
942 int MPV_encode_end(AVCodecContext *avctx)
944 MpegEncContext *s = avctx->priv_data;
950 ff_rate_control_uninit(s);
953 if (s->out_format == FMT_MJPEG)
956 av_freep(&avctx->extradata);
961 #endif //CONFIG_ENCODERS
/* Builds the derived run/level lookup tables of an RLTable:
   for each "last" state, max_level[run], max_run[level] and index_run[run]
   are computed from the VLC table and stored in freshly allocated copies.
   NOTE(review): av_malloc() results are used unchecked here. */
963 void init_rl(RLTable *rl)
965 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
966 uint8_t index_run[MAX_RUN+1];
967 int last, run, level, start, end, i;
969 /* compute max_level[], max_run[] and index_run[] */
970 for(last=0;last<2;last++) {
/* index_run entries default to rl->n, meaning "no code for this run". */
979 memset(max_level, 0, MAX_RUN + 1);
980 memset(max_run, 0, MAX_LEVEL + 1);
981 memset(index_run, rl->n, MAX_RUN + 1);
982 for(i=start;i<end;i++) {
983 run = rl->table_run[i];
984 level = rl->table_level[i];
985 if (index_run[run] == rl->n)
987 if (level > max_level[run])
988 max_level[run] = level;
989 if (run > max_run[level])
990 max_run[level] = run;
992 rl->max_level[last] = av_malloc(MAX_RUN + 1);
993 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
994 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
995 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
996 rl->index_run[last] = av_malloc(MAX_RUN + 1);
997 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1001 /* draw the edges of width 'w' of an image of size width, height */
1002 //FIXME check that this is ok for mpeg4 interlaced
/* Replicates border pixels outward by 'w' pixels on all four sides so
   motion compensation can read outside the picture without clipping. */
1003 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
1005 uint8_t *ptr, *last_line;
1008 last_line = buf + (height - 1) * wrap;
1010 /* top and bottom */
1011 memcpy(buf - (i + 1) * wrap, buf, width);
1012 memcpy(last_line + (i + 1) * wrap, last_line, width);
1014 /* left and right */
1016 for(i=0;i<height;i++) {
1017 memset(ptr - w, ptr[0], w);
1018 memset(ptr + width, ptr[width-1], w);
/* corners: replicate the four corner pixels into the diagonal regions */
1023 memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
1024 memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
1025 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
1026 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
/* Returns the index of a free slot in s->picture[]. For shared buffers any
   slot without pixel data is acceptable; otherwise previously typed slots
   are preferred so buffer types stay stable across reuse. */
1030 int ff_find_unused_picture(MpegEncContext *s, int shared){
1034 for(i=0; i<MAX_PICTURE_COUNT; i++){
1035 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
1038 for(i=0; i<MAX_PICTURE_COUNT; i++){
1039 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
1041 for(i=0; i<MAX_PICTURE_COUNT; i++){
1042 if(s->picture[i].data[0]==NULL) return i;
/* Updates the noise-reduction DCT offset tables from accumulated error
   statistics (separately for intra and inter blocks). Counts and error sums
   are halved once the count exceeds 2^16 to form a decaying average. */
1050 static void update_noise_reduction(MpegEncContext *s){
1053 for(intra=0; intra<2; intra++){
1054 if(s->dct_count[intra] > (1<<16)){
1055 for(i=0; i<64; i++){
1056 s->dct_error_sum[intra][i] >>=1;
1058 s->dct_count[intra] >>= 1;
1061 for(i=0; i<64; i++){
1062 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1068 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
1070 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1076 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1078 /* mark&release old frames */
1079 if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
1080 avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
1082 /* release forgotten pictures */
1083 /* if(mpeg124/h263) */
1085 for(i=0; i<MAX_PICTURE_COUNT; i++){
1086 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1087 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1088 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
1095 /* release non reference frames */
1096 for(i=0; i<MAX_PICTURE_COUNT; i++){
1097 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1098 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1102 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1103 pic= (AVFrame*)s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1105 i= ff_find_unused_picture(s, 0);
1106 pic= (AVFrame*)&s->picture[i];
/* B frames are never used as references; P/I pictures reference both fields. */
1109 pic->reference= s->pict_type != B_TYPE ? 3 : 0;
1111 if(s->current_picture_ptr) //FIXME broken, we need a coded_picture_number in MpegEncContext
1112 pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
1114 if( alloc_picture(s, (Picture*)pic, 0) < 0)
1117 s->current_picture_ptr= (Picture*)pic;
1120 s->current_picture_ptr->pict_type= s->pict_type;
1121 // if(s->flags && CODEC_FLAG_QSCALE)
1122 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1123 s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
1125 copy_picture(&s->current_picture, s->current_picture_ptr);
1127 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
/* Rotate reference pictures: previous "next" becomes "last". */
1128 if (s->pict_type != B_TYPE) {
1129 s->last_picture_ptr= s->next_picture_ptr;
1130 s->next_picture_ptr= s->current_picture_ptr;
1133 if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
1134 if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
1136 if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
1137 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1138 assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
1142 assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* Field pictures: double the strides (and offset for the bottom field) so
   the per-field data can be addressed with frame-style indexing. */
1144 if(s->picture_structure!=PICT_FRAME){
1147 if(s->picture_structure == PICT_BOTTOM_FIELD){
1148 s->current_picture.data[i] += s->current_picture.linesize[i];
1150 s->current_picture.linesize[i] *= 2;
1151 s->last_picture.linesize[i] *=2;
1152 s->next_picture.linesize[i] *=2;
1157 s->hurry_up= s->avctx->hurry_up;
1158 s->error_resilience= avctx->error_resilience;
1160 /* set dequantizer, we can't do it during init as it might change for mpeg4
1161 and we can't do it in the header decode as init isn't called for mpeg4 there yet */
1162 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO)
1163 s->dct_unquantize = s->dct_unquantize_mpeg2;
1164 else if(s->out_format == FMT_H263)
1165 s->dct_unquantize = s->dct_unquantize_h263;
1167 s->dct_unquantize = s->dct_unquantize_mpeg1;
1169 if(s->dct_error_sum){
1170 assert(s->avctx->noise_reduction && s->encoding);
1172 update_noise_reduction(s);
1176 if(s->avctx->xvmc_acceleration)
1177 return XVMC_field_start(s, avctx);
1182 /* generic function for encode/decode called after a frame has been coded/decoded */
1183 void MPV_frame_end(MpegEncContext *s)
1186 /* draw edge for correct motion prediction if outside */
1188 //just to make sure that all data is rendered.
1189 if(s->avctx->xvmc_acceleration){
/* pad the borders of the reconstructed reference frame so motion vectors
 * pointing outside the picture still find valid samples */
1193 if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1194 draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
1195 draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1196 draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1200 s->last_pict_type = s->pict_type;
1201 if(s->pict_type!=B_TYPE){
1202 s->last_non_b_pict_type= s->pict_type;
1205 /* copy back current_picture variables */
1206 for(i=0; i<MAX_PICTURE_COUNT; i++){
1207 if(s->picture[i].data[0] == s->current_picture.data[0]){
1208 s->picture[i]= s->current_picture;
1212 assert(i<MAX_PICTURE_COUNT);
1216 /* release non reference frames */
1217 for(i=0; i<MAX_PICTURE_COUNT; i++){
1218 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1219 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1223 // clear copies, to avoid confusion
1225 memset(&s->last_picture, 0, sizeof(Picture));
1226 memset(&s->next_picture, 0, sizeof(Picture));
1227 memset(&s->current_picture, 0, sizeof(Picture));
1232 * draws a line from (ex, ey) -> (sx, sy).
1233 * @param w width of the image
1234 * @param h height of the image
1235 * @param stride stride/linesize of the image
1236 * @param color color of the line (added onto the affected samples)
1238 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* clip both endpoints into the picture */
1241 sx= clip(sx, 0, w-1);
1242 sy= clip(sy, 0, h-1);
1243 ex= clip(ex, 0, w-1);
1244 ey= clip(ey, 0, h-1);
1246 buf[sy*stride + sx]+= color;
/* step along the dominant axis, tracking the other one with a
 * 16.16 fixed-point slope 'f' (rounded at the 1<<15 half-step) */
1248 if(ABS(ex - sx) > ABS(ey - sy)){
1253 buf+= sx + sy*stride;
1255 f= ((ey-sy)<<16)/ex;
1256 for(x= 0; x <= ex; x++){
1257 y= ((x*f) + (1<<15))>>16;
1258 buf[y*stride + x]+= color;
1265 buf+= sx + sy*stride;
1267 if(ey) f= ((ex-sx)<<16)/ey;
1269 for(y= 0; y <= ey; y++){
1270 x= ((y*f) + (1<<15))>>16;
1271 buf[y*stride + x]+= color;
1277 * draws an arrow from (ex, ey) -> (sx, sy).
1278 * @param w width of the image
1279 * @param h height of the image
1280 * @param stride stride/linesize of the image
1281 * @param color color of the arrow
1283 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* loose clip only; draw_line() does the exact per-pixel clipping */
1286 sx= clip(sx, -100, w+100);
1287 sy= clip(sy, -100, h+100);
1288 ex= clip(ex, -100, w+100);
1289 ey= clip(ey, -100, h+100);
/* only draw the arrow head when the vector is long enough to see it */
1294 if(dx*dx + dy*dy > 3*3){
1297 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1299 //FIXME subpixel accuracy
1300 rx= ROUNDED_DIV(rx*3<<4, length);
1301 ry= ROUNDED_DIV(ry*3<<4, length);
/* two short strokes of the arrow head, meeting at (sx, sy) */
1303 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1304 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1306 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1310 * prints debugging info for the given picture.
1312 void ff_print_debug_info(MpegEncContext *s, Picture *pict){
1314 if(!pict || !pict->mb_type) return;
/* textual per-macroblock dump: skip counters, qscale and/or mb type,
 * one character (or digit) per macroblock, one row per MB row */
1316 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1319 for(y=0; y<s->mb_height; y++){
1320 for(x=0; x<s->mb_width; x++){
1321 if(s->avctx->debug&FF_DEBUG_SKIP){
1322 int count= s->mbskip_table[x + y*s->mb_stride];
1323 if(count>9) count=9;
1324 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1326 if(s->avctx->debug&FF_DEBUG_QP){
1327 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1329 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1330 int mb_type= pict->mb_type[x + y*s->mb_stride];
1332 //Type & MV direction
1334 av_log(s->avctx, AV_LOG_DEBUG, "P");
1335 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1336 av_log(s->avctx, AV_LOG_DEBUG, "A");
1337 else if(IS_INTRA4x4(mb_type))
1338 av_log(s->avctx, AV_LOG_DEBUG, "i");
1339 else if(IS_INTRA16x16(mb_type))
1340 av_log(s->avctx, AV_LOG_DEBUG, "I");
1341 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1342 av_log(s->avctx, AV_LOG_DEBUG, "d");
1343 else if(IS_DIRECT(mb_type))
1344 av_log(s->avctx, AV_LOG_DEBUG, "D");
1345 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1346 av_log(s->avctx, AV_LOG_DEBUG, "g");
1347 else if(IS_GMC(mb_type))
1348 av_log(s->avctx, AV_LOG_DEBUG, "G");
1349 else if(IS_SKIP(mb_type))
1350 av_log(s->avctx, AV_LOG_DEBUG, "S");
1351 else if(!USES_LIST(mb_type, 1))
1352 av_log(s->avctx, AV_LOG_DEBUG, ">");
1353 else if(!USES_LIST(mb_type, 0))
1354 av_log(s->avctx, AV_LOG_DEBUG, "<");
1356 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1357 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: macroblock partitioning */
1362 av_log(s->avctx, AV_LOG_DEBUG, "+");
1363 else if(IS_16X8(mb_type))
1364 av_log(s->avctx, AV_LOG_DEBUG, "-");
1365 else if(IS_8X16(mb_type))
1366 av_log(s->avctx, AV_LOG_DEBUG, "¦");
1367 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1368 av_log(s->avctx, AV_LOG_DEBUG, " ");
1370 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: interlaced flag (H.264 only here) */
1373 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1374 av_log(s->avctx, AV_LOG_DEBUG, "=");
1376 av_log(s->avctx, AV_LOG_DEBUG, " ");
1378 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1380 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* visual motion-vector overlay: draw arrows directly into the luma plane */
1384 if((s->avctx->debug&FF_DEBUG_VIS_MV) && s->motion_val){
1385 const int shift= 1 + s->quarter_sample;
1387 uint8_t *ptr= pict->data[0];
1388 s->low_delay=0; //needed to see the vectors without trashing the buffers
1390 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1392 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1393 const int mb_index= mb_x + mb_y*s->mb_stride;
/* 8x8 partitioned MB: four arrows, one per block */
1394 if(IS_8X8(s->current_picture.mb_type[mb_index])){
1397 int sx= mb_x*16 + 4 + 8*(i&1);
1398 int sy= mb_y*16 + 4 + 8*(i>>1);
1399 int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1400 int mx= (s->motion_val[xy][0]>>shift) + sx;
1401 int my= (s->motion_val[xy][1]>>shift) + sy;
1402 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
/* otherwise: one arrow from the MB center */
1405 int sx= mb_x*16 + 8;
1406 int sy= mb_y*16 + 8;
1407 int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1408 int mx= (s->motion_val[xy][0]>>shift) + sx;
1409 int my= (s->motion_val[xy][1]>>shift) + sy;
1410 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1412 s->mbskip_table[mb_index]=0;
1418 #ifdef CONFIG_ENCODERS
/**
 * Computes the sum of absolute errors (SAE) of a 16x16 block of samples
 * against a constant reference value.
 * @param src    pointer to the top-left sample of the 16x16 block
 * @param ref    reference value each sample is compared against
 * @param stride number of bytes between vertically adjacent samples
 * @return sum over the block of |src[x + y*stride] - ref|
 *
 * NOTE(review): the block as previously committed was truncated (missing
 * declarations, return statement and closing braces); restored here.
 */
static int get_sae(uint8_t *src, int ref, int stride){
    int x, y;
    int acc = 0;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            int diff = src[x + y*stride] - ref;
            acc += diff < 0 ? -diff : diff;  /* absolute difference */
        }
    }

    return acc;
}
/* Estimates how many 16x16 blocks of 'src' would be cheaper to code intra
 * than inter against 'ref'; used by the B-frame decision heuristic.
 * NOTE(review): several interior lines (declarations, width/height setup
 * and the final return) are not visible in this chunk. */
1433 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1440 for(y=0; y<h; y+=16){
1441 for(x=0; x<w; x+=16){
1442 int offset= x + y*stride;
/* SAD against the reference frame: inter coding cost estimate */
1443 int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
/* rounded mean of the 16x16 = 256 samples */
1444 int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
/* SAE against the mean: intra coding cost estimate */
1445 int sae = get_sae(src + offset, mean, stride);
/* count blocks where intra looks clearly cheaper (500 = bias margin) */
1447 acc+= sae + 500 < sad;
/* Queues one user-supplied frame into s->input_picture[]; either references
 * the caller's buffers directly ("direct" mode) or copies the planes into an
 * internally allocated picture. NOTE(review): several interior lines are not
 * visible in this chunk; only comments were added. */
1454 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1457 const int encoding_delay= s->max_b_frames;
/* direct mode requires the caller to preserve the buffer for the whole
 * B-frame delay and to use exactly our strides */
1461 if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1462 if(pic_arg->linesize[0] != s->linesize) direct=0;
1463 if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1464 if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1466 // av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* direct: wrap the user's planes without copying */
1469 i= ff_find_unused_picture(s, 1);
1471 pic= (AVFrame*)&s->picture[i];
1475 pic->data[i]= pic_arg->data[i];
1476 pic->linesize[i]= pic_arg->linesize[i];
1478 alloc_picture(s, (Picture*)pic, 1);
/* non-direct: allocate an internal picture and copy into it */
1481 i= ff_find_unused_picture(s, 0);
1483 pic= (AVFrame*)&s->picture[i];
1486 alloc_picture(s, (Picture*)pic, 0);
/* skip the copy if the caller handed back our own buffers */
1488 if( pic->data[0] + offset == pic_arg->data[0]
1489 && pic->data[1] + offset == pic_arg->data[1]
1490 && pic->data[2] + offset == pic_arg->data[2]){
1493 int h_chroma_shift, v_chroma_shift;
1494 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* copy each plane, honouring chroma subsampling and differing strides */
1497 int src_stride= pic_arg->linesize[i];
1498 int dst_stride= i ? s->uvlinesize : s->linesize;
1499 int h_shift= i ? h_chroma_shift : 0;
1500 int v_shift= i ? v_chroma_shift : 0;
1501 int w= s->width >>h_shift;
1502 int h= s->height>>v_shift;
1503 uint8_t *src= pic_arg->data[i];
1504 uint8_t *dst= pic->data[i] + offset;
/* identical strides: one bulk memcpy, otherwise row by row */
1506 if(src_stride==dst_stride)
1507 memcpy(dst, src, src_stride*h);
1510 memcpy(dst, src, w);
1518 pic->quality= pic_arg->quality;
1519 pic->pict_type= pic_arg->pict_type;
1520 pic->pts = pic_arg->pts;
1522 if(s->input_picture[encoding_delay])
1523 pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1527 /* shift buffer entries */
1528 for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1529 s->input_picture[i-1]= s->input_picture[i];
1531 s->input_picture[encoding_delay]= (Picture*)pic;
/* Picks the next picture to encode from s->input_picture[], decides its
 * coding type (I/P/B) according to the configured B-frame strategy, and
 * reorders it into s->reordered_input_picture[] / s->new_picture.
 * NOTE(review): several interior lines are not visible in this chunk. */
1536 static void select_input_picture(MpegEncContext *s){
1538 int coded_pic_num=0;
1540 if(s->reordered_input_picture[0])
1541 coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1543 for(i=1; i<MAX_PICTURE_COUNT; i++)
1544 s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1545 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1547 /* set next picture types & ordering */
1548 if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
/* no reference available yet (or intra-only) -> force an I frame */
1549 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1550 s->reordered_input_picture[0]= s->input_picture[0];
1551 s->reordered_input_picture[0]->pict_type= I_TYPE;
1552 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
/* 2nd pass: reuse the picture types recorded by the first pass */
1556 if(s->flags&CODEC_FLAG_PASS2){
1557 for(i=0; i<s->max_b_frames+1; i++){
1558 int pict_num= s->input_picture[0]->display_picture_number + i;
1559 int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1560 s->input_picture[i]->pict_type= pict_type;
1562 if(i + 1 >= s->rc_context.num_entries) break;
1566 if(s->input_picture[0]->pict_type){
1567 /* user selected pict_type */
1568 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1569 if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1572 if(b_frames > s->max_b_frames){
1573 av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
1574 b_frames = s->max_b_frames;
/* strategy 0: always use the maximum number of B frames available */
1576 }else if(s->b_frame_strategy==0){
1577 b_frames= s->max_b_frames;
1578 while(b_frames && !s->input_picture[b_frames]) b_frames--;
/* strategy 1: score candidates by intra-block count and stop at the
 * first frame that differs too much from its predecessor */
1579 }else if(s->b_frame_strategy==1){
1580 for(i=1; i<s->max_b_frames+1; i++){
1581 if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
1582 s->input_picture[i]->b_frame_score=
1583 get_intra_count(s, s->input_picture[i ]->data[0],
1584 s->input_picture[i-1]->data[0], s->linesize) + 1;
1587 for(i=0; i<s->max_b_frames; i++){
1588 if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1591 b_frames= FFMAX(0, i-1);
/* reset scores of the frames we are about to consume */
1594 for(i=0; i<b_frames+1; i++){
1595 s->input_picture[i]->b_frame_score=0;
1598 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1603 //static int b_count=0;
1604 //b_count+= b_frames;
1605 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
/* the frame after the chosen B run becomes the next I/P reference */
1607 s->reordered_input_picture[0]= s->input_picture[b_frames];
1608 if( s->picture_in_gop_number + b_frames >= s->gop_size
1609 || s->reordered_input_picture[0]->pict_type== I_TYPE)
1610 s->reordered_input_picture[0]->pict_type= I_TYPE;
1612 s->reordered_input_picture[0]->pict_type= P_TYPE;
1613 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1614 for(i=0; i<b_frames; i++){
1616 s->reordered_input_picture[i+1]= s->input_picture[i];
1617 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1618 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1623 if(s->reordered_input_picture[0]){
1624 s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1626 copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1628 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1629 // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
1631 int i= ff_find_unused_picture(s, 0);
1632 Picture *pic= &s->picture[i];
1634 /* mark us unused / free shared pic */
1636 s->reordered_input_picture[0]->data[i]= NULL;
1637 s->reordered_input_picture[0]->type= 0;
1639 //FIXME bad, copy * except
1640 pic->pict_type = s->reordered_input_picture[0]->pict_type;
1641 pic->quality = s->reordered_input_picture[0]->quality;
1642 pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1643 pic->reference = s->reordered_input_picture[0]->reference;
1644 pic->pts = s->reordered_input_picture[0]->pts;
1646 alloc_picture(s, pic, 0);
1648 s->current_picture_ptr= pic;
1650 // input is not a shared pix -> reuse buffer for current_pix
1652 assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1653 || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1655 s->current_picture_ptr= s->reordered_input_picture[0];
1657 s->new_picture.data[i]+=16;
1660 copy_picture(&s->current_picture, s->current_picture_ptr);
1662 s->picture_number= s->new_picture.display_picture_number;
1663 //printf("dpn:%d\n", s->picture_number);
/* nothing queued: leave new_picture empty so the caller can detect it */
1665 memset(&s->new_picture, 0, sizeof(Picture));
/* Top-level encoder entry point: queues the input frame, selects the next
 * picture to code, encodes it, and returns the number of bytes written to
 * 'buf'. NOTE(review): some interior lines are not visible in this chunk. */
1669 int MPV_encode_picture(AVCodecContext *avctx,
1670 unsigned char *buf, int buf_size, void *data)
1672 MpegEncContext *s = avctx->priv_data;
1673 AVFrame *pic_arg = data;
1676 if(avctx->pix_fmt != PIX_FMT_YUV420P){
1677 av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
1681 init_put_bits(&s->pb, buf, buf_size);
1683 s->picture_in_gop_number++;
1685 load_input_picture(s, pic_arg);
1687 select_input_picture(s);
/* output? (a frame may be delayed while B frames accumulate) */
1690 if(s->new_picture.data[0]){
1692 s->pict_type= s->new_picture.pict_type;
1694 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1695 MPV_frame_start(s, avctx);
1697 encode_picture(s, s->picture_number);
/* export per-frame statistics to the public AVCodecContext fields */
1699 avctx->real_pict_num = s->picture_number;
1700 avctx->header_bits = s->header_bits;
1701 avctx->mv_bits = s->mv_bits;
1702 avctx->misc_bits = s->misc_bits;
1703 avctx->i_tex_bits = s->i_tex_bits;
1704 avctx->p_tex_bits = s->p_tex_bits;
1705 avctx->i_count = s->i_count;
1706 avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1707 avctx->skip_count = s->skip_count;
1711 if (s->out_format == FMT_MJPEG)
1712 mjpeg_picture_trailer(s);
1714 if(s->flags&CODEC_FLAG_PASS1)
1715 ff_write_pass1_stats(s);
1718 avctx->error[i] += s->current_picture_ptr->error[i];
1722 s->input_picture_number++;
/* byte-align and measure the bitstream written so far */
1724 flush_put_bits(&s->pb);
1725 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1727 s->total_bits += s->frame_bits;
1728 avctx->frame_bits = s->frame_bits;
1730 return pbBufPtr(&s->pb) - s->pb.buf;
1733 #endif //CONFIG_ENCODERS
/* Global motion compensation with a single warp point (MPEG-4 GMC, one
 * sprite point): one translational offset for the whole macroblock, with
 * fractional-pel interpolation done by dsp.gmc1().
 * NOTE(review): some interior lines are not visible in this chunk. */
1735 static inline void gmc1_motion(MpegEncContext *s,
1736 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1738 uint8_t **ref_picture, int src_offset)
1741 int offset, src_x, src_y, linesize, uvlinesize;
1742 int motion_x, motion_y;
/* luma: derive the integer source position and the sub-pel phase from
 * the sprite offset at the configured warping accuracy */
1745 motion_x= s->sprite_offset[0][0];
1746 motion_y= s->sprite_offset[0][1];
1747 src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1748 src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1749 motion_x<<=(3-s->sprite_warping_accuracy);
1750 motion_y<<=(3-s->sprite_warping_accuracy);
1751 src_x = clip(src_x, -16, s->width);
1752 if (src_x == s->width)
1754 src_y = clip(src_y, -16, s->height);
1755 if (src_y == s->height)
1758 linesize = s->linesize;
1759 uvlinesize = s->uvlinesize;
1761 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1763 dest_y+=dest_offset;
/* if the 17x17 source area crosses the padded edge, fetch it through the
 * edge-replicating fallback buffer */
1764 if(s->flags&CODEC_FLAG_EMU_EDGE){
1765 if( (unsigned)src_x >= s->h_edge_pos - 17
1766 || (unsigned)src_y >= s->v_edge_pos - 17){
1767 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1768 ptr= s->edge_emu_buffer;
/* sub-pel phase present -> bilinear gmc1, else plain (no-)rounding copy */
1772 if((motion_x|motion_y)&7){
1773 s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1774 s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1778 dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1779 if (s->no_rounding){
1780 s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1782 s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
1786 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma: same procedure at half resolution, offsets from warp point 1 */
1788 motion_x= s->sprite_offset[1][0];
1789 motion_y= s->sprite_offset[1][1];
1790 src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1791 src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1792 motion_x<<=(3-s->sprite_warping_accuracy);
1793 motion_y<<=(3-s->sprite_warping_accuracy);
1794 src_x = clip(src_x, -8, s->width>>1);
1795 if (src_x == s->width>>1)
1797 src_y = clip(src_y, -8, s->height>>1);
1798 if (src_y == s->height>>1)
1801 offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1802 ptr = ref_picture[1] + offset;
1803 if(s->flags&CODEC_FLAG_EMU_EDGE){
1804 if( (unsigned)src_x >= (s->h_edge_pos>>1) - 9
1805 || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
1806 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1807 ptr= s->edge_emu_buffer;
1811 s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1813 ptr = ref_picture[2] + offset;
1815 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1816 ptr= s->edge_emu_buffer;
1818 s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* General global motion compensation (MPEG-4 GMC with an affine warp):
 * the per-macroblock source position is derived from sprite_offset plus
 * the sprite_delta matrix; dsp.gmc() does the warped fetch.
 * NOTE(review): some interior lines are not visible in this chunk. */
1823 static inline void gmc_motion(MpegEncContext *s,
1824 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1826 uint8_t **ref_picture, int src_offset)
1829 int linesize, uvlinesize;
1830 const int a= s->sprite_warping_accuracy;
1833 linesize = s->linesize;
1834 uvlinesize = s->uvlinesize;
1836 ptr = ref_picture[0] + src_offset;
1838 dest_y+=dest_offset;
/* luma warp origin for this macroblock (two 8-wide halves below) */
1840 ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1841 oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1843 s->dsp.gmc(dest_y, ptr, linesize, 16,
1846 s->sprite_delta[0][0], s->sprite_delta[0][1],
1847 s->sprite_delta[1][0], s->sprite_delta[1][1],
1848 a+1, (1<<(2*a+1)) - s->no_rounding,
1849 s->h_edge_pos, s->v_edge_pos);
1850 s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1851 ox + s->sprite_delta[0][0]*8,
1852 oy + s->sprite_delta[1][0]*8,
1853 s->sprite_delta[0][0], s->sprite_delta[0][1],
1854 s->sprite_delta[1][0], s->sprite_delta[1][1],
1855 a+1, (1<<(2*a+1)) - s->no_rounding,
1856 s->h_edge_pos, s->v_edge_pos);
1858 if(s->flags&CODEC_FLAG_GRAY) return;
1861 dest_cb+=dest_offset>>1;
1862 dest_cr+=dest_offset>>1;
/* chroma warp origin, at half resolution */
1864 ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1865 oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1867 ptr = ref_picture[1] + (src_offset>>1);
1868 s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1871 s->sprite_delta[0][0], s->sprite_delta[0][1],
1872 s->sprite_delta[1][0], s->sprite_delta[1][1],
1873 a+1, (1<<(2*a+1)) - s->no_rounding,
1874 s->h_edge_pos>>1, s->v_edge_pos>>1);
1876 ptr = ref_picture[2] + (src_offset>>1);
1877 s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1880 s->sprite_delta[0][0], s->sprite_delta[0][1],
1881 s->sprite_delta[1][0], s->sprite_delta[1][1],
1882 a+1, (1<<(2*a+1)) - s->no_rounding,
1883 s->h_edge_pos>>1, s->v_edge_pos>>1);
1887 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
1888 * @param buf destination buffer
1889 * @param src source buffer
1890 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
1891 * @param block_w width of block
1892 * @param block_h height of block
1893 * @param src_x x coordinate of the top left sample of the block in the source buffer
1894 * @param src_y y coordinate of the top left sample of the block in the source buffer
1895 * @param w width of the source buffer
1896 * @param h height of the source buffer
1898 void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
1899 int src_x, int src_y, int w, int h){
1901 int start_y, start_x, end_y, end_x;
/* clamp a fully out-of-bounds request back to the nearest valid row/column
 * (NOTE(review): the enclosing 'if' headers are not visible in this chunk) */
1904 src+= (h-1-src_y)*linesize;
1906 }else if(src_y<=-block_h){
1907 src+= (1-block_h-src_y)*linesize;
1913 }else if(src_x<=-block_w){
1914 src+= (1-block_w-src_x);
/* the in-bounds sub-rectangle of the requested block */
1918 start_y= FFMAX(0, -src_y);
1919 start_x= FFMAX(0, -src_x);
1920 end_y= FFMIN(block_h, h-src_y);
1921 end_x= FFMIN(block_w, w-src_x);
1923 // copy existing part
1924 for(y=start_y; y<end_y; y++){
1925 for(x=start_x; x<end_x; x++){
1926 buf[x + y*linesize]= src[x + y*linesize];
/* replicate the first valid row upwards */
1931 for(y=0; y<start_y; y++){
1932 for(x=start_x; x<end_x; x++){
1933 buf[x + y*linesize]= buf[x + start_y*linesize];
/* replicate the last valid row downwards */
1938 for(y=end_y; y<block_h; y++){
1939 for(x=start_x; x<end_x; x++){
1940 buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
/* replicate the leftmost/rightmost valid columns sideways */
1944 for(y=0; y<block_h; y++){
1946 for(x=0; x<start_x; x++){
1947 buf[x + y*linesize]= buf[start_x + y*linesize];
1951 for(x=end_x; x<block_w; x++){
1952 buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1958 /* apply one mpeg motion vector to the three components */
/* Half-pel motion compensation for one luma block plus both chroma blocks.
 * NOTE(review): some interior lines are not visible in this chunk. */
1959 static inline void mpeg_motion(MpegEncContext *s,
1960 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1962 uint8_t **ref_picture, int src_offset,
1963 int field_based, op_pixels_func (*pix_op)[4],
1964 int motion_x, int motion_y, int h)
1967 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1970 if(s->quarter_sample)
/* dxy encodes the half-pel phase: bit 0 = horizontal, bit 1 = vertical */
1976 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1977 src_x = s->mb_x * 16 + (motion_x >> 1);
1978 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1980 /* WARNING: do not forget half pels */
1981 height = s->height >> field_based;
1982 v_edge_pos = s->v_edge_pos >> field_based;
1983 src_x = clip(src_x, -16, s->width);
1984 if (src_x == s->width)
1986 src_y = clip(src_y, -16, height);
1987 if (src_y == height)
1989 linesize = s->current_picture.linesize[0] << field_based;
1990 uvlinesize = s->current_picture.linesize[1] << field_based;
1991 ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1992 dest_y += dest_offset;
/* source area crosses the padded picture edge -> go through the
 * edge-replicating fallback buffer */
1994 if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
1995 if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
1996 || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
1997 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
1998 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1999 ptr= s->edge_emu_buffer + src_offset;
2003 pix_op[0][dxy](dest_y, ptr, linesize, h);
2005 if(s->flags&CODEC_FLAG_GRAY) return;
/* derive the chroma vector; H.263 rounds differently than MPEG-1/2 */
2007 if (s->out_format == FMT_H263) {
2009 if ((motion_x & 3) != 0)
2011 if ((motion_y & 3) != 0)
2018 dxy = ((my & 1) << 1) | (mx & 1);
2023 src_x = s->mb_x * 8 + mx;
2024 src_y = s->mb_y * (8 >> field_based) + my;
2025 src_x = clip(src_x, -8, s->width >> 1);
2026 if (src_x == (s->width >> 1))
2028 src_y = clip(src_y, -8, height >> 1);
2029 if (src_y == (height >> 1))
2031 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2032 ptr = ref_picture[1] + offset;
2034 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
2035 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2036 ptr= s->edge_emu_buffer + (src_offset >> 1);
2038 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2040 ptr = ref_picture[2] + offset;
2042 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
2043 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2044 ptr= s->edge_emu_buffer + (src_offset >> 1);
2046 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/* Quarter-pel motion compensation for one luma block; chroma is still done
 * at half-pel precision via pix_op. NOTE(review): some interior lines are
 * not visible in this chunk. */
2049 static inline void qpel_motion(MpegEncContext *s,
2050 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2052 uint8_t **ref_picture, int src_offset,
2053 int field_based, op_pixels_func (*pix_op)[4],
2054 qpel_mc_func (*qpix_op)[16],
2055 int motion_x, int motion_y, int h)
2058 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
/* dxy encodes the quarter-pel phase: 2 bits horizontal, 2 bits vertical */
2061 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2062 src_x = s->mb_x * 16 + (motion_x >> 2);
2063 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
2065 height = s->height >> field_based;
2066 v_edge_pos = s->v_edge_pos >> field_based;
2067 src_x = clip(src_x, -16, s->width);
2068 if (src_x == s->width)
2070 src_y = clip(src_y, -16, height);
2071 if (src_y == height)
2073 linesize = s->linesize << field_based;
2074 uvlinesize = s->uvlinesize << field_based;
2075 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
2076 dest_y += dest_offset;
2077 //printf("%d %d %d\n", src_x, src_y, dxy);
/* edge-crossing source area -> edge-replicating fallback buffer */
2079 if(s->flags&CODEC_FLAG_EMU_EDGE){
2080 if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
2081 || (unsigned)src_y > v_edge_pos - (motion_y&3) - h ){
2082 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,
2083 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
2084 ptr= s->edge_emu_buffer + src_offset;
2089 qpix_op[0][dxy](dest_y, ptr, linesize);
2091 //damn interlaced mode
2092 //FIXME boundary mirroring is not exactly correct here
2093 qpix_op[1][dxy](dest_y , ptr , linesize);
2094 qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
2097 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma vector derivation, including two encoder-bug workarounds */
2102 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
2103 static const int rtab[8]= {0,0,1,1,0,0,0,1};
2104 mx= (motion_x>>1) + rtab[motion_x&7];
2105 my= (motion_y>>1) + rtab[motion_y&7];
2106 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
2107 mx= (motion_x>>1)|(motion_x&1);
2108 my= (motion_y>>1)|(motion_y&1);
2116 dxy= (mx&1) | ((my&1)<<1);
2120 src_x = s->mb_x * 8 + mx;
2121 src_y = s->mb_y * (8 >> field_based) + my;
2122 src_x = clip(src_x, -8, s->width >> 1);
2123 if (src_x == (s->width >> 1))
2125 src_y = clip(src_y, -8, height >> 1);
2126 if (src_y == (height >> 1))
2129 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2130 ptr = ref_picture[1] + offset;
2132 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
2133 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2134 ptr= s->edge_emu_buffer + (src_offset >> 1);
2136 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2138 ptr = ref_picture[2] + offset;
2140 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
2141 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2142 ptr= s->edge_emu_buffer + (src_offset >> 1);
2144 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2147 inline int ff_h263_round_chroma(int x){
2149 return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2152 return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2157 * motion compensation of a single macroblock
2159 * @param dest_y luma destination pointer
2160 * @param dest_cb chroma cb/u destination pointer
2161 * @param dest_cr chroma cr/v destination pointer
2162 * @param dir direction (0->forward, 1->backward)
2163 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2164 * @param pix_op halfpel motion compensation function (average or put normally)
2165 * @param qpix_op qpel motion compensation function (average or put normally)
2166 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* NOTE(review): this function continues past the end of this chunk and
 * several interior lines are not visible; only comments were added. */
2168 static inline void MPV_motion(MpegEncContext *s,
2169 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2170 int dir, uint8_t **ref_picture,
2171 op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2173 int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
2175 uint8_t *ptr, *dest;
2181 switch(s->mv_type) {
/* 16x16: one vector for the whole macroblock; may also be GMC or
 * MPEG-4 "mspel" depending on the codec state */
2185 if(s->real_sprite_warping_points==1){
2186 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2189 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2192 }else if(s->quarter_sample){
2193 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2196 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2198 ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2199 ref_picture, pix_op,
2200 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2204 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2207 s->mv[dir][0][0], s->mv[dir][0][1], 16);
/* 8x8: four vectors, one per 8x8 luma block; quarter-pel variant first */
2213 if(s->quarter_sample){
2215 motion_x = s->mv[dir][i][0];
2216 motion_y = s->mv[dir][i][1];
2218 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2219 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2220 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2222 /* WARNING: do not forget half pels */
2223 src_x = clip(src_x, -16, s->width);
2224 if (src_x == s->width)
2226 src_y = clip(src_y, -16, s->height);
2227 if (src_y == s->height)
2230 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2231 if(s->flags&CODEC_FLAG_EMU_EDGE){
2232 if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
2233 || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
2234 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2235 ptr= s->edge_emu_buffer;
2238 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2239 qpix_op[1][dxy](dest, ptr, s->linesize);
/* accumulate the four vectors for the single chroma vector below */
2241 mx += s->mv[dir][i][0]/2;
2242 my += s->mv[dir][i][1]/2;
/* half-pel 8x8 variant */
2246 motion_x = s->mv[dir][i][0];
2247 motion_y = s->mv[dir][i][1];
2249 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2250 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
2251 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
2253 /* WARNING: do not forget half pels */
2254 src_x = clip(src_x, -16, s->width);
2255 if (src_x == s->width)
2257 src_y = clip(src_y, -16, s->height);
2258 if (src_y == s->height)
2261 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2262 if(s->flags&CODEC_FLAG_EMU_EDGE){
2263 if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 8
2264 || (unsigned)src_y > s->v_edge_pos - (motion_y&1) - 8){
2265 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2266 ptr= s->edge_emu_buffer;
2269 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2270 pix_op[1][dxy](dest, ptr, s->linesize, 8);
2272 mx += s->mv[dir][i][0];
2273 my += s->mv[dir][i][1];
2277 if(s->flags&CODEC_FLAG_GRAY) break;
2278 /* In case of 8X8, we construct a single chroma motion vector
2279 with a special rounding */
2280 mx= ff_h263_round_chroma(mx);
2281 my= ff_h263_round_chroma(my);
2282 dxy = ((my & 1) << 1) | (mx & 1);
2286 src_x = mb_x * 8 + mx;
2287 src_y = mb_y * 8 + my;
2288 src_x = clip(src_x, -8, s->width/2);
2289 if (src_x == s->width/2)
2291 src_y = clip(src_y, -8, s->height/2);
2292 if (src_y == s->height/2)
2295 offset = (src_y * (s->uvlinesize)) + src_x;
2296 ptr = ref_picture[1] + offset;
2297 if(s->flags&CODEC_FLAG_EMU_EDGE){
2298 if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
2299 || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
2300 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2301 ptr= s->edge_emu_buffer;
2305 pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
2307 ptr = ref_picture[2] + offset;
2309 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2310 ptr= s->edge_emu_buffer;
2312 pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
/* field-based prediction: each field is motion-compensated separately
 * with half the block height */
2315 if (s->picture_structure == PICT_FRAME) {
2316 if(s->quarter_sample){
2318 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2319 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2321 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2323 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2324 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2326 s->mv[dir][1][0], s->mv[dir][1][1], 8);
2329 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2330 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2332 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2334 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2335 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2337 s->mv[dir][1][0], s->mv[dir][1][1], 8);
/* field pictures: may predict from the current picture's other field */
2341 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2342 offset= s->field_select[dir][0] ? s->linesize : 0;
2344 ref_picture= s->current_picture.data;
2345 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2348 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2349 ref_picture, offset,
2351 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2356 uint8_t ** ref2picture;
2358 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2359 ref2picture= ref_picture;
2360 offset= s->field_select[dir][0] ? s->linesize : 0;
2362 ref2picture= s->current_picture.data;
2363 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2366 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2367 ref2picture, offset,
2369 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2372 if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){
2373 ref2picture= ref_picture;
2374 offset= s->field_select[dir][1] ? s->linesize : 0;
2376 ref2picture= s->current_picture.data;
2377 offset= s->field_select[dir][1] ? s->linesize : -s->linesize;
2379 // I know it is ugly but this is the only way to fool emu_edge without rewriting mpeg_motion
2380 mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize,
2382 ref2picture, offset,
2384 s->mv[dir][1][0], s->mv[dir][1][1]+16, 8);
2390 op_pixels_func (*dmv_pix_op)[4];
2393 dmv_pix_op = s->dsp.put_pixels_tab;
2395 if(s->picture_structure == PICT_FRAME){
2396 //put top field from top field
2397 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2400 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2401 //put bottom field from bottom field
2402 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2403 ref_picture, s->linesize,
2405 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2407 dmv_pix_op = s->dsp.avg_pixels_tab;
2409 //avg top field from bottom field
2410 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2411 ref_picture, s->linesize,
2413 s->mv[dir][2][0], s->mv[dir][2][1], 8);
2414 //avg bottom field from top field
2415 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2418 s->mv[dir][3][0], s->mv[dir][3][1], 8);
2421 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2424 //put field from the same parity
2425 //same parity is never in the same frame
2426 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2429 s->mv[dir][0][0],s->mv[dir][0][1],16);
2431 // after put we make avg of the same block
2432 dmv_pix_op=s->dsp.avg_pixels_tab;
2434 //opposite parity is always in the same frame if this is second field
2435 if(!s->first_field){
2436 ref_picture = s->current_picture.data;
2437 //top field is one linesize from frame beginning
2438 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2439 -s->linesize : s->linesize;
2441 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2444 //avg field from the opposite parity
2445 mpeg_motion(s, dest_y, dest_cb, dest_cr,0,
2446 ref_picture, offset,
2448 s->mv[dir][2][0],s->mv[dir][2][1],16);
2457 /* put block[] to dest[] */
/* Dequantizes block i with the current qscale, then writes (overwrites,
 * does not add) its inverse DCT into dest — the intra reconstruction path. */
2458 static inline void put_dct(MpegEncContext *s,
2459 DCTELEM *block, int i, uint8_t *dest, int line_size)
2461 s->dct_unquantize(s, block, i, s->qscale);
2462 s->dsp.idct_put (dest, line_size, block);
2465 /* add block[] to dest[] */
/* Adds the inverse DCT of block i onto dest, but only when the block has
 * coded coefficients (block_last_index >= 0). No dequantization here:
 * the coefficients are expected to be already dequantized. */
2466 static inline void add_dct(MpegEncContext *s,
2467 DCTELEM *block, int i, uint8_t *dest, int line_size)
2469 if (s->block_last_index[i] >= 0) {
2470 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but dequantizes the block first — used for codecs whose
 * decoded coefficients are still in quantized form. Skipped entirely when
 * the block has no coded coefficients. */
2474 static inline void add_dequant_dct(MpegEncContext *s,
2475 DCTELEM *block, int i, uint8_t *dest, int line_size)
2477 if (s->block_last_index[i] >= 0) {
2478 s->dct_unquantize(s, block, i, s->qscale);
2480 s->dsp.idct_add (dest, line_size, block);
2485 * cleans dc, ac, coded_block for the current non intra MB
/* Resets the intra-prediction state surrounding the current macroblock:
 * DC predictors back to the reset value 1024, AC prediction buffers to 0,
 * and (for MSMPEG4 v3+) the coded_block flags, so a later intra MB does
 * not predict from stale inter-MB data. Finally marks this MB non-intra. */
2487 void ff_clean_intra_table_entries(MpegEncContext *s)
2489 int wrap = s->block_wrap[0];
2490 int xy = s->block_index[0];
/* luma DC predictors (4 blocks of the MB) */
2493 s->dc_val[0][xy + 1 ] =
2494 s->dc_val[0][xy + wrap] =
2495 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC prediction rows/columns */
2497 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2498 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2499 if (s->msmpeg4_version>=3) {
2500 s->coded_block[xy ] =
2501 s->coded_block[xy + 1 ] =
2502 s->coded_block[xy + wrap] =
2503 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: recompute index in the chroma block grid */
2506 wrap = s->block_wrap[4];
2507 xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2509 s->dc_val[2][xy] = 1024;
/* chroma AC prediction */
2511 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2512 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2514 s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2517 /* generic function called after a macroblock has been parsed by the
2518 decoder or after it has been encoded by the encoder.
2520 Important variables used:
2521 s->mb_intra : true if intra macroblock
2522 s->mv_dir : motion vector direction
2523 s->mv_type : motion vector type
2524 s->mv : motion vector
2525 s->interlaced_dct : true if interlaced dct used (mpeg2)
2527 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2530 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware (XvMC) acceleration reconstructs the whole MB by itself */
2532 if(s->avctx->xvmc_acceleration){
2533 XVMC_decode_mb(s);//xvmc uses pblocks
2541 s->current_picture.qscale_table[mb_xy]= s->qscale;
2543 /* update DC predictors for P macroblocks */
2545 if (s->h263_pred || s->h263_aic) {
2546 if(s->mbintra_table[mb_xy])
2547 ff_clean_intra_table_entries(s);
/* reset DC predictor to mid-range for the configured DC precision */
2551 s->last_dc[2] = 128 << s->intra_dc_precision;
2554 else if (s->h263_pred || s->h263_aic)
2555 s->mbintra_table[mb_xy]=1;
/* reconstruction is skipped while encoding non-reference data unless PSNR is requested */
2557 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2558 uint8_t *dest_y, *dest_cb, *dest_cr;
2559 int dct_linesize, dct_offset;
2560 op_pixels_func (*op_pix)[4];
2561 qpel_mc_func (*op_qpix)[16];
2562 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2563 const int uvlinesize= s->current_picture.linesize[1];
2565 /* avoid copy if macroblock skipped in last frame too */
2566 /* skip only during decoding as we might trash the buffers during encoding a bit */
2568 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2569 const int age= s->current_picture.age;
2575 assert(s->pict_type!=I_TYPE);
2577 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2578 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2580 /* if previous was skipped too, then nothing to do ! */
2581 if (*mbskip_ptr >= age && s->current_picture.reference){
2584 } else if(!s->current_picture.reference){
2585 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2586 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2588 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: treat the MB as two fields — doubled stride, second field starts one line down */
2592 if (s->interlaced_dct) {
2593 dct_linesize = linesize * 2;
2594 dct_offset = linesize;
2596 dct_linesize = linesize;
2597 dct_offset = linesize * 8;
2601 dest_cb= s->dest[1];
2602 dest_cr= s->dest[2];
2605 /* motion handling */
2606 /* decoding or more than one mb_type (MC was already done otherwise) */
2608 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2609 op_pix = s->dsp.put_pixels_tab;
2610 op_qpix= s->dsp.put_qpel_pixels_tab;
2612 op_pix = s->dsp.put_no_rnd_pixels_tab;
2613 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
/* forward prediction is "put" first; backward (if any) is then averaged on top */
2616 if (s->mv_dir & MV_DIR_FORWARD) {
2617 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2618 op_pix = s->dsp.avg_pixels_tab;
2619 op_qpix= s->dsp.avg_qpel_pixels_tab;
2621 if (s->mv_dir & MV_DIR_BACKWARD) {
2622 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2626 /* skip dequant / idct if we are really late ;) */
2627 if(s->hurry_up>1) return;
2629 /* add dct residue */
/* codecs whose coefficients are still quantized take the dequantize+add path */
2630 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2631 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2632 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2633 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2634 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2635 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2637 if(!(s->flags&CODEC_FLAG_GRAY)){
2638 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
2639 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
2641 } else if(s->codec_id != CODEC_ID_WMV2){
2642 add_dct(s, block[0], 0, dest_y, dct_linesize);
2643 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2644 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2645 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2647 if(!(s->flags&CODEC_FLAG_GRAY)){
2648 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2649 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* WMV2 has its own MB reconstruction */
2654 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2658 /* dct only in intra block */
2659 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2660 put_dct(s, block[0], 0, dest_y, dct_linesize);
2661 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2662 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2663 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2665 if(!(s->flags&CODEC_FLAG_GRAY)){
2666 put_dct(s, block[4], 4, dest_cb, uvlinesize);
2667 put_dct(s, block[5], 5, dest_cr, uvlinesize);
/* MPEG-1/2 intra coefficients arrive already dequantized: IDCT only */
2670 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2671 s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]);
2672 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2673 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2675 if(!(s->flags&CODEC_FLAG_GRAY)){
2676 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2677 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2684 #ifdef CONFIG_ENCODERS
/* Zeroes out a quantized block whose weighted coefficient "score" falls
 * below threshold — cheap rate saving for blocks that encode almost
 * nothing visible. n is the block index; a negative threshold presumably
 * selects a stricter mode (the sign flip below) — TODO confirm, the
 * controlling lines are outside this view. Updates block_last_index[n]. */
2686 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
/* per-position weights in scan order */
2688 static const char tab[64]=
2700 DCTELEM *block= s->block[n];
2701 const int last_index= s->block_last_index[n];
2706 threshold= -threshold;
2710 /* are all of the coefficients we could set to zero already zero? */
2711 if(last_index<=skip_dc - 1) return;
/* accumulate a weighted score over the coded coefficients */
2713 for(i=0; i<=last_index; i++){
2714 const int j = s->intra_scantable.permutated[i];
2715 const int level = ABS(block[j]);
2717 if(skip_dc && i==0) continue;
2726 if(score >= threshold) return;
/* below threshold: clear everything (except a possibly-kept DC) */
2727 for(i=skip_dc; i<=last_index; i++){
2728 const int j = s->intra_scantable.permutated[i];
2731 if(block[0]) s->block_last_index[n]= 0;
2732 else s->block_last_index[n]= -1;
/* Clamps all quantized coefficients of a block into the encoder's legal
 * range [min_qcoeff, max_qcoeff]; called after dct_quantize() signals an
 * overflow. The intra DC coefficient is left untouched. */
2735 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2738 const int maxlevel= s->max_qcoeff;
2739 const int minlevel= s->min_qcoeff;
2742 i=1; //skip clipping of intra dc
2746 for(;i<=last_index; i++){
2747 const int j= s->intra_scantable.permutated[i];
2748 int level = block[j];
2750 if (level>maxlevel) level=maxlevel;
2751 else if(level<minlevel) level=minlevel;
/* Sum of absolute vertical differences between adjacent lines of a 16x8
 * region (SAD variant) — used by the encoder to decide between
 * progressive and interlaced DCT. */
2758 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2763 for(x=0; x<16; x+=4){
2764 score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride])
2765 +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
/* Same vertical-difference measure (SAD variant), computed on the
 * residual s1 - s2 — used for the interlaced-DCT decision on inter MBs. */
2773 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2778 for(x=0; x<16; x++){
2779 score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
/* Squared-error variants of the two comparison helpers above — presumably
 * selected by a compile-time switch whose #if lines fall outside this
 * view; the interface and use are identical, only the metric differs. */
2788 #define SQ(a) ((a)*(a))
2790 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2795 for(x=0; x<16; x+=4){
2796 score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride])
2797 +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2805 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2810 for(x=0; x<16; x++){
2811 score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
2822 #endif //CONFIG_ENCODERS
2826 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Notifies the user's draw_horiz_band callback that rows [y, y+h) of the
 * appropriate picture (current for B/low-delay/coded-order, otherwise the
 * last picture) are ready. No-op when no callback is set or when field
 * slices are not permitted by the slice flags. */
2828 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2829 if (s->avctx->draw_horiz_band) {
2833 if(s->picture_structure != PICT_FRAME){
2836 if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
/* clip h so the last band does not run past the picture */
2839 h= FFMIN(h, s->height - y);
2841 if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2842 src= (AVFrame*)s->current_picture_ptr;
2843 else if(s->last_picture_ptr)
2844 src= (AVFrame*)s->last_picture_ptr;
2848 if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* NOTE(review): stray double semicolons below are harmless */
2854 offset[0]= y * s->linesize;;
2856 offset[2]= (y>>1) * s->uvlinesize;;
2862 s->avctx->draw_horiz_band(s->avctx, src, offset,
2863 y, s->picture_structure, h);
/* Computes the per-MB block indices (4 luma + 2 chroma positions in the
 * prediction tables) and the destination pointers for the current MB.
 * When drawing bands of a B frame, dest points into the start-of-row
 * area instead of the MB's final position (the -16 / -8 offsets). */
2867 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2868 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2869 const int uvlinesize= s->current_picture.linesize[1];
/* luma blocks: two per row of the MB, with a 1-block border in the tables */
2871 s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2872 s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2;
2873 s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
2874 s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2;
/* chroma blocks live after the luma plane entries in the same tables */
2875 s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2876 s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2878 if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
2879 s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
2880 s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
2881 s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
2883 s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16 - 16;
2884 s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2885 s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2889 #ifdef CONFIG_ENCODERS
/* Encodes one macroblock: gathers the 6 blocks (4 luma + 2 chroma) either
 * directly (intra) or as a motion-compensated residual (inter), optionally
 * skips near-empty blocks, runs DCT + quantization, applies coefficient
 * elimination, and emits the bitstream through the codec-specific MB coder.
 * motion_x/motion_y are the motion vector components passed on to the VLC
 * coder. */
2891 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2893 const int mb_x= s->mb_x;
2894 const int mb_y= s->mb_y;
2897 int dct_offset = s->linesize*8; //default for progressive frames
2899 for(i=0; i<6; i++) skip_dct[i]=0;
/* adaptive quantization: pick per-MB lambda/qscale and derive dquant */
2901 if(s->adaptive_quant){
2902 const int last_qp= s->qscale;
2903 const int mb_xy= mb_x + mb_y*s->mb_stride;
2905 s->lambda= s->lambda_table[mb_xy];
2908 if(!(s->flags&CODEC_FLAG_QP_RD)){
2909 s->dquant= s->qscale - last_qp;
2911 if(s->out_format==FMT_H263)
2912 s->dquant= clip(s->dquant, -2, 2); //FIXME RD
2914 if(s->codec_id==CODEC_ID_MPEG4){
2916 if((s->mv_dir&MV_DIRECT) || s->mv_type==MV_TYPE_8X8)
2921 s->qscale= last_qp + s->dquant;
2922 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2923 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
/* intra path: load source pixels, padding at picture edges */
2931 wrap_y = s->linesize;
2932 ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2934 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2935 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2936 ptr= s->edge_emu_buffer;
/* choose frame vs field DCT by comparing vertical-correlation scores */
2940 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2941 int progressive_score, interlaced_score;
2943 progressive_score= pix_vcmp16x8(ptr, wrap_y ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2944 interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y , wrap_y*2);
2946 if(progressive_score > interlaced_score + 100){
2947 s->interlaced_dct=1;
2952 s->interlaced_dct=0;
2955 s->dsp.get_pixels(s->block[0], ptr , wrap_y);
2956 s->dsp.get_pixels(s->block[1], ptr + 8, wrap_y);
2957 s->dsp.get_pixels(s->block[2], ptr + dct_offset , wrap_y);
2958 s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
2960 if(s->flags&CODEC_FLAG_GRAY){
2964 int wrap_c = s->uvlinesize;
2965 ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2967 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2968 ptr= s->edge_emu_buffer;
2970 s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2972 ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2974 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2975 ptr= s->edge_emu_buffer;
2977 s->dsp.get_pixels(s->block[5], ptr, wrap_c);
/* inter path: run motion compensation into s->dest, then take the residual */
2980 op_pixels_func (*op_pix)[4];
2981 qpel_mc_func (*op_qpix)[16];
2982 uint8_t *dest_y, *dest_cb, *dest_cr;
2983 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2987 dest_y = s->dest[0];
2988 dest_cb = s->dest[1];
2989 dest_cr = s->dest[2];
2990 wrap_y = s->linesize;
2991 wrap_c = s->uvlinesize;
2992 ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2993 ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2994 ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2996 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2997 op_pix = s->dsp.put_pixels_tab;
2998 op_qpix= s->dsp.put_qpel_pixels_tab;
3000 op_pix = s->dsp.put_no_rnd_pixels_tab;
3001 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
/* forward is "put", backward (if present) averaged on top — same as decode */
3004 if (s->mv_dir & MV_DIR_FORWARD) {
3005 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
3006 op_pix = s->dsp.avg_pixels_tab;
3007 op_qpix= s->dsp.avg_qpel_pixels_tab;
3009 if (s->mv_dir & MV_DIR_BACKWARD) {
3010 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
3013 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
3014 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
3015 ptr_y= s->edge_emu_buffer;
/* interlaced-DCT decision on the residual (note the larger bias, 600) */
3019 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
3020 int progressive_score, interlaced_score;
3022 progressive_score= pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y )
3023 + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y );
3024 interlaced_score = pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y*2)
3025 + pix_diff_vcmp16x8(ptr_y + wrap_y , dest_y + wrap_y , wrap_y*2);
3027 if(progressive_score > interlaced_score + 600){
3028 s->interlaced_dct=1;
3033 s->interlaced_dct=0;
3036 s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
3037 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
3038 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
3039 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
3041 if(s->flags&CODEC_FLAG_GRAY){
3046 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3047 ptr_cb= s->edge_emu_buffer;
3049 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
3051 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3052 ptr_cr= s->edge_emu_buffer;
3054 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
3056 /* pre quantization */
/* cheap skip heuristic: low MC variance + small SAD per block → skip its DCT */
3057 if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
3059 if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
3060 if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
3061 if(s->dsp.pix_abs8x8(ptr_y +dct_offset , dest_y +dct_offset , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
3062 if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
3063 if(s->dsp.pix_abs8x8(ptr_cb , dest_cb , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
3064 if(s->dsp.pix_abs8x8(ptr_cr , dest_cr , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
3070 if(skip_dct[i]) num++;
3073 if(s->mb_x==0 && s->mb_y==0){
3075 printf("%6d %1d\n", stat[i], i);
3084 /* DCT & quantize */
/* MJPEG always quantizes with qscale 8 */
3085 if(s->out_format==FMT_MJPEG){
3088 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
3089 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3095 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
3096 // FIXME we could decide to change to quantizer instead of clipping
3097 // JS: I don't think that would be a good idea it could lower quality instead
3098 // of improve it. Just INTRADC clipping deserves changes in quantizer
3099 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3101 s->block_last_index[i]= -1;
3104 if(s->luma_elim_threshold && !s->mb_intra)
3106 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
3107 if(s->chroma_elim_threshold && !s->mb_intra)
3109 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
3111 if(s->flags & CODEC_FLAG_CBP_RD){
3113 if(s->block_last_index[i] == -1)
3114 s->coded_score[i]= INT_MAX/256;
/* gray-only intra: force chroma to flat mid-gray DC */
3119 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
3120 s->block_last_index[4]=
3121 s->block_last_index[5]= 0;
3123 s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
3126 /* huffman encode */
3127 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
3128 case CODEC_ID_MPEG1VIDEO:
3129 case CODEC_ID_MPEG2VIDEO:
3130 mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
3132 case CODEC_ID_MPEG4:
3133 mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3134 case CODEC_ID_MSMPEG4V2:
3135 case CODEC_ID_MSMPEG4V3:
3137 msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3139 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
3141 case CODEC_ID_H263P:
3144 h263_encode_mb(s, s->block, motion_x, motion_y); break;
3146 case CODEC_ID_MJPEG:
3147 mjpeg_encode_mb(s, s->block); break;
3153 #endif //CONFIG_ENCODERS
3156 * combines the (truncated) bitstream to a complete frame
3157 * @returns -1 if no complete frame could be created
/* Accumulates input chunks in the parse-context buffer until a complete
 * frame is available. `next` is the offset of the next frame start found
 * by the caller (END_NOT_FOUND while still incomplete); a negative `next`
 * means the boundary lies inside previously buffered data. */
3159 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
3160 ParseContext *pc= &s->parse_context;
3164 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3165 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3169 /* copy overread bytes from the last frame into the buffer */
3170 for(; pc->overread>0; pc->overread--){
3171 pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
3174 pc->last_index= pc->index;
3176 /* frame boundary not found yet: copy into buffer and return */
3177 if(next == END_NOT_FOUND){
3178 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3180 memcpy(&pc->buffer[pc->index], *buf, *buf_size);
3181 pc->index += *buf_size;
3186 pc->overread_index= pc->index + next;
3188 /* append to buffer */
3190 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3192 memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
3197 /* store overread bytes */
3198 for(;next < 0; next++){
3199 pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
3205 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3206 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
/* Flushes decoder state: releases all internally/user-owned picture
 * buffers, drops the current/last/next picture pointers, and resets the
 * bitstream parse context. Called on seek/flush. */
3213 void ff_mpeg_flush(AVCodecContext *avctx){
3215 MpegEncContext *s = avctx->priv_data;
3217 for(i=0; i<MAX_PICTURE_COUNT; i++){
3218 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
3219 || s->picture[i].type == FF_BUFFER_TYPE_USER))
3220 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
3222 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3224 s->parse_context.state= -1;
3225 s->parse_context.frame_start_found= 0;
3226 s->parse_context.overread= 0;
3227 s->parse_context.overread_index= 0;
3228 s->parse_context.index= 0;
3229 s->parse_context.last_index= 0;
3232 #ifdef CONFIG_ENCODERS
/* Copies `length` bits from src into the PutBitContext, 16 bits at a
 * time (big-endian word order), then the remaining 0-15 bits.
 * NOTE: despite its name, `bytes` counts 16-bit words (length>>4). */
3233 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
3235 int bytes= length>>4;
3236 int bits= length&15;
3239 if(length==0) return;
3241 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
3242 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
/* Restores the encoder state (prediction state, bit-count statistics,
 * quantizer) from a backup context `s` into `d` before re-encoding the
 * same MB with a different mb_type — the counterpart of
 * copy_context_after_encode(). */
3245 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
3248 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3251 d->mb_skip_run= s->mb_skip_run;
3253 d->last_dc[i]= s->last_dc[i];
/* bit-count statistics */
3256 d->mv_bits= s->mv_bits;
3257 d->i_tex_bits= s->i_tex_bits;
3258 d->p_tex_bits= s->p_tex_bits;
3259 d->i_count= s->i_count;
3260 d->f_count= s->f_count;
3261 d->b_count= s->b_count;
3262 d->skip_count= s->skip_count;
3263 d->misc_bits= s->misc_bits;
3267 d->qscale= s->qscale;
3268 d->dquant= s->dquant;
/* Saves the encoder state into `d` after an MB-type candidate has been
 * encoded, so the best candidate's state can later be reinstated. Copies
 * more than the "before" variant: MVs, MB flags, bitstream contexts and
 * block_last_index, which the encode pass may have changed. */
3271 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
3274 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
3275 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3278 d->mb_skip_run= s->mb_skip_run;
3280 d->last_dc[i]= s->last_dc[i];
/* bit-count statistics */
3283 d->mv_bits= s->mv_bits;
3284 d->i_tex_bits= s->i_tex_bits;
3285 d->p_tex_bits= s->p_tex_bits;
3286 d->i_count= s->i_count;
3287 d->f_count= s->f_count;
3288 d->b_count= s->b_count;
3289 d->skip_count= s->skip_count;
3290 d->misc_bits= s->misc_bits;
3292 d->mb_intra= s->mb_intra;
3293 d->mb_skiped= s->mb_skiped;
3294 d->mv_type= s->mv_type;
3295 d->mv_dir= s->mv_dir;
/* data partitioning uses separate bitstream contexts */
3297 if(s->data_partitioning){
3299 d->tex_pb= s->tex_pb;
3303 d->block_last_index[i]= s->block_last_index[i];
3304 d->interlaced_dct= s->interlaced_dct;
3305 d->qscale= s->qscale;
/* Trial-encodes the current MB as mb_type `type` into one of two scratch
 * bitstream buffers (selected by *next_block), scores the result by bit
 * count — or by lambda-weighted bits + SSE when full RD decision is on —
 * and records the state of the best candidate so far via
 * copy_context_after_encode(). */
3308 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
3309 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3310 int *dmin, int *next_block, int motion_x, int motion_y)
3313 uint8_t *dest_backup[3];
3315 copy_context_before_encode(s, backup, type);
3317 s->block= s->blocks[*next_block];
3318 s->pb= pb[*next_block];
3319 if(s->data_partitioning){
3320 s->pb2 = pb2 [*next_block];
3321 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the ME scratchpad so the real picture is untouched */
3325 memcpy(dest_backup, s->dest, sizeof(s->dest));
3326 s->dest[0] = s->me.scratchpad;
3327 s->dest[1] = s->me.scratchpad + 16;
3328 s->dest[2] = s->me.scratchpad + 16 + 8;
3329 assert(2*s->uvlinesize == s->linesize); //should be no prob for encoding
3330 assert(s->linesize >= 64); //FIXME
3333 encode_mb(s, motion_x, motion_y);
3335 score= get_bit_count(&s->pb);
3336 if(s->data_partitioning){
3337 score+= get_bit_count(&s->pb2);
3338 score+= get_bit_count(&s->tex_pb);
/* full RD: decode the trial MB and add its distortion to the score */
3341 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
3342 MPV_decode_mb(s, s->block);
3344 score *= s->lambda2;
3345 score += sse_mb(s) << FF_LAMBDA_SHIFT;
3349 memcpy(s->dest, dest_backup, sizeof(s->dest));
3356 copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h regions. Uses the optimized
 * dsputil routines for the common 16x16 and 8x8 sizes; otherwise falls
 * back to a generic loop via the squareTbl lookup (offset by 256 so that
 * negative differences index correctly). */
3360 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3361 uint32_t *sq = squareTbl + 256;
3366 return s->dsp.sse[0](NULL, src1, src2, stride);
3367 else if(w==8 && h==8)
3368 return s->dsp.sse[1](NULL, src1, src2, stride);
3372 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* SSE of the current macroblock (luma + both chroma planes) between the
 * source picture and the reconstruction in s->dest. Full-size MBs take
 * the fast dsputil path; MBs clipped at the right/bottom picture edge
 * use the generic sse() with the reduced w/h. */
3381 static int sse_mb(MpegEncContext *s){
3385 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3386 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3389 return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize)
3390 +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize)
3391 +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize);
3393 return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
3394 +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
3395 +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
3398 static void encode_picture(MpegEncContext *s, int picture_number)
3400 int mb_x, mb_y, pdif = 0;
3403 MpegEncContext best_s, backup_s;
3404 uint8_t bit_buf[2][3000];
3405 uint8_t bit_buf2[2][3000];
3406 uint8_t bit_buf_tex[2][3000];
3407 PutBitContext pb[2], pb2[2], tex_pb[2];
3410 init_put_bits(&pb [i], bit_buf [i], 3000);
3411 init_put_bits(&pb2 [i], bit_buf2 [i], 3000);
3412 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000);
3415 s->picture_number = picture_number;
3417 /* Reset the average MB variance */
3418 s->current_picture.mb_var_sum = 0;
3419 s->current_picture.mc_mb_var_sum = 0;
3422 /* we need to initialize some time vars before we can encode b-frames */
3423 // RAL: Condition added for MPEG1VIDEO
3424 if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3425 ff_set_mpeg4_time(s, s->picture_number);
3428 s->scene_change_score=0;
3430 s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration
3432 if(s->pict_type==I_TYPE){
3433 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3434 else s->no_rounding=0;
3435 }else if(s->pict_type!=B_TYPE){
3436 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3437 s->no_rounding ^= 1;
3440 /* Estimate motion for every MB */
3441 s->mb_intra=0; //for the rate distoration & bit compare functions
3442 if(s->pict_type != I_TYPE){
3443 if(s->pict_type != B_TYPE){
3444 if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3446 s->me.dia_size= s->avctx->pre_dia_size;
3448 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3449 for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3452 ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
3459 s->me.dia_size= s->avctx->dia_size;
3460 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3461 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3462 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3463 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3464 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3465 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3468 s->block_index[0]+=2;
3469 s->block_index[1]+=2;
3470 s->block_index[2]+=2;
3471 s->block_index[3]+=2;
3473 /* compute motion vector & mb_type and store in context */
3474 if(s->pict_type==B_TYPE)
3475 ff_estimate_b_frame_motion(s, mb_x, mb_y);
3477 ff_estimate_p_frame_motion(s, mb_x, mb_y);
3480 }else /* if(s->pict_type == I_TYPE) */{
3482 //FIXME do we need to zero them?
3483 memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3484 memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3485 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3487 if(!s->fixed_qscale){
3488 /* finding spatial complexity for I-frame rate control */
3489 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3490 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3493 uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3495 int sum = s->dsp.pix_sum(pix, s->linesize);
3497 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3499 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3500 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3501 s->current_picture.mb_var_sum += varc;
3508 if(s->scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
3509 s->pict_type= I_TYPE;
3510 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3511 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3515 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3516 s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
3518 ff_fix_long_p_mvs(s);
3521 if(s->pict_type==B_TYPE){
3524 a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
3525 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
3526 s->f_code = FFMAX(a, b);
3528 a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
3529 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
3530 s->b_code = FFMAX(a, b);
3532 ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
3533 ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
3534 ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
3535 ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
3539 if (!s->fixed_qscale)
3540 s->current_picture.quality = ff_rate_estimate_qscale(s);
3542 if(s->adaptive_quant){
3544 switch(s->codec_id){
3545 case CODEC_ID_MPEG4:
3546 ff_clean_mpeg4_qscales(s);
3549 case CODEC_ID_H263P:
3551 ff_clean_h263_qscales(s);
3556 s->lambda= s->lambda_table[0];
3559 s->lambda= s->current_picture.quality;
3560 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3563 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
3564 s->qscale= 3; //reduce cliping problems
3566 if (s->out_format == FMT_MJPEG) {
3567 /* for mjpeg, we do include qscale in the matrix */
3568 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3570 int j= s->dsp.idct_permutation[i];
3572 s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3574 convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3575 s->intra_matrix, s->intra_quant_bias, 8, 8);
3578 //FIXME var duplication
3579 s->current_picture.key_frame= s->pict_type == I_TYPE;
3580 s->current_picture.pict_type= s->pict_type;
3582 if(s->current_picture.key_frame)
3583 s->picture_in_gop_number=0;
3585 s->last_bits= get_bit_count(&s->pb);
3586 switch(s->out_format) {
3588 mjpeg_picture_header(s);
3592 if (s->codec_id == CODEC_ID_WMV2)
3593 ff_wmv2_encode_picture_header(s, picture_number);
3594 else if (s->h263_msmpeg4)
3595 msmpeg4_encode_picture_header(s, picture_number);
3596 else if (s->h263_pred)
3597 mpeg4_encode_picture_header(s, picture_number);
3598 else if (s->h263_rv10)
3599 rv10_encode_picture_header(s, picture_number);
3600 else if (s->codec_id == CODEC_ID_FLV1)
3601 ff_flv_encode_picture_header(s, picture_number);
3603 h263_encode_picture_header(s, picture_number);
3607 mpeg1_encode_picture_header(s, picture_number);
3612 bits= get_bit_count(&s->pb);
3613 s->header_bits= bits - s->last_bits;
3625 /* init last dc values */
3626 /* note: quant matrix value (8) is implied here */
3627 s->last_dc[i] = 128;
3629 s->current_picture_ptr->error[i] = 0;
3632 s->last_mv[0][0][0] = 0;
3633 s->last_mv[0][0][1] = 0;
3634 s->last_mv[1][0][0] = 0;
3635 s->last_mv[1][0][1] = 0;
3640 switch(s->codec_id){
3642 case CODEC_ID_H263P:
3644 s->gob_index = ff_h263_get_gob_height(s);
3646 case CODEC_ID_MPEG4:
3647 if(s->partitioned_frame)
3648 ff_mpeg4_init_partitions(s);
3655 s->first_slice_line = 1;
3656 s->ptr_lastgob = s->pb.buf;
3657 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3661 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3662 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3663 ff_init_block_index(s);
3665 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3666 const int xy= mb_y*s->mb_stride + mb_x;
3667 int mb_type= s->mb_type[xy];
3672 ff_update_block_index(s);
3674 /* write gob / video packet header */
3676 if(s->rtp_mode && mb_y + mb_x>0){
3677 int current_packet_size, is_gob_start;
3679 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3682 if(s->codec_id==CODEC_ID_MPEG4){
3683 if(current_packet_size >= s->rtp_payload_size){
3685 if(s->partitioned_frame){
3686 ff_mpeg4_merge_partitions(s);
3687 ff_mpeg4_init_partitions(s);
3689 ff_mpeg4_encode_video_packet_header(s);
3691 if(s->flags&CODEC_FLAG_PASS1){
3692 int bits= get_bit_count(&s->pb);
3693 s->misc_bits+= bits - s->last_bits;
3696 ff_mpeg4_clean_buffers(s);
3699 }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
3700 if( current_packet_size >= s->rtp_payload_size
3701 && s->mb_skip_run==0){
3702 ff_mpeg1_encode_slice_header(s);
3703 ff_mpeg1_clean_buffers(s);
3706 }else if(s->codec_id==CODEC_ID_MPEG2VIDEO){
3707 if( ( current_packet_size >= s->rtp_payload_size || mb_x==0)
3708 && s->mb_skip_run==0){
3709 ff_mpeg1_encode_slice_header(s);
3710 ff_mpeg1_clean_buffers(s);
3714 if(current_packet_size >= s->rtp_payload_size
3715 && s->mb_x==0 && s->mb_y%s->gob_index==0){
3717 h263_encode_gob_header(s, mb_y);
3723 s->ptr_lastgob = pbBufPtr(&s->pb);
3724 s->first_slice_line=1;
3725 s->resync_mb_x=mb_x;
3726 s->resync_mb_y=mb_y;
3731 if( (s->resync_mb_x == s->mb_x)
3732 && s->resync_mb_y+1 == s->mb_y){
3733 s->first_slice_line=0;
3737 s->dquant=0; //only for QP_RD
3739 if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible
3741 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3743 copy_context_before_encode(&backup_s, s, -1);
3745 best_s.data_partitioning= s->data_partitioning;
3746 best_s.partitioned_frame= s->partitioned_frame;
3747 if(s->data_partitioning){
3748 backup_s.pb2= s->pb2;
3749 backup_s.tex_pb= s->tex_pb;
3752 if(mb_type&MB_TYPE_INTER){
3753 s->mv_dir = MV_DIR_FORWARD;
3754 s->mv_type = MV_TYPE_16X16;
3756 s->mv[0][0][0] = s->p_mv_table[xy][0];
3757 s->mv[0][0][1] = s->p_mv_table[xy][1];
3758 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb,
3759 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3761 if(mb_type&MB_TYPE_SKIPED){
3762 s->mv_dir = MV_DIR_FORWARD;
3763 s->mv_type = MV_TYPE_16X16;
3767 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_SKIPED, pb, pb2, tex_pb,
3768 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3770 if(mb_type&MB_TYPE_INTER4V){
3771 s->mv_dir = MV_DIR_FORWARD;
3772 s->mv_type = MV_TYPE_8X8;
3775 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3776 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3778 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb,
3779 &dmin, &next_block, 0, 0);
3781 if(mb_type&MB_TYPE_FORWARD){
3782 s->mv_dir = MV_DIR_FORWARD;
3783 s->mv_type = MV_TYPE_16X16;
3785 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3786 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3787 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb,
3788 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3790 if(mb_type&MB_TYPE_BACKWARD){
3791 s->mv_dir = MV_DIR_BACKWARD;
3792 s->mv_type = MV_TYPE_16X16;
3794 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3795 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3796 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3797 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3799 if(mb_type&MB_TYPE_BIDIR){
3800 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3801 s->mv_type = MV_TYPE_16X16;
3803 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3804 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3805 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3806 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3807 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb,
3808 &dmin, &next_block, 0, 0);
3810 if(mb_type&MB_TYPE_DIRECT){
3811 int mx= s->b_direct_mv_table[xy][0];
3812 int my= s->b_direct_mv_table[xy][1];
3814 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3817 ff_mpeg4_set_direct_mv(s, mx, my);
3819 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb,
3820 &dmin, &next_block, mx, my);
3822 if(mb_type&MB_TYPE_INTRA){
3824 s->mv_type = MV_TYPE_16X16;
3828 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
3829 &dmin, &next_block, 0, 0);
3830 if(s->h263_pred || s->h263_aic){
3832 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3834 ff_clean_intra_table_entries(s); //old mode?
3838 if(s->flags & CODEC_FLAG_QP_RD){
3839 if(best_s.mv_type==MV_TYPE_16X16 && !(best_s.mv_dir&MV_DIRECT)){
3840 const int last_qp= backup_s.qscale;
3841 int dquant, dir, qp, dc[6];
3843 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3845 assert(backup_s.dquant == 0);
3848 s->mv_dir= best_s.mv_dir;
3849 s->mv_type = MV_TYPE_16X16;
3850 s->mb_intra= best_s.mb_intra;
3851 s->mv[0][0][0] = best_s.mv[0][0][0];
3852 s->mv[0][0][1] = best_s.mv[0][0][1];
3853 s->mv[1][0][0] = best_s.mv[1][0][0];
3854 s->mv[1][0][1] = best_s.mv[1][0][1];
3856 dir= s->pict_type == B_TYPE ? 2 : 1;
3857 if(last_qp + dir > s->avctx->qmax) dir= -dir;
3858 for(dquant= dir; dquant<=2 && dquant>=-2; dquant += dir){
3859 qp= last_qp + dquant;
3860 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3862 backup_s.dquant= dquant;
3865 dc[i]= s->dc_val[0][ s->block_index[i] ];
3866 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
3870 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3871 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3872 if(best_s.qscale != qp){
3875 s->dc_val[0][ s->block_index[i] ]= dc[i];
3876 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
3879 if(dir > 0 && dquant==dir){
3887 s->current_picture.qscale_table[xy]= qp;
3891 copy_context_after_encode(s, &best_s, -1);
3893 pb_bits_count= get_bit_count(&s->pb);
3894 flush_put_bits(&s->pb);
3895 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3898 if(s->data_partitioning){
3899 pb2_bits_count= get_bit_count(&s->pb2);
3900 flush_put_bits(&s->pb2);
3901 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3902 s->pb2= backup_s.pb2;
3904 tex_pb_bits_count= get_bit_count(&s->tex_pb);
3905 flush_put_bits(&s->tex_pb);
3906 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3907 s->tex_pb= backup_s.tex_pb;
3909 s->last_bits= get_bit_count(&s->pb);
3912 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
3913 ff_h263_update_motion_val(s);
3917 s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad , s->linesize ,16);
3918 s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
3919 s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
3922 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3923 MPV_decode_mb(s, s->block);
3925 int motion_x, motion_y;
3927 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
3929 if(s->avctx->mb_decision==FF_MB_DECISION_SIMPLE && s->pict_type==P_TYPE){ //FIXME check if the mess is usefull at all
3930 /* get luma score */
3931 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3932 intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
3936 int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
3939 dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
3941 for(i=0; i<16; i++){
3942 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3943 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3944 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3945 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3949 intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3951 /* printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8,
3952 s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
3953 s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
3956 /* get chroma score */
3957 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3965 if(s->out_format == FMT_H263){
3966 mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3968 mean= (s->last_dc[i] + 4)>>3;
3970 dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
3974 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean;
3975 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean;
3978 intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3983 switch(s->avctx->mb_cmp&0xFF){
3986 intra_score+= 32*s->qscale;
3989 intra_score+= 24*s->qscale*s->qscale;
3992 intra_score+= 96*s->qscale;
3995 intra_score+= 48*s->qscale;
4002 intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
4006 if(intra_score < inter_score)
4007 mb_type= MB_TYPE_INTRA;
4010 s->mv_type=MV_TYPE_16X16;
4011 // only one MB-Type possible
4017 motion_x= s->mv[0][0][0] = 0;
4018 motion_y= s->mv[0][0][1] = 0;
4021 s->mv_dir = MV_DIR_FORWARD;
4023 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
4024 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
4026 case MB_TYPE_INTER4V:
4027 s->mv_dir = MV_DIR_FORWARD;
4028 s->mv_type = MV_TYPE_8X8;
4031 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
4032 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
4034 motion_x= motion_y= 0;
4036 case MB_TYPE_DIRECT:
4037 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
4039 motion_x=s->b_direct_mv_table[xy][0];
4040 motion_y=s->b_direct_mv_table[xy][1];
4042 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
4046 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4050 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
4051 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
4052 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
4053 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
4055 case MB_TYPE_BACKWARD:
4056 s->mv_dir = MV_DIR_BACKWARD;
4058 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
4059 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
4061 case MB_TYPE_FORWARD:
4062 s->mv_dir = MV_DIR_FORWARD;
4064 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
4065 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
4066 // printf(" %d %d ", motion_x, motion_y);
4069 motion_x=motion_y=0; //gcc warning fix
4070 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
4073 encode_mb(s, motion_x, motion_y);
4075 // RAL: Update last macrobloc type
4076 s->last_mv_dir = s->mv_dir;
4079 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
4080 ff_h263_update_motion_val(s);
4083 MPV_decode_mb(s, s->block);
4086 /* clean the MV table in IPS frames for direct mode in B frames */
4087 if(s->mb_intra /* && I,P,S_TYPE */){
4088 s->p_mv_table[xy][0]=0;
4089 s->p_mv_table[xy][1]=0;
4092 if(s->flags&CODEC_FLAG_PSNR){
4096 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
4097 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
4099 s->current_picture_ptr->error[0] += sse(
4100 s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
4101 s->dest[0], w, h, s->linesize);
4102 s->current_picture_ptr->error[1] += sse(
4103 s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
4104 s->dest[1], w>>1, h>>1, s->uvlinesize);
4105 s->current_picture_ptr->error[2] += sse(
4106 s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
4107 s->dest[2], w>>1, h>>1, s->uvlinesize);
4109 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
4115 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
4116 ff_mpeg4_merge_partitions(s);
4118 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
4119 msmpeg4_encode_ext_header(s);
4121 if(s->codec_id==CODEC_ID_MPEG4)
4122 ff_mpeg4_stuffing(&s->pb);
4125 //if (s->gob_number)
4126 // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
4128 /* Send the last GOB if RTP */
4130 flush_put_bits(&s->pb);
4131 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
4132 /* Call the RTP callback to send the last GOB */
4133 if (s->rtp_callback)
4134 s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
4135 s->ptr_lastgob = pbBufPtr(&s->pb);
4136 //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
/*
 * Accumulate per-coefficient DCT error statistics, used to derive the
 * dct_offset[] denoise table (separate accumulators for intra vs inter MBs).
 * NOTE(review): several source lines are elided in this view (loop-variable
 * declaration, branch/brace structure, function end) — comments annotate
 * only the visible statements.
 */
4140 void ff_denoise_dct(MpegEncContext *s, DCTELEM *block){
4141 const int intra= s->mb_intra;
/* count how many blocks of this class (intra/inter) have been seen */
4144 s->dct_count[intra]++;
4146 for(i=0; i<64; i++){
4147 int level= block[i];
/* visible positive-level path: accumulate error, subtract learned offset,
 * clamp so the offset never flips the coefficient's sign */
4151 s->dct_error_sum[intra][i] += level;
4152 level -= s->dct_offset[intra][i];
4153 if(level<0) level=0;
/* mirrored negative-level path (the enclosing else is elided from view) */
4155 s->dct_error_sum[intra][i] -= level;
4156 level += s->dct_offset[intra][i];
4157 if(level>0) level=0;
/*
 * Rate-distortion optimal ("trellis") quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then dynamic-programs over (run, level) pairs minimizing
 * distortion + lambda * bits.  Returns the index of the last non-zero
 * coefficient (or -1 when the block quantizes to all zero), and sets
 * *overflow when a level exceeds s->max_qcoeff.
 * NOTE(review): many source lines are elided in this view (declarations of
 * coeff[][], score_tab[], run_tab[], level_tab[], max, q, left_limit,
 * last_i, plus most braces and else-branches).  Comments below describe
 * only the visible statements; structural claims are hedged.
 */
4164 static int dct_quantize_trellis_c(MpegEncContext *s,
4165 DCTELEM *block, int n,
4166 int qscale, int *overflow){
4168 const uint8_t *scantable= s->intra_scantable.scantable;
4170 unsigned int threshold1, threshold2;
4179 int not_coded_score= 0;
4181 int coeff_count[64];
4182 int qmul, qadd, start_i, last_non_zero, i, dc;
4183 const int esc_length= s->ac_esc_length;
4185 uint8_t * last_length;
/* lambda is the rate-distortion multiplier derived from lambda2 */
4188 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4189 const int patch_table= s->out_format == FMT_MPEG1 && !s->mb_intra;
/* forward DCT in place, then optionally feed the denoise statistics */
4191 s->dsp.fdct (block);
4193 if(s->dct_error_sum)
4194 ff_denoise_dct(s, block);
4197 qadd= ((qscale-1)|1)*8;
4208 /* For AIC we skip quant/dequant of INTRADC */
4213 /* note: block[0] is assumed to be positive */
4214 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: pick intra quant matrix and intra AC VLC length tables;
 * MPEG-1/mpeg_quant uses round-to-nearest bias */
4217 qmat = s->q_intra_matrix[qscale];
4218 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4219 bias= 1<<(QMAT_SHIFT-1);
4220 length = s->intra_ac_vlc_length;
4221 last_length= s->intra_ac_vlc_last_length;
/* inter path (enclosing else is elided from view) */
4225 qmat = s->q_inter_matrix[qscale];
4226 length = s->inter_ac_vlc_length;
4227 last_length= s->inter_ac_vlc_last_length;
4230 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4231 threshold2= (threshold1<<1);
/* pass 1: coarse-quantize each scanned coefficient and record up to two
 * candidate levels (level and level-1 in magnitude) per position */
4233 for(i=start_i; i<64; i++) {
4234 const int j = scantable[i];
4235 const int k= i-start_i;
4236 int level = block[j];
4237 level = level * qmat[j];
4239 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4240 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
/* unsigned trick: true iff |level| exceeds the dead zone */
4241 if(((unsigned)(level+threshold1))>threshold2){
4243 level= (bias + level)>>QMAT_SHIFT;
4245 coeff[1][k]= level-1;
4246 // coeff[2][k]= level-2;
4248 level= (bias - level)>>QMAT_SHIFT;
4249 coeff[0][k]= -level;
4250 coeff[1][k]= -level+1;
4251 // coeff[2][k]= -level+2;
4253 coeff_count[k]= FFMIN(level, 2);
4254 assert(coeff_count[k]);
/* dead-zone case (else elided): single candidate of magnitude <=1 */
4258 coeff[0][k]= (level>>31)|1;
4263 *overflow= s->max_qcoeff < max; //overflow might have happend
/* all-zero early out */
4265 if(last_non_zero < start_i){
4266 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4267 return last_non_zero;
4273 // length[UNI_AC_ENC_INDEX(0, 63)]=
4274 // length[UNI_AC_ENC_INDEX(0, 65)]= 2;
/* pass 2: dynamic programming over coefficients; score_tab[i] holds the
 * best rate-distortion score for coding a prefix ending before i */
4277 for(i=0; i<=last_non_zero - start_i; i++){
4278 int level_index, run, j;
4279 const int dct_coeff= block[ scantable[i + start_i] ];
4280 const int zero_distoration= dct_coeff*dct_coeff;
4281 int best_score=256*256*256*120;
4283 last_score += zero_distoration;
4284 not_coded_score += zero_distoration;
4285 for(level_index=0; level_index < coeff_count[i]; level_index++){
4287 int level= coeff[level_index][i];
/* reconstruct the candidate exactly as the decoder will, per out_format */
4292 if(s->out_format == FMT_H263){
4294 unquant_coeff= level*qmul + qadd;
4296 unquant_coeff= level*qmul - qadd;
4299 j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
/* MPEG intra reconstruction with "mismatch" oddification (|1) */
4302 unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3;
4303 unquant_coeff = -((unquant_coeff - 1) | 1);
4305 unquant_coeff = (int)( level * qscale * s->intra_matrix[j]) >> 3;
4306 unquant_coeff = (unquant_coeff - 1) | 1;
/* MPEG inter reconstruction (elided branch selection) */
4310 unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
4311 unquant_coeff = -((unquant_coeff - 1) | 1);
4313 unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
4314 unquant_coeff = (unquant_coeff - 1) | 1;
/* squared reconstruction error of this candidate */
4320 distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff);
/* levels within VLC range: try every admissible preceding run */
4322 if((level&(~127)) == 0){
4323 for(run=0; run<=i - left_limit; run++){
4324 int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4325 score += score_tab[i-run];
4327 if(score < best_score){
4329 score_tab[i+1]= score;
4331 level_tab[i+1]= level-64;
/* H.263 uses distinct "last coefficient" VLC length tables */
4335 if(s->out_format == FMT_H263){
4336 for(run=0; run<=i - left_limit; run++){
4337 int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4338 score += score_tab[i-run];
4339 if(score < last_score){
4342 last_level= level-64;
/* out-of-range levels must use the escape code (fixed bit cost) */
4348 distoration += esc_length*lambda;
4349 for(run=0; run<=i - left_limit; run++){
4350 int score= distoration + score_tab[i-run];
4352 if(score < best_score){
4354 score_tab[i+1]= score;
4356 level_tab[i+1]= level-64;
4360 if(s->out_format == FMT_H263){
4361 for(run=0; run<=i - left_limit; run++){
4362 int score= distoration + score_tab[i-run];
4363 if(score < last_score){
4366 last_level= level-64;
/* skipping this coefficient costs its energy; propagate into prefix scores */
4374 for(j=left_limit; j<=i; j++){
4375 score_tab[j] += zero_distoration;
4377 score_limit+= zero_distoration;
4378 if(score_tab[i+1] < score_limit)
4379 score_limit= score_tab[i+1];
4381 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
/* prune start positions that can no longer beat the best known score */
4382 while(score_tab[ left_limit ] > score_limit + lambda) left_limit++;
4385 // length[UNI_AC_ENC_INDEX(0, 63)]=
4386 // length[UNI_AC_ENC_INDEX(0, 65)]= 3;
/* non-H.263 formats: pick the best truncation point after the DP pass */
4390 if(s->out_format != FMT_H263){
4391 last_score= 256*256*256*120;
4392 for(i= left_limit; i<=last_non_zero - start_i + 1; i++){
4393 int score= score_tab[i];
4394 if(i) score += lambda*2; //FIXME exacter?
4396 if(score < last_score){
4399 last_level= level_tab[i];
4400 last_run= run_tab[i];
/* coded_score = how much coding this block helps vs. skipping it */
4405 s->coded_score[n] = last_score - not_coded_score;
4408 last_non_zero= last_i - 1 + start_i;
4409 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4411 if(last_non_zero < start_i)
4412 return last_non_zero;
/* special case: only the first coefficient survives — re-decide it alone */
4414 if(last_non_zero == 0 && start_i == 0){
4416 int best_score= dc * dc;
4418 for(i=0; i<coeff_count[0]; i++){
4419 int level= coeff[i][0];
4420 int unquant_coeff, score, distoration;
4422 if(s->out_format == FMT_H263){
4424 unquant_coeff= (level*qmul + qadd)>>3;
4426 unquant_coeff= (level*qmul - qadd)>>3;
4430 unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
4431 unquant_coeff = -((unquant_coeff - 1) | 1);
4433 unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
4434 unquant_coeff = (unquant_coeff - 1) | 1;
4437 unquant_coeff = (unquant_coeff + 4) >> 3;
4438 unquant_coeff<<= 3 + 3;
4440 distoration= (unquant_coeff - dc) * (unquant_coeff - dc);
4442 if((level&(~127)) == 0)
4443 score= distoration + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4445 score= distoration + esc_length*lambda;
4447 if(score < best_score){
4449 best_level= level - 64;
4452 block[0]= best_level;
4453 s->coded_score[n] = best_score - dc*dc;
4454 if(best_level == 0) return -1;
4455 else return last_non_zero;
/* write the chosen (run, level) chain back into the block, applying the
 * IDCT permutation expected by the rest of the pipeline */
4460 //FIXME use permutated scantable
4461 block[ s->dsp.idct_permutation[ scantable[last_non_zero] ] ]= last_level;
4464 for(;i>0 ; i -= run_tab[i] + 1){
4465 const int j= s->dsp.idct_permutation[ scantable[i - 1 + start_i] ];
4467 block[j]= level_tab[i];
4471 return last_non_zero;
/*
 * Plain (non-trellis) quantization of one 8x8 block: forward DCT, then
 * dead-zone quantization against the per-qscale matrix.  Returns the index
 * of the last non-zero coefficient and sets *overflow when a level exceeds
 * s->max_qcoeff.
 * NOTE(review): this view elides the main quantization loop header, the
 * clamp/else branches, and declarations of qmat/bias/max — comments
 * describe only the visible statements.
 */
4474 static int dct_quantize_c(MpegEncContext *s,
4475 DCTELEM *block, int n,
4476 int qscale, int *overflow)
4478 int i, j, level, last_non_zero, q;
4480 const uint8_t *scantable= s->intra_scantable.scantable;
4483 unsigned int threshold1, threshold2;
/* forward DCT in place; optionally accumulate denoise statistics */
4485 s->dsp.fdct (block);
4487 if(s->dct_error_sum)
4488 ff_denoise_dct(s, block);
4498 /* For AIC we skip quant/dequant of INTRADC */
4501 /* note: block[0] is assumed to be positive */
4502 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: intra matrix + configured intra bias */
4505 qmat = s->q_intra_matrix[qscale];
4506 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* inter path (enclosing else elided from view) */
4510 qmat = s->q_inter_matrix[qscale];
4511 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4513 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4514 threshold2= (threshold1<<1);
/* per-coefficient quantization (loop header elided from view) */
4519 level = level * qmat[j];
4521 // if( bias+level >= (1<<QMAT_SHIFT)
4522 // || bias-level >= (1<<QMAT_SHIFT)){
/* unsigned trick: true iff |level| exceeds the dead zone */
4523 if(((unsigned)(level+threshold1))>threshold2){
4525 level= (bias + level)>>QMAT_SHIFT;
4528 level= (bias - level)>>QMAT_SHIFT;
4537 *overflow= s->max_qcoeff < max; //overflow might have happend
4539 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4540 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4541 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4543 return last_non_zero;
4546 #endif //CONFIG_ENCODERS
/*
 * MPEG-1 inverse quantization of one block, in place.  Intra blocks scale
 * the DC separately and use the intra matrix; inter blocks use the inter
 * matrix with the (2*level+1) reconstruction.  The (level-1)|1
 * "oddification" implements the MPEG-1 mismatch-control rounding.
 * NOTE(review): the intra/inter dispatch, sign branches, and saturation
 * code are elided from this view — comments cover visible lines only.
 */
4548 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
4549 DCTELEM *block, int n, int qscale)
4551 int i, level, nCoeffs;
4552 const uint16_t *quant_matrix;
4554 nCoeffs= s->block_last_index[n];
/* intra DC: luma (blocks 0-3) vs chroma scale (branch elided from view) */
4558 block[0] = block[0] * s->y_dc_scale;
4560 block[0] = block[0] * s->c_dc_scale;
4561 /* XXX: only mpeg1 */
4562 quant_matrix = s->intra_matrix;
/* intra AC coefficients, starting after DC */
4563 for(i=1;i<=nCoeffs;i++) {
4564 int j= s->intra_scantable.permutated[i];
/* negative-level path (sign test elided) */
4569 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4570 level = (level - 1) | 1;
/* positive-level path */
4573 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4574 level = (level - 1) | 1;
/* debug-only range check; MPEG-1 coefficients must fit [-2048,2047] */
4577 if (level < -2048 || level > 2047)
4578 fprintf(stderr, "unquant error %d %d\n", i, level);
/* inter path: same permutation table, inter matrix, (2*level+1) formula */
4585 quant_matrix = s->inter_matrix;
4586 for(;i<=nCoeffs;i++) {
4587 int j= s->intra_scantable.permutated[i];
4592 level = (((level << 1) + 1) * qscale *
4593 ((int) (quant_matrix[j]))) >> 4;
4594 level = (level - 1) | 1;
4597 level = (((level << 1) + 1) * qscale *
4598 ((int) (quant_matrix[j]))) >> 4;
4599 level = (level - 1) | 1;
4602 if (level < -2048 || level > 2047)
4603 fprintf(stderr, "unquant error %d %d\n", i, level);
/*
 * MPEG-2 inverse quantization of one block, in place.  Differs from the
 * MPEG-1 variant in that it omits the per-coefficient (level-1)|1
 * oddification (MPEG-2 uses a block-level mismatch control instead) and
 * that alternate_scan forces all 64 coefficients to be processed.
 * NOTE(review): intra/inter dispatch, sign branches and saturation are
 * elided from this view — comments cover visible lines only.
 */
4611 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
4612 DCTELEM *block, int n, int qscale)
4614 int i, level, nCoeffs;
4615 const uint16_t *quant_matrix;
/* alternate scan: last_index is unreliable, touch the whole block */
4617 if(s->alternate_scan) nCoeffs= 63;
4618 else nCoeffs= s->block_last_index[n];
/* intra DC: luma vs chroma scale (branch elided from view) */
4622 block[0] = block[0] * s->y_dc_scale;
4624 block[0] = block[0] * s->c_dc_scale;
4625 quant_matrix = s->intra_matrix;
4626 for(i=1;i<=nCoeffs;i++) {
4627 int j= s->intra_scantable.permutated[i];
/* negative / positive level paths (sign test elided) */
4632 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4635 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* debug-only range check */
4638 if (level < -2048 || level > 2047)
4639 fprintf(stderr, "unquant error %d %d\n", i, level);
/* inter path: inter matrix with the (2*level+1) reconstruction */
4647 quant_matrix = s->inter_matrix;
4648 for(;i<=nCoeffs;i++) {
4649 int j= s->intra_scantable.permutated[i];
4654 level = (((level << 1) + 1) * qscale *
4655 ((int) (quant_matrix[j]))) >> 4;
4658 level = (((level << 1) + 1) * qscale *
4659 ((int) (quant_matrix[j]))) >> 4;
4662 if (level < -2048 || level > 2047)
4663 fprintf(stderr, "unquant error %d %d\n", i, level);
/*
 * H.263-style inverse quantization: reconstruction is level*(2*qscale)
 * +/- qadd, with no quantization matrix.  Intra DC is scaled separately.
 * NOTE(review): declarations of nCoeffs, the qmul assignment, the
 * intra/inter dispatch and the sign branches are elided from this view —
 * comments cover visible lines only.
 */
4674 static void dct_unquantize_h263_c(MpegEncContext *s,
4675 DCTELEM *block, int n, int qscale)
4677 int i, level, qmul, qadd;
4680 assert(s->block_last_index[n]>=0);
/* qadd is forced odd, per the H.263 reconstruction formula */
4682 qadd = (qscale - 1) | 1;
/* intra DC: luma vs chroma scale (branch elided from view) */
4688 block[0] = block[0] * s->y_dc_scale;
4690 block[0] = block[0] * s->c_dc_scale;
4694 nCoeffs= 63; //does not allways use zigzag table
/* inter blocks: only up to the raster end of the last coded coefficient */
4697 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
4700 for(;i<=nCoeffs;i++) {
/* negative / positive level paths (sign test elided) */
4704 level = level * qmul - qadd;
4706 level = level * qmul + qadd;
/* debug-only range check */
4709 if (level < -2048 || level > 2047)
4710 fprintf(stderr, "unquant error %d %d\n", i, level);
/*
 * AVOption table shared by the MPEG-4/MSMPEG4/WMV1 encoders (referenced via
 * the .options member of the AVCodec definitions below).  Each entry maps a
 * user-visible option name to a context struct field plus min/max/default.
 *
 * Fixes in this revision:
 *  - "i_quant_offset" previously wrote to the i_quant_factor field
 *    (copy-paste bug): the option silently clobbered the wrong setting.
 *    It now targets i_quant_offset.
 *  - A duplicate "me_range" entry (identical name and field) was removed.
 *
 * NOTE(review): option-name strings are user-facing API and are left
 * untouched, including the existing typos "rc_buf_aggresivity" and
 * "diamod"; renaming them would break existing command lines.
 */
4718 static const AVOption mpeg4_options[] =
4720 AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
4721 AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
4722 "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
4723 bit_rate_tolerance, 4, 240000000, 8000),
4724 AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
4725 AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
4726 AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
4727 rc_eq, "tex^qComp,option1,options2", 0),
4728 AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
4729 rc_min_rate, 4, 24000000, 0),
4730 AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
4731 rc_max_rate, 4, 24000000, 0),
4732 AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
4733 rc_buffer_aggressivity, 4, 24000000, 0),
4734 AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
4735 rc_initial_cplx, 0., 9999999., 0),
/* FIXME(review): min==max==0 makes this option's range degenerate —
 * presumably it should be -999999., 999999. like i_quant_offset below. */
4736 AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
4737 i_quant_factor, 0., 0., 0),
/* was: i_quant_factor — wrong field; the offset option must set i_quant_offset */
4738 AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
4739 i_quant_offset, -999999., 999999., 0),
4740 AVOPTION_CODEC_INT("dct_algo", "dct alghorithm",
4741 dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
4742 AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
4743 lumi_masking, 0., 999999., 0),
4744 AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
4745 temporal_cplx_masking, 0., 999999., 0),
4746 AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
4747 spatial_cplx_masking, 0., 999999., 0),
4748 AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
4749 p_masking, 0., 999999., 0),
4750 AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
4751 dark_masking, 0., 999999., 0),
4752 AVOPTION_CODEC_INT("idct_algo", "idct alghorithm",
4753 idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
4755 AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
4757 AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
4760 AVOPTION_CODEC_INT("me_cmp", "ME compare function",
4761 me_cmp, 0, 24000000, 0),
4762 AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
4763 me_sub_cmp, 0, 24000000, 0),
4766 AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
4767 dia_size, 0, 24000000, 0),
4768 AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
4769 last_predictor_count, 0, 24000000, 0),
4771 AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
4772 pre_me, 0, 24000000, 0),
4773 AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
4774 me_pre_cmp, 0, 24000000, 0),
4776 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
4777 me_range, 0, 24000000, 0),
4778 AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape",
4779 pre_dia_size, 0, 24000000, 0),
4780 AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
4781 me_subpel_quality, 0, 24000000, 0),
4784 AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
4785 flags, CODEC_FLAG_PSNR, 0),
4786 AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
4788 AVOPTION_SUB(avoptions_common),
/*
 * Encoder registration table: one AVCodec definition per supported encoder,
 * all sharing MpegEncContext as their private context.  The mpeg4/msmpeg4/
 * wmv1 encoders additionally expose the mpeg4_options AVOption table.
 * NOTE(review): most initializer fields (codec name, type, init/encode/close
 * callbacks) are elided from this view — only the id/context-size/options
 * lines are visible, so no claims are made about the missing members.
 */
4792 #ifdef CONFIG_ENCODERS
4794 AVCodec mpeg1video_encoder = {
4797 CODEC_ID_MPEG1VIDEO,
4798 sizeof(MpegEncContext),
4806 AVCodec mpeg2video_encoder = {
4809 CODEC_ID_MPEG2VIDEO,
4810 sizeof(MpegEncContext),
4816 AVCodec h263_encoder = {
4820 sizeof(MpegEncContext),
4826 AVCodec h263p_encoder = {
4830 sizeof(MpegEncContext),
4836 AVCodec flv_encoder = {
4840 sizeof(MpegEncContext),
4846 AVCodec rv10_encoder = {
4850 sizeof(MpegEncContext),
4856 AVCodec mpeg4_encoder = {
4860 sizeof(MpegEncContext),
4864 .options = mpeg4_options,
4867 AVCodec msmpeg4v1_encoder = {
4871 sizeof(MpegEncContext),
4875 .options = mpeg4_options,
4878 AVCodec msmpeg4v2_encoder = {
4882 sizeof(MpegEncContext),
4886 .options = mpeg4_options,
4889 AVCodec msmpeg4v3_encoder = {
4893 sizeof(MpegEncContext),
4897 .options = mpeg4_options,
4900 AVCodec wmv1_encoder = {
4904 sizeof(MpegEncContext),
4908 .options = mpeg4_options,
4913 AVCodec mjpeg_encoder = {
4917 sizeof(MpegEncContext),
4923 #endif //CONFIG_ENCODERS