2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
24 * The simplest mpeg encoder (well, it was the simplest!).
31 #include "mpegvideo.h"
34 #include "fastmemcpy.h"
#ifdef CONFIG_ENCODERS
/* per-picture encoding entry point, defined later in this file */
static void encode_picture(MpegEncContext *s, int picture_number);
#endif //CONFIG_ENCODERS
/* C reference dequantizers for the three coefficient-scaling schemes
   (MPEG-1, MPEG-2, H.263); n is the block index, qscale the quantizer */
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
#ifdef CONFIG_ENCODERS
/* forward quantizers; *overflow reports to the caller that clipping occurred */
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
#endif //CONFIG_ENCODERS
/* function pointer, initialized to the C implementation below;
   presumably replaced by platform-specific init — TODO confirm */
void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
/* enable all paranoid tests for rounding, overflows, etc... */

/* for jpeg fast DCT */

/* AAN DCT scale factors, consumed by convert_matrix() for the fdct_ifast
   path to fold the DCT's output scaling into the quantizer tables */
static const uint16_t aanscales[64] = {
    /* precomputed values scaled up by 14 bits */
    16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
    22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
    21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
    19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
    16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
    12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
    8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446,
    4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247

/* rounding table indexed by the low bits of an averaged chroma motion
   vector — presumably for H.263 chroma MV rounding; TODO confirm at use site */
static const uint8_t h263_chroma_roundtab[16] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
#ifdef CONFIG_ENCODERS
/* shared default motion-vector penalty table; allocated lazily in
   MPV_encode_init() (see the av_mallocz there) */
static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
/* default fcode lookup, zero-filled then seeded in MPV_encode_init() */
static uint8_t default_fcode_tab[MAX_MV*2+1];

/* pixel-format list: these encoders only accept YUV420P (-1 terminates) */
enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/**
 * Builds the per-qscale integer quantization multiplier tables from a
 * quantization matrix, for qscale in [qmin, qmax].
 * qmat gets QMAT_SHIFT-precision factors; qmat16/qmat16_bias get the 16-bit
 * (MMX) variants including the rounding bias.  The scaling differs by DCT:
 * the ifast DCT leaves an extra aanscales factor in its output, which is
 * divided out here.
 */
static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax)
    for(qscale=qmin; qscale<=qmax; qscale++){
        if (s->dsp.fdct == ff_jpeg_fdct_islow) {
            /* j: matrix index after the IDCT's coefficient permutation */
            const int j= s->dsp.idct_permutation[i];
            /* 16 <= qscale * quant_matrix[i] <= 7905 */
            /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
            /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
            /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
            qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
                                    (qscale * quant_matrix[j]));
        } else if (s->dsp.fdct == fdct_ifast) {
            const int j= s->dsp.idct_permutation[i];
            /* 16 <= qscale * quant_matrix[i] <= 7905 */
            /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
            /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
            /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
            /* divide out the AAN post-scale left in the ifast DCT output */
            qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
                                    (aanscales[i] * qscale * quant_matrix[j]));
            const int j= s->dsp.idct_permutation[i];
            /* We can safely suppose that 16 <= quant_matrix[i] <= 255
               So 16 <= qscale * quant_matrix[i] <= 7905
               so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
               so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
            qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
//            qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
            qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
            /* keep the 16-bit factor in (0, 128*256) so MMX multiplies can't wrap */
            if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
            qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
#endif //CONFIG_ENCODERS
/**
 * Initializes a ScanTable from a raw scan order: stores the source table,
 * fills st->permutated with the IDCT's coefficient permutation applied,
 * and records raster_end positions (used to find the last nonzero coeff).
 */
void ff_init_scantable(MpegEncContext *s, ScanTable *st, const uint8_t *src_scantable){
    st->scantable= src_scantable;
        j = src_scantable[i];
        /* remap through the IDCT permutation so coefficients land where
           the selected IDCT implementation expects them */
        st->permutated[i] = s->dsp.idct_permutation[j];
        j = st->permutated[i];
        st->raster_end[i]= end;
/* init common dct for both encoder and decoder */
int DCT_common_init(MpegEncContext *s)
    /* install the portable C implementations first ... */
    s->dct_unquantize_h263 = dct_unquantize_h263_c;
    s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
    s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;

#ifdef CONFIG_ENCODERS
    s->dct_quantize= dct_quantize_c;

    /* ... then let each platform init override with optimized versions
       (each call is presumably guarded by arch #ifdefs not visible here) */
    MPV_common_init_mmx(s);
    MPV_common_init_axp(s);
    MPV_common_init_mlib(s);
    MPV_common_init_mmi(s);
    MPV_common_init_armv4l(s);
    MPV_common_init_ppc(s);

#ifdef CONFIG_ENCODERS
    /* keep the (possibly arch-optimized) quantizer as the fast one before
       optionally switching the main pointer to the trellis quantizer */
    s->fast_dct_quantize= s->dct_quantize;

    if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
        s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
#endif //CONFIG_ENCODERS

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    ff_init_scantable(s, &s->inter_scantable  , ff_zigzag_direct);
    ff_init_scantable(s, &s->intra_scantable  , ff_zigzag_direct);
    ff_init_scantable(s, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s, &s->intra_v_scantable, ff_alternate_vertical_scan);

    s->picture_structure= PICT_FRAME;
/**
 * allocates a Picture
 * The pixels are allocated/set by calling get_buffer() if shared=0
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
    const int mb_array_size= s->mb_stride*s->mb_height;

        /* shared path: caller already owns the pixel buffers */
        assert(pic->data[0]);
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
        pic->type= FF_BUFFER_TYPE_SHARED;

        assert(!pic->data[0]);

        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

        /* sanity-check what the (possibly user-supplied) get_buffer returned */
        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
            fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);

        /* strides must stay constant across frames once established */
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
            fprintf(stderr, "get_buffer() failed (stride changed)\n");

        if(pic->linesize[1] != pic->linesize[2]){
            fprintf(stderr, "get_buffer() failed (uv stride missmatch)\n");

        s->linesize = pic->linesize[0];
        s->uvlinesize= pic->linesize[1];

    /* first allocation of the per-MB side tables for this Picture */
    if(pic->qscale_table==NULL){
        CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
        CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
        CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
        CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(int))
        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
        if(s->out_format == FMT_H264){
            CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
            CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
        pic->qstride= s->mb_stride;

    //it might be nicer if the application would keep track of these but it would require a API change
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
    s->prev_pict_types[0]= s->pict_type;
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
        pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway

fail: //for the CHECKED_ALLOCZ macro
/**
 * deallocates a picture
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    /* return non-shared pixel buffers to whoever allocated them */
    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);

    /* per-MB side tables allocated in alloc_picture() */
    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mb_cmp_score);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);

        av_freep(&pic->motion_val[i]);
        av_freep(&pic->ref_index[i]);

    if(pic->type == FF_BUFFER_TYPE_SHARED){
/* init common structure for both encoder and decoder */
int MPV_common_init(MpegEncContext *s)
    int y_size, c_size, yc_size, i, mb_array_size, x, y;

    dsputil_init(&s->dsp, s->avctx);

    s->flags= s->avctx->flags;

    /* macroblock grid geometry; mb_stride has one extra column */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    mb_array_size= s->mb_height * s->mb_stride;

    /* set default edge pos, will be overridden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[3]= s->mb_width*2 + 2;
    s->block_wrap[5]= s->mb_width + 2;

    /* sizes of the per-block prediction arrays (luma, chroma, combined) */
    y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
    c_size = (s->mb_width + 2) * (s->mb_height + 2);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
                        + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
                        + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);

    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

        int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;

        /* Allocate MV tables; the +stride+1 offsets below skip the border row/col */
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
        CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))

        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))

        if(s->codec_id==CODEC_ID_MPEG4){
            CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
            CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);

        if(s->msmpeg4_version){
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);

        /* Allocate MB type table */
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint8_t)) //needed for encoding

    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))

    if (s->out_format == FMT_H263 || s->encoding) {
        size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
        CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));

    if(s->codec_id==CODEC_ID_MPEG4){
        /* interlaced direct mode decoding tables */
        CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
        CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
    if (s->out_format == FMT_H263) {
        /* AC coefficient prediction state: one plane each for Y, Cb, Cr */
        CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
        s->ac_val[1] = s->ac_val[0] + y_size;
        s->ac_val[2] = s->ac_val[1] + c_size;

        CHECKED_ALLOCZ(s->coded_block, y_size);

        /* divx501 bitstream reorder buffer */
        CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);

        /* cbp, ac_pred, pred_dir */
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        //MN: we need these for error resilience of intra-frames
        /* DC prediction state, reset to the "no prediction" value 1024 */
        CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
        s->dc_val[1] = s->dc_val[0] + y_size;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val[0][i] = 1024;

    /* which mb is a intra block */
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
    memset(s->mbintra_table, 1, mb_array_size);

    /* default structure is frame */
    s->picture_structure = PICT_FRAME;

    /* init macroblock skip table */
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);

    s->block= s->blocks[0];

    s->parse_context.state= -1;

    s->context_initialized = 1;
/* free everything allocated by MPV_common_init (encoder and decoder) */
void MPV_common_end(MpegEncContext *s)
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the derived (offset) pointers must be cleared by hand — they were
       never separate allocations */
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;

    av_freep(&s->motion_val);
    av_freep(&s->dc_val[0]);
    av_freep(&s->ac_val[0]);
    av_freep(&s->coded_block);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);
    av_freep(&s->me.scratchpad);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    av_freep(&s->tex_pb_buffer);
    av_freep(&s->pb2_buffer);
    av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
    av_freep(&s->field_mv_table);
    av_freep(&s->field_select_table);
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);

    for(i=0; i<MAX_PICTURE_COUNT; i++){
        free_picture(s, &s->picture[i]);
    avcodec_default_free_buffers(s->avctx);
    s->context_initialized = 0;
#ifdef CONFIG_ENCODERS

/* init video encoder: copies user settings from the AVCodecContext,
   validates codec/flag combinations, picks quantizer biases, dispatches
   per-codec setup, then allocates the shared state via MPV_common_init()
   and builds the quantization tables */
int MPV_encode_init(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;

    avctx->pix_fmt = PIX_FMT_YUV420P;

    /* copy user-tunable encoding parameters into the context */
    s->bit_rate = avctx->bit_rate;
    s->bit_rate_tolerance = avctx->bit_rate_tolerance;
    s->width = avctx->width;
    s->height = avctx->height;
    if(avctx->gop_size > 600){
        fprintf(stderr, "Warning keyframe interval too large! reducing it ...\n");
    s->gop_size = avctx->gop_size;
    s->rtp_mode = avctx->rtp_mode;
    s->rtp_payload_size = avctx->rtp_payload_size;
    if (avctx->rtp_callback)
        s->rtp_callback = avctx->rtp_callback;
    s->max_qdiff= avctx->max_qdiff;
    s->qcompress= avctx->qcompress;
    s->qblur= avctx->qblur;
    s->flags= avctx->flags;
    s->max_b_frames= avctx->max_b_frames;
    s->b_frame_strategy= avctx->b_frame_strategy;
    s->codec_id= avctx->codec->id;
    s->luma_elim_threshold  = avctx->luma_elim_threshold;
    s->chroma_elim_threshold= avctx->chroma_elim_threshold;
    s->strict_std_compliance= avctx->strict_std_compliance;
    s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
    s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
    s->mpeg_quant= avctx->mpeg_quant;

    if (s->gop_size <= 1) {

    s->me_method = avctx->me_method;

    s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);

    /* adaptive quantization is enabled whenever any masking option is set */
    s->adaptive_quant= (   s->avctx->lumi_masking
                        || s->avctx->dark_masking
                        || s->avctx->temporal_cplx_masking
                        || s->avctx->spatial_cplx_masking
                        || s->avctx->p_masking)

    s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);

    /* reject flag/codec combinations that only MPEG-4 (or MPEG-1) support */
    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
        fprintf(stderr, "4MV not supporetd by codec\n");

    if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
        fprintf(stderr, "qpel not supporetd by codec\n");

    if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
        fprintf(stderr, "data partitioning not supporetd by codec\n");

    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO){
        fprintf(stderr, "b frames not supporetd by codec\n");

    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
        fprintf(stderr, "mpeg2 style quantization not supporetd by codec\n");

    /* default quantizer rounding biases, per coding family */
    if(s->codec_id==CODEC_ID_MJPEG){
        s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
        s->inter_quant_bias= 0;
    }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO){
        s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
        s->inter_quant_bias= 0;
        s->intra_quant_bias=0;
        s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x

    /* explicit user overrides win over the defaults above */
    if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias= avctx->intra_quant_bias;
    if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias= avctx->inter_quant_bias;

    /* per-codec setup: output format, delay, codec-specific quirks */
    switch(avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= 0; //s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        s->mjpeg_write_tables = 1; /* write all tables */
        s->mjpeg_data_only_frames = 0; /* write all the needed headers */
        s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
        s->mjpeg_vsample[1] = 1; /* the only currently supported values */
        s->mjpeg_vsample[2] = 1;
        s->mjpeg_hsample[0] = 2;
        s->mjpeg_hsample[1] = 1;
        s->mjpeg_hsample[2] = 1;
        if (mjpeg_init(s) < 0)
        if (h263_get_picture_format(s->width, s->height) == 7) {
            printf("Input picture size isn't suitable for h263 codec! try h263+\n");
        s->out_format = FMT_H263;
        s->out_format = FMT_H263;
        s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
        s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
        /* These are just to be sure */
        s->out_format = FMT_H263;
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->low_delay= s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
    case CODEC_ID_MSMPEG4V1:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 1;
    case CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 2;
    case CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 3;
        s->flipflop_rounding=1;
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 4;
        s->flipflop_rounding=1;
        s->out_format = FMT_H263;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 5;
        s->flipflop_rounding=1;

    { /* set up some save defaults, some codecs might override them later */
        /* NOTE(review): av_mallocz already zero-fills, so the memset of
           default_mv_penalty below is redundant; also the result is not
           NULL-checked before use */
        default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
        memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
        memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));

        for(i=-16; i<16; i++){
            default_fcode_tab[i + MAX_MV]= 1;
    s->me.mv_penalty= default_mv_penalty;
    s->fcode_tab= default_fcode_tab;
    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;

    /* dont use mv_penalty table for crap MV as it would be confused */
    //FIXME remove after fixing / removing old ME
    if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;

    if (MPV_common_init(s) < 0)

#ifdef CONFIG_ENCODERS
    if (s->out_format == FMT_H263)
    if(s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if (s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* init default q matrix (permuted for the selected IDCT) */
        int j= s->dsp.idct_permutation[i];
        if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        }else if(s->out_format == FMT_H263){
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias,
                       s->intra_matrix, s->intra_quant_bias, 1, 31);
        convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias,
                       s->inter_matrix, s->inter_quant_bias, 1, 31);

    if(ff_rate_control_init(s) < 0)

    s->picture_number = 0;
    s->picture_in_gop_number = 0;
    s->fake_picture_number = 0;
    /* motion detector init */
/* tear down the encoder: rate control, then codec-specific cleanup */
int MPV_encode_end(AVCodecContext *avctx)
    MpegEncContext *s = avctx->priv_data;

    ff_rate_control_uninit(s);

    if (s->out_format == FMT_MJPEG)

#endif //CONFIG_ENCODERS
/* precompute the max_level/max_run/index_run lookup tables of an RLTable
   from its raw (run, level) entries, once for "not last" and once for
   "last" coefficients */
void init_rl(RLTable *rl)
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        /* rl->n marks "no entry"; NOTE(review): memset truncates it to a
           byte — assumes rl->n fits in 8 bits, TODO confirm */
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        /* copy the scratch tables into heap storage on the RLTable;
           NOTE(review): av_malloc results are not NULL-checked */
        rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* draw the edges of width 'w' of an image of size width, height
   (replicates the border pixels outward so motion compensation can read
   outside the picture) */
//FIXME check that this is ok for mpeg4 interlaced
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
    uint8_t *ptr, *last_line;

    last_line = buf + (height - 1) * wrap;
        /* replicate the first/last rows into the top/bottom borders */
        memcpy(buf - (i + 1) * wrap, buf, width);
        memcpy(last_line + (i + 1) * wrap, last_line, width);
    /* replicate the first/last columns into the left/right borders */
    for(i=0;i<height;i++) {
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
    /* fill the four corner blocks from the nearest corner pixel */
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
/* returns the index of a free slot in s->picture[]; for shared pictures a
   completely untyped slot is preferred, otherwise any slot without pixel
   data will do */
static int find_unused_picture(MpegEncContext *s, int shared){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
        /* first pass: prefer reusing a slot that already has side tables */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0]==NULL) break;

    assert(i<MAX_PICTURE_COUNT);
/* generic function for encode/decode called before a frame is coded/decoded */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != B_TYPE && s->last_picture_ptr) {
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
            for(i=0; i<MAX_PICTURE_COUNT; i++){
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    fprintf(stderr, "releasing zombie picture\n");
                    avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);

        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);

        /* pick a slot for the frame about to be coded/decoded; B frames are
           never used as references */
        i= find_unused_picture(s, 0);

        pic= (AVFrame*)&s->picture[i];
        pic->reference= s->pict_type != B_TYPE ? 3 : 0;

        if(s->current_picture_ptr)
            pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;

        alloc_picture(s, (Picture*)pic, 0);

        s->current_picture_ptr= &s->picture[i];

    s->current_picture_ptr->pict_type= s->pict_type;
    s->current_picture_ptr->quality= s->qscale;
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    s->current_picture= *s->current_picture_ptr;

    if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
        /* non-B frames advance the reference chain */
        if (s->pict_type != B_TYPE) {
            s->last_picture_ptr= s->next_picture_ptr;
            s->next_picture_ptr= s->current_picture_ptr;

    if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
    if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
    if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;

    if(s->picture_structure!=PICT_FRAME){
        /* field pictures: address the selected field by offsetting the data
           pointers and doubling the strides */
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;

    if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
        fprintf(stderr, "warning: first frame is no keyframe\n");
        assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference

    s->hurry_up= s->avctx->hurry_up;
    s->error_resilience= avctx->error_resilience;

    /* set dequantizer, we cant do it during init as it might change for mpeg4
       and we cant do it in the header decode as init isnt called for mpeg4 there yet */
    if(s->out_format == FMT_H263){
        s->dct_unquantize = s->dct_unquantize_mpeg2;
        s->dct_unquantize = s->dct_unquantize_h263;
        s->dct_unquantize = s->dct_unquantize_mpeg1;
/* generic function for encode/decode called after a frame has been coded/decoded */
void MPV_frame_end(MpegEncContext *s)
    /* draw edge for correct motion prediction if outside */
    if(s->codec_id!=CODEC_ID_SVQ1){
        if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);

    s->last_pict_type    = s->pict_type;
    if(s->pict_type!=B_TYPE){
        s->last_non_b_pict_type= s->pict_type;

    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
    assert(i<MAX_PICTURE_COUNT);

        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);

    // clear copies, to avoid confusion
        memset(&s->last_picture, 0, sizeof(Picture));
        memset(&s->next_picture, 0, sizeof(Picture));
        memset(&s->current_picture, 0, sizeof(Picture));
/**
 * prints debugging info for the given picture (skip counts, per-MB qscale,
 * and a one-character-per-MB type/direction map, depending on debug flags).
 */
void ff_print_debug_info(MpegEncContext *s, Picture *pict){
    if(!pict || !pict->mb_type) return;

    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;
                    printf("%1d", count);
                if(s->avctx->debug&FF_DEBUG_QP){
                    printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];

                    //Type & MV direction
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                    else if(IS_INTRA4x4(mb_type))
                    else if(IS_INTRA16x16(mb_type))
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                    else if(IS_DIRECT(mb_type))
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                    else if(IS_GMC(mb_type))
                    else if(IS_SKIP(mb_type))
                    else if(!USES_LIST(mb_type, 1))
                    else if(!USES_LIST(mb_type, 0))
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));

                    /* partition size */
                    else if(IS_16X8(mb_type))
                    else if(IS_8X16(mb_type))
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
#ifdef CONFIG_ENCODERS

/* sum of absolute errors of a 16x16 block against a constant value 'ref'
   (used as a flatness measure by get_intra_count) */
static int get_sae(uint8_t *src, int ref, int stride){
    for(y=0; y<16; y++){
        for(x=0; x<16; x++){
            acc+= ABS(src[x+y*stride] - ref);
/* counts the 16x16 blocks for which intra coding looks cheaper than
   inter coding against ref: a block counts when its deviation from its
   own mean (sae + 500) is below its SAD against the reference */
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
    for(y=0; y<h; y+=16){
        for(x=0; x<w; x+=16){
            int offset= x + y*stride;
            int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
            /* mean of the 16x16 block, rounded (256 pixels -> >>8) */
            int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
            int sae = get_sae(src + offset, mean, stride);

            acc+= sae + 500 < sad;
/* stores the user-supplied input frame in s->input_picture[] for later
   encoding; uses the caller's buffers directly when strides match and
   preservation is allowed, otherwise copies plane by plane */
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
    const int encoding_delay= s->max_b_frames;

    /* direct (zero-copy) use of the caller's buffers requires matching
       strides and that the caller keeps the data alive */
    if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
    if(pic_arg->linesize[0] != s->linesize) direct=0;
    if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
    if(pic_arg->linesize[2] != s->uvlinesize) direct=0;

//    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i= find_unused_picture(s, 1);

        pic= (AVFrame*)&s->picture[i];

            pic->data[i]= pic_arg->data[i];
            pic->linesize[i]= pic_arg->linesize[i];
        alloc_picture(s, (Picture*)pic, 1);
        i= find_unused_picture(s, 0);

        pic= (AVFrame*)&s->picture[i];

        alloc_picture(s, (Picture*)pic, 0);

        /* the input will be 16 pixels to the right relative to the actual buffer start
         * and the current_pic, so the buffer can be reused, yes its not beautiful
         */

        if(   pic->data[0] == pic_arg->data[0]
           && pic->data[1] == pic_arg->data[1]
           && pic->data[2] == pic_arg->data[2]){
            /* buffers differ: copy each plane, row by row if strides differ */
            int h_chroma_shift, v_chroma_shift;

            avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);

                int src_stride= pic_arg->linesize[i];
                int dst_stride= i ? s->uvlinesize : s->linesize;
                int h_shift= i ? h_chroma_shift : 0;
                int v_shift= i ? v_chroma_shift : 0;
                int w= s->width >>h_shift;
                int h= s->height>>v_shift;
                uint8_t *src= pic_arg->data[i];
                uint8_t *dst= pic->data[i];

                if(src_stride==dst_stride)
                    memcpy(dst, src, src_stride*h);
                    memcpy(dst, src, w);

    pic->quality= pic_arg->quality;
    pic->pict_type= pic_arg->pict_type;
    pic->pts = pic_arg->pts;

    if(s->input_picture[encoding_delay])
        pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;

    /* shift buffer entries */
    for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
        s->input_picture[i-1]= s->input_picture[i];

    s->input_picture[encoding_delay]= (Picture*)pic;
/* Pick the next picture to encode and decide its coded type/order.
 * Shifts reordered_input_picture[], then — when the slot is empty —
 * classifies the head of the input FIFO as I/P/B using (in priority
 * order) the GOP state, 2-pass stats (CODEC_FLAG_PASS2), user-forced
 * pict_types, or the configured b_frame_strategy.  Finally sets up
 * s->new_picture / s->current_picture(_ptr) for the encode. */
1263 static void select_input_picture(MpegEncContext *s){
1265 const int encoding_delay= s->max_b_frames;
1266 int coded_pic_num=0;
1268 if(s->reordered_input_picture[0])
1269 coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
/* advance the coded-order FIFO by one slot */
1271 for(i=1; i<MAX_PICTURE_COUNT; i++)
1272 s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1273 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1275 /* set next picture types & ordering */
1276 if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
/* no reference yet (stream start) or intra-only codec -> force I */
1277 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1278 s->reordered_input_picture[0]= s->input_picture[0];
1279 s->reordered_input_picture[0]->pict_type= I_TYPE;
1280 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
/* 2nd pass: replay the picture types recorded in the stats file */
1284 if(s->flags&CODEC_FLAG_PASS2){
1285 for(i=0; i<s->max_b_frames+1; i++){
1286 int pict_num= s->input_picture[0]->display_picture_number + i;
1287 int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1288 s->input_picture[i]->pict_type= pict_type;
1290 if(i + 1 >= s->rc_context.num_entries) break;
1294 if(s->input_picture[0]->pict_type){
1295 /* user selected pict_type */
1296 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1297 if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1300 if(b_frames > s->max_b_frames){
1301 fprintf(stderr, "warning, too many bframes in a row\n");
1302 b_frames = s->max_b_frames;
1304 }else if(s->b_frame_strategy==0){
/* strategy 0: always use the maximum number of B-frames */
1305 b_frames= s->max_b_frames;
1306 }else if(s->b_frame_strategy==1){
/* strategy 1: score candidates by estimated intra-MB count and stop
 * at the first frame that changed too much */
1307 for(i=1; i<s->max_b_frames+1; i++){
1308 if(s->input_picture[i]->b_frame_score==0){
1309 s->input_picture[i]->b_frame_score=
1310 get_intra_count(s, s->input_picture[i ]->data[0],
1311 s->input_picture[i-1]->data[0], s->linesize) + 1;
1314 for(i=0; i<s->max_b_frames; i++){
1315 if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1318 b_frames= FFMAX(0, i-1);
/* invalidate cached scores of the pictures we just consumed */
1321 for(i=0; i<b_frames+1; i++){
1322 s->input_picture[i]->b_frame_score=0;
1325 fprintf(stderr, "illegal b frame strategy\n");
1330 //static int b_count=0;
1331 //b_count+= b_frames;
1332 //printf("b_frames: %d\n", b_count);
/* the frame after the B-run is coded first, as I (GOP boundary) or P */
1334 s->reordered_input_picture[0]= s->input_picture[b_frames];
1335 if( s->picture_in_gop_number + b_frames >= s->gop_size
1336 || s->reordered_input_picture[0]->pict_type== I_TYPE)
1337 s->reordered_input_picture[0]->pict_type= I_TYPE;
1339 s->reordered_input_picture[0]->pict_type= P_TYPE;
1340 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1341 for(i=0; i<b_frames; i++){
1343 s->reordered_input_picture[i+1]= s->input_picture[i];
1344 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1345 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1350 if(s->reordered_input_picture[0]){
/* B-frames are never used as references */
1351 s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1353 s->new_picture= *s->reordered_input_picture[0];
1355 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1356 // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
1358 int i= find_unused_picture(s, 0);
1359 Picture *pic= &s->picture[i];
1361 /* mark us unused / free shared pic */
1363 s->reordered_input_picture[0]->data[i]= NULL;
1364 s->reordered_input_picture[0]->type= 0;
1366 //FIXME bad, copy * except
1367 pic->pict_type = s->reordered_input_picture[0]->pict_type;
1368 pic->quality = s->reordered_input_picture[0]->quality;
1369 pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1370 pic->reference = s->reordered_input_picture[0]->reference;
1372 alloc_picture(s, pic, 0);
1374 s->current_picture_ptr= pic;
1376 // input is not a shared pix -> reuse buffer for current_pix
1378 assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1379 || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1381 s->current_picture_ptr= s->reordered_input_picture[0];
1383 //reverse the +16 we did before storing the input
1384 s->current_picture_ptr->data[i]-=16;
1387 s->current_picture= *s->current_picture_ptr;
1389 s->picture_number= s->new_picture.display_picture_number;
1390 //printf("dpn:%d\n", s->picture_number);
/* nothing to encode this call: clear new_picture so the caller skips */
1392 memset(&s->new_picture, 0, sizeof(Picture));
/* Public encode entry point: queue the input frame, pick the next frame
 * in coded order, encode it, export per-frame statistics into avctx,
 * and return the number of bytes written to buf. */
1396 int MPV_encode_picture(AVCodecContext *avctx,
1397 unsigned char *buf, int buf_size, void *data)
1399 MpegEncContext *s = avctx->priv_data;
1400 AVFrame *pic_arg = data;
1403 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1405 s->picture_in_gop_number++;
1407 load_input_picture(s, pic_arg);
1409 select_input_picture(s);
/* output a coded frame? (may be empty while the B-frame FIFO fills) */
1412 if(s->new_picture.data[0]){
1414 s->pict_type= s->new_picture.pict_type;
1415 if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we dont touch it for CBR */
1416 s->qscale= (int)(s->new_picture.quality+0.5);
1420 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1421 MPV_frame_start(s, avctx);
1423 encode_picture(s, s->picture_number);
/* export bit-usage statistics to the public context */
1425 avctx->real_pict_num = s->picture_number;
1426 avctx->header_bits = s->header_bits;
1427 avctx->mv_bits = s->mv_bits;
1428 avctx->misc_bits = s->misc_bits;
1429 avctx->i_tex_bits = s->i_tex_bits;
1430 avctx->p_tex_bits = s->p_tex_bits;
1431 avctx->i_count = s->i_count;
1432 avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1433 avctx->skip_count = s->skip_count;
1437 if (s->out_format == FMT_MJPEG)
1438 mjpeg_picture_trailer(s);
1440 if(s->flags&CODEC_FLAG_PASS1)
1441 ff_write_pass1_stats(s);
/* accumulate per-plane error (PSNR) totals */
1444 avctx->error[i] += s->current_picture_ptr->error[i];
1448 s->input_picture_number++;
1450 flush_put_bits(&s->pb);
1451 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1453 s->total_bits += s->frame_bits;
1454 avctx->frame_bits = s->frame_bits;
1456 return pbBufPtr(&s->pb) - s->pb.buf;
1459 #endif //CONFIG_ENCODERS
/* Global motion compensation for the 1-warp-point case (MPEG-4 GMC):
 * a single translation with s->sprite_warping_accuracy sub-pel precision
 * applied to the whole macroblock; falls back to plain (no_rnd) halfpel
 * put when the sub-pel fraction is zero. */
1461 static inline void gmc1_motion(MpegEncContext *s,
1462 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1464 uint8_t **ref_picture, int src_offset)
1467 int offset, src_x, src_y, linesize, uvlinesize;
1468 int motion_x, motion_y;
/* luma: integer position from the sprite offset, remainder kept as the
 * sub-pel phase (scaled to 1/16-pel for dsp.gmc1) */
1471 motion_x= s->sprite_offset[0][0];
1472 motion_y= s->sprite_offset[0][1];
1473 src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1474 src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1475 motion_x<<=(3-s->sprite_warping_accuracy);
1476 motion_y<<=(3-s->sprite_warping_accuracy);
1477 src_x = clip(src_x, -16, s->width);
1478 if (src_x == s->width)
1480 src_y = clip(src_y, -16, s->height);
1481 if (src_y == s->height)
1484 linesize = s->linesize;
1485 uvlinesize = s->uvlinesize;
1487 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1489 dest_y+=dest_offset;
/* source overlaps the picture border -> go through the edge buffer */
1490 if(s->flags&CODEC_FLAG_EMU_EDGE){
1491 if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
1492 || src_y + 17 >= s->v_edge_pos){
1493 ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1494 ptr= s->edge_emu_buffer;
/* any sub-pel phase left? then use the gmc1 interpolator (two 8-wide
 * halves), otherwise plain halfpel copy */
1498 if((motion_x|motion_y)&7){
1499 s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1500 s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1504 dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1505 if (s->no_rounding){
1506 s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1508 s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
1512 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma: same scheme at half resolution using the second warp offset */
1514 motion_x= s->sprite_offset[1][0];
1515 motion_y= s->sprite_offset[1][1];
1516 src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1517 src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1518 motion_x<<=(3-s->sprite_warping_accuracy);
1519 motion_y<<=(3-s->sprite_warping_accuracy);
1520 src_x = clip(src_x, -8, s->width>>1);
1521 if (src_x == s->width>>1)
1523 src_y = clip(src_y, -8, s->height>>1);
1524 if (src_y == s->height>>1)
1527 offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1528 ptr = ref_picture[1] + offset;
1529 if(s->flags&CODEC_FLAG_EMU_EDGE){
1530 if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
1531 || src_y + 9 >= s->v_edge_pos>>1){
1532 ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1533 ptr= s->edge_emu_buffer;
1537 s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1539 ptr = ref_picture[2] + offset;
1541 ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1542 ptr= s->edge_emu_buffer;
1544 s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* General global motion compensation (2/3 warp points): applies the
 * affine sprite transform (sprite_offset + sprite_delta matrix) via
 * dsp.gmc, in two 8-pixel-wide halves for luma and once per chroma
 * plane at half resolution. */
1549 static inline void gmc_motion(MpegEncContext *s,
1550 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1552 uint8_t **ref_picture, int src_offset)
1555 int linesize, uvlinesize;
1556 const int a= s->sprite_warping_accuracy;
1559 linesize = s->linesize;
1560 uvlinesize = s->uvlinesize;
1562 ptr = ref_picture[0] + src_offset;
1564 dest_y+=dest_offset;
/* warped source coordinate of this MB's top-left luma sample */
1566 ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1567 oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
/* rounding constant is (1<<(2a+1)) - no_rounding, precision a+1 bits */
1569 s->dsp.gmc(dest_y, ptr, linesize, 16,
1572 s->sprite_delta[0][0], s->sprite_delta[0][1],
1573 s->sprite_delta[1][0], s->sprite_delta[1][1],
1574 a+1, (1<<(2*a+1)) - s->no_rounding,
1575 s->h_edge_pos, s->v_edge_pos);
1576 s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1577 ox + s->sprite_delta[0][0]*8,
1578 oy + s->sprite_delta[1][0]*8,
1579 s->sprite_delta[0][0], s->sprite_delta[0][1],
1580 s->sprite_delta[1][0], s->sprite_delta[1][1],
1581 a+1, (1<<(2*a+1)) - s->no_rounding,
1582 s->h_edge_pos, s->v_edge_pos);
1584 if(s->flags&CODEC_FLAG_GRAY) return;
1587 dest_cb+=dest_offset>>1;
1588 dest_cr+=dest_offset>>1;
/* chroma uses the second warp offset and half-size block coordinates */
1590 ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1591 oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1593 ptr = ref_picture[1] + (src_offset>>1);
1594 s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1597 s->sprite_delta[0][0], s->sprite_delta[0][1],
1598 s->sprite_delta[1][0], s->sprite_delta[1][1],
1599 a+1, (1<<(2*a+1)) - s->no_rounding,
1600 s->h_edge_pos>>1, s->v_edge_pos>>1);
1602 ptr = ref_picture[2] + (src_offset>>1);
1603 s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1606 s->sprite_delta[0][0], s->sprite_delta[0][1],
1607 s->sprite_delta[1][0], s->sprite_delta[1][1],
1608 a+1, (1<<(2*a+1)) - s->no_rounding,
1609 s->h_edge_pos>>1, s->v_edge_pos>>1);
/* Copy a block_w x block_h source block at (src_x, src_y) into
 * s->edge_emu_buffer, replicating border pixels for the parts that lie
 * outside the valid picture area [0,w) x [0,h).  Motion compensation
 * then reads from the buffer instead of out-of-bounds memory. */
1613 void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int block_w, int block_h,
1614 int src_x, int src_y, int w, int h){
1616 int start_y, start_x, end_y, end_x;
1617 uint8_t *buf= s->edge_emu_buffer;
/* clamp a source position that is entirely below/above the picture
 * onto the nearest valid row (same for columns below) */
1620 src+= (h-1-src_y)*linesize;
1622 }else if(src_y<=-block_h){
1623 src+= (1-block_h-src_y)*linesize;
1629 }else if(src_x<=-block_w){
1630 src+= (1-block_w-src_x);
/* intersection of the block with the valid area */
1634 start_y= FFMAX(0, -src_y);
1635 start_x= FFMAX(0, -src_x);
1636 end_y= FFMIN(block_h, h-src_y);
1637 end_x= FFMIN(block_w, w-src_x);
1639 // copy existing part
1640 for(y=start_y; y<end_y; y++){
1641 for(x=start_x; x<end_x; x++){
1642 buf[x + y*linesize]= src[x + y*linesize];
/* replicate the first valid row upward */
1647 for(y=0; y<start_y; y++){
1648 for(x=start_x; x<end_x; x++){
1649 buf[x + y*linesize]= buf[x + start_y*linesize];
/* replicate the last valid row downward */
1654 for(y=end_y; y<block_h; y++){
1655 for(x=start_x; x<end_x; x++){
1656 buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
/* replicate left and right columns across the full block height */
1660 for(y=0; y<block_h; y++){
1662 for(x=0; x<start_x; x++){
1663 buf[x + y*linesize]= buf[start_x + y*linesize];
1667 for(x=end_x; x<block_w; x++){
1668 buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1674 /* apply one mpeg motion vector to the three components */
/* Half-pel motion compensation of one 16-pixel-wide area (full MB or
 * one field) for luma plus both chroma planes.  motion_x/motion_y are
 * in half-pel units; h is the luma block height (16, or 8 for field
 * prediction); pix_op selects put or avg. */
1675 static inline void mpeg_motion(MpegEncContext *s,
1676 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1678 uint8_t **ref_picture, int src_offset,
1679 int field_based, op_pixels_func (*pix_op)[4],
1680 int motion_x, int motion_y, int h)
1683 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1686 if(s->quarter_sample)
/* dxy encodes the half-pel phase: bit0 = horizontal, bit1 = vertical */
1692 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1693 src_x = s->mb_x * 16 + (motion_x >> 1);
1694 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1696 /* WARNING: do not forget half pels */
1697 height = s->height >> field_based;
1698 v_edge_pos = s->v_edge_pos >> field_based;
1699 src_x = clip(src_x, -16, s->width);
1700 if (src_x == s->width)
1702 src_y = clip(src_y, -16, height);
1703 if (src_y == height)
/* doubled stride interleaves the two fields of a frame buffer */
1705 linesize = s->current_picture.linesize[0] << field_based;
1706 uvlinesize = s->current_picture.linesize[1] << field_based;
1707 ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1708 dest_y += dest_offset;
1710 if(s->flags&CODEC_FLAG_EMU_EDGE){
1711 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1712 || src_y + (motion_y&1) + h > v_edge_pos){
1713 ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
1714 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1715 ptr= s->edge_emu_buffer + src_offset;
1719 pix_op[0][dxy](dest_y, ptr, linesize, h);
1721 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma MV: H.263 rounds toward the nearest valid position, other
 * formats use a plain divide-by-two */
1723 if (s->out_format == FMT_H263) {
1725 if ((motion_x & 3) != 0)
1727 if ((motion_y & 3) != 0)
1734 dxy = ((my & 1) << 1) | (mx & 1);
1739 src_x = s->mb_x * 8 + mx;
1740 src_y = s->mb_y * (8 >> field_based) + my;
1741 src_x = clip(src_x, -8, s->width >> 1);
1742 if (src_x == (s->width >> 1))
1744 src_y = clip(src_y, -8, height >> 1);
1745 if (src_y == (height >> 1))
1747 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1748 ptr = ref_picture[1] + offset;
1750 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
1751 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1752 ptr= s->edge_emu_buffer + (src_offset >> 1);
1754 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1756 ptr = ref_picture[2] + offset;
1758 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
1759 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1760 ptr= s->edge_emu_buffer + (src_offset >> 1);
1762 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/* Quarter-pel motion compensation (MPEG-4 qpel) of one 16-pixel-wide
 * area: luma via qpix_op with a 4-bit sub-pel phase, chroma at half-pel
 * via pix_op.  Includes workarounds for buggy encoders' chroma MV
 * rounding (FF_BUG_QPEL_CHROMA / FF_BUG_QPEL_CHROMA2). */
1765 static inline void qpel_motion(MpegEncContext *s,
1766 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1768 uint8_t **ref_picture, int src_offset,
1769 int field_based, op_pixels_func (*pix_op)[4],
1770 qpel_mc_func (*qpix_op)[16],
1771 int motion_x, int motion_y, int h)
1774 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
/* dxy: 4-bit quarter-pel phase (2 bits vertical, 2 bits horizontal) */
1777 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1778 src_x = s->mb_x * 16 + (motion_x >> 2);
1779 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1781 height = s->height >> field_based;
1782 v_edge_pos = s->v_edge_pos >> field_based;
1783 src_x = clip(src_x, -16, s->width);
1784 if (src_x == s->width)
1786 src_y = clip(src_y, -16, height);
1787 if (src_y == height)
1789 linesize = s->linesize << field_based;
1790 uvlinesize = s->uvlinesize << field_based;
1791 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1792 dest_y += dest_offset;
1793 //printf("%d %d %d\n", src_x, src_y, dxy);
1795 if(s->flags&CODEC_FLAG_EMU_EDGE){
1796 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1797 || src_y + (motion_y&3) + h > v_edge_pos){
1798 ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based,
1799 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1800 ptr= s->edge_emu_buffer + src_offset;
/* frame MC uses the 16x16 table; field MC falls back to two 8-wide ops */
1805 qpix_op[0][dxy](dest_y, ptr, linesize);
1807 //damn interlaced mode
1808 //FIXME boundary mirroring is not exactly correct here
1809 qpix_op[1][dxy](dest_y , ptr , linesize);
1810 qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1813 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma MV derivation, with bug-for-bug compatibility modes */
1818 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
1819 static const int rtab[8]= {0,0,1,1,0,0,0,1};
1820 mx= (motion_x>>1) + rtab[motion_x&7];
1821 my= (motion_y>>1) + rtab[motion_y&7];
1822 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
1823 mx= (motion_x>>1)|(motion_x&1);
1824 my= (motion_y>>1)|(motion_y&1);
1832 dxy= (mx&1) | ((my&1)<<1);
1836 src_x = s->mb_x * 8 + mx;
1837 src_y = s->mb_y * (8 >> field_based) + my;
1838 src_x = clip(src_x, -8, s->width >> 1);
1839 if (src_x == (s->width >> 1))
1841 src_y = clip(src_y, -8, height >> 1);
1842 if (src_y == (height >> 1))
1845 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1846 ptr = ref_picture[1] + offset;
1848 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
1849 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1850 ptr= s->edge_emu_buffer + (src_offset >> 1);
1852 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1854 ptr = ref_picture[2] + offset;
1856 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
1857 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1858 ptr= s->edge_emu_buffer + (src_offset >> 1);
1860 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1863 inline int ff_h263_round_chroma(int x){
1865 return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1868 return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1873 * motion compensation of a single macroblock
1875 * @param dest_y luma destination pointer
1876 * @param dest_cb chroma cb/u destination pointer
1877 * @param dest_cr chroma cr/v destination pointer
1878 * @param dir direction (0->forward, 1->backward)
1879 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1880 * @param pix_op halfpel motion compensation function (average or put normally)
1881 * @param qpix_op qpel motion compensation function (average or put normally)
1882 * the motion vectors are taken from s->mv and the MV type from s->mv_type
1884 static inline void MPV_motion(MpegEncContext *s,
1885 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1886 int dir, uint8_t **ref_picture,
1887 op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
1889 int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
1891 uint8_t *ptr, *dest;
/* dispatch on the macroblock's motion type */
1897 switch(s->mv_type) {
/* 16x16: one MV for the whole MB; may be GMC, qpel, mspel or halfpel */
1901 if(s->real_sprite_warping_points==1){
1902 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
1905 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
1908 }else if(s->quarter_sample){
1909 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
1912 s->mv[dir][0][0], s->mv[dir][0][1], 16);
1914 ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
1915 ref_picture, pix_op,
1916 s->mv[dir][0][0], s->mv[dir][0][1], 16);
1920 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
1923 s->mv[dir][0][0], s->mv[dir][0][1], 16);
/* 8x8: four luma MVs, done inline; chroma gets one averaged MV below */
1929 if(s->quarter_sample){
1931 motion_x = s->mv[dir][i][0];
1932 motion_y = s->mv[dir][i][1];
1934 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1935 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
1936 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
1938 /* WARNING: do not forget half pels */
1939 src_x = clip(src_x, -16, s->width);
1940 if (src_x == s->width)
1942 src_y = clip(src_y, -16, s->height);
1943 if (src_y == s->height)
1946 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1947 if(s->flags&CODEC_FLAG_EMU_EDGE){
1948 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
1949 || src_y + (motion_y&3) + 8 > s->v_edge_pos){
1950 ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1951 ptr= s->edge_emu_buffer;
1954 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1955 qpix_op[1][dxy](dest, ptr, s->linesize);
/* qpel: luma MVs are halved before being summed for the chroma MV */
1957 mx += s->mv[dir][i][0]/2;
1958 my += s->mv[dir][i][1]/2;
/* halfpel variant of the same 8x8 loop */
1962 motion_x = s->mv[dir][i][0];
1963 motion_y = s->mv[dir][i][1];
1965 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1966 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
1967 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
1969 /* WARNING: do not forget half pels */
1970 src_x = clip(src_x, -16, s->width);
1971 if (src_x == s->width)
1973 src_y = clip(src_y, -16, s->height);
1974 if (src_y == s->height)
1977 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
1978 if(s->flags&CODEC_FLAG_EMU_EDGE){
1979 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
1980 || src_y + (motion_y&1) + 8 > s->v_edge_pos){
1981 ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1982 ptr= s->edge_emu_buffer;
1985 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
1986 pix_op[1][dxy](dest, ptr, s->linesize, 8);
1988 mx += s->mv[dir][i][0];
1989 my += s->mv[dir][i][1];
1993 if(s->flags&CODEC_FLAG_GRAY) break;
1994 /* In case of 8X8, we construct a single chroma motion vector
1995 with a special rounding */
1996 mx= ff_h263_round_chroma(mx);
1997 my= ff_h263_round_chroma(my);
1998 dxy = ((my & 1) << 1) | (mx & 1);
2002 src_x = mb_x * 8 + mx;
2003 src_y = mb_y * 8 + my;
2004 src_x = clip(src_x, -8, s->width/2);
2005 if (src_x == s->width/2)
2007 src_y = clip(src_y, -8, s->height/2);
2008 if (src_y == s->height/2)
2011 offset = (src_y * (s->uvlinesize)) + src_x;
2012 ptr = ref_picture[1] + offset;
2013 if(s->flags&CODEC_FLAG_EMU_EDGE){
2014 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
2015 || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
2016 ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2017 ptr= s->edge_emu_buffer;
2021 pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
2023 ptr = ref_picture[2] + offset;
2025 ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2026 ptr= s->edge_emu_buffer;
2028 pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
/* field prediction: two MVs, one per field (frame pictures), or a
 * single field MV for field pictures */
2031 if (s->picture_structure == PICT_FRAME) {
2032 if(s->quarter_sample){
2034 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2035 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2037 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2039 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2040 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2042 s->mv[dir][1][0], s->mv[dir][1][1], 8);
2045 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2046 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2048 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2050 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2051 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2053 s->mv[dir][1][0], s->mv[dir][1][1], 8);
/* field picture: may have to predict from the current picture's
 * opposite field (same-parity reference not yet available) */
2057 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2058 offset= s->field_select[dir][0] ? s->linesize : 0;
2060 ref_picture= s->current_picture.data;
2061 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2064 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2065 ref_picture, offset,
2067 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2074 /* put block[] to dest[] */
/* Dequantize one 8x8 block and write (overwrite) its IDCT to dest —
 * used for intra blocks. */
2075 static inline void put_dct(MpegEncContext *s,
2076 DCTELEM *block, int i, uint8_t *dest, int line_size)
2078 s->dct_unquantize(s, block, i, s->qscale);
2079 s->dsp.idct_put (dest, line_size, block);
2082 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized 8x8 block to dest; skipped
 * entirely when the block has no coded coefficients. */
2083 static inline void add_dct(MpegEncContext *s,
2084 DCTELEM *block, int i, uint8_t *dest, int line_size)
2086 if (s->block_last_index[i] >= 0) {
2087 s->dsp.idct_add (dest, line_size, block);
/* Dequantize one 8x8 block and add its IDCT to dest; skipped when the
 * block has no coded coefficients. */
2091 static inline void add_dequant_dct(MpegEncContext *s,
2092 DCTELEM *block, int i, uint8_t *dest, int line_size)
2094 if (s->block_last_index[i] >= 0) {
2095 s->dct_unquantize(s, block, i, s->qscale);
2097 s->dsp.idct_add (dest, line_size, block);
2102 * cleans dc, ac, coded_block for the current non intra MB
/* Reset the intra-prediction state (luma + chroma DC values to 1024,
 * AC coefficient rows to 0, and for MSMPEG4 v3+ the coded_block flags)
 * so a non-intra macroblock does not inherit stale predictors, then
 * clear the MB's entry in mbintra_table. */
2104 void ff_clean_intra_table_entries(MpegEncContext *s)
2106 int wrap = s->block_wrap[0];
2107 int xy = s->block_index[0];
/* luma DC predictors of the four 8x8 blocks */
2110 s->dc_val[0][xy + 1 ] =
2111 s->dc_val[0][xy + wrap] =
2112 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC predictors (two rows of two blocks each) */
2114 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2115 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2116 if (s->msmpeg4_version>=3) {
2117 s->coded_block[xy ] =
2118 s->coded_block[xy + 1 ] =
2119 s->coded_block[xy + wrap] =
2120 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors (one block per plane) */
2123 wrap = s->block_wrap[4];
2124 xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2126 s->dc_val[2][xy] = 1024;
2128 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2129 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2131 s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2134 /* generic function called after a macroblock has been parsed by the
2135 decoder or after it has been encoded by the encoder.
2137 Important variables used:
2138 s->mb_intra : true if intra macroblock
2139 s->mv_dir : motion vector direction
2140 s->mv_type : motion vector type
2141 s->mv : motion vector
2142 s->interlaced_dct : true if interlaced dct used (mpeg2)
2144 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2147 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2152 s->current_picture.qscale_table[mb_xy]= s->qscale;
2154 /* update DC predictors for P macroblocks */
2156 if (s->h263_pred || s->h263_aic) {
2157 if(s->mbintra_table[mb_xy])
2158 ff_clean_intra_table_entries(s);
2162 s->last_dc[2] = 128 << s->intra_dc_precision;
/* remember that this MB was intra so its predictors get cleaned later */
2165 else if (s->h263_pred || s->h263_aic)
2166 s->mbintra_table[mb_xy]=1;
2168 /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
2169 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here
2170 //FIXME a lot of thet is only needed for !low_delay
2171 const int wrap = s->block_wrap[0];
2172 const int xy = s->block_index[0];
2173 if(s->mv_type != MV_TYPE_8X8){
2174 int motion_x, motion_y;
2178 } else if (s->mv_type == MV_TYPE_16X16) {
2179 motion_x = s->mv[0][0][0];
2180 motion_y = s->mv[0][0][1];
2181 } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
/* average the two field MVs into one frame MV for the predictor */
2183 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
2184 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
2185 motion_x = (motion_x>>1) | (motion_x&1);
2187 s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
2188 s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
2189 s->field_select_table[mb_xy][i]= s->field_select[0][i];
2193 /* no update if 8X8 because it has been done during parsing */
/* store the same MV in all four 8x8 positions of this MB */
2194 s->motion_val[xy][0] = motion_x;
2195 s->motion_val[xy][1] = motion_y;
2196 s->motion_val[xy + 1][0] = motion_x;
2197 s->motion_val[xy + 1][1] = motion_y;
2198 s->motion_val[xy + wrap][0] = motion_x;
2199 s->motion_val[xy + wrap][1] = motion_y;
2200 s->motion_val[xy + 1 + wrap][0] = motion_x;
2201 s->motion_val[xy + 1 + wrap][1] = motion_y;
2204 if(s->encoding){ //FIXME encoding MUST be cleaned up
2205 if (s->mv_type == MV_TYPE_8X8)
2206 s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
2208 s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
/* reconstruct the MB unless we are encoding and don't need it (no PSNR,
 * intra-only or B-frame whose reconstruction is never referenced) */
2212 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2213 uint8_t *dest_y, *dest_cb, *dest_cr;
2214 int dct_linesize, dct_offset;
2215 op_pixels_func (*op_pix)[4];
2216 qpel_mc_func (*op_qpix)[16];
2217 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
2218 const int uvlinesize= s->current_picture.linesize[1];
2220 /* avoid copy if macroblock skipped in last frame too */
2221 if (s->pict_type != B_TYPE) {
2222 s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
2225 /* skip only during decoding as we might trash the buffers during encoding a bit */
2227 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2228 const int age= s->current_picture.age;
2234 assert(s->pict_type!=I_TYPE);
2236 (*mbskip_ptr) ++; /* indicate that this time we skiped it */
2237 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2239 /* if previous was skipped too, then nothing to do ! */
2240 if (*mbskip_ptr >= age && s->current_picture.reference){
2243 } else if(!s->current_picture.reference){
2244 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2245 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2247 *mbskip_ptr = 0; /* not skipped */
/* destination pointers; draw_horiz_band B-frame path writes rows at
 * the top of the buffer instead of the MB's absolute position */
2252 if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){ //FIXME precalc
2253 dest_y = s->current_picture.data[0] + mb_x * 16;
2254 dest_cb = s->current_picture.data[1] + mb_x * 8;
2255 dest_cr = s->current_picture.data[2] + mb_x * 8;
2257 dest_y = s->current_picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
2258 dest_cb = s->current_picture.data[1] + (mb_y * 8 * uvlinesize) + mb_x * 8;
2259 dest_cr = s->current_picture.data[2] + (mb_y * 8 * uvlinesize) + mb_x * 8;
/* interlaced DCT: blocks cover alternating lines of both fields */
2262 if (s->interlaced_dct) {
2263 dct_linesize = linesize * 2;
2264 dct_offset = linesize;
2266 dct_linesize = linesize;
2267 dct_offset = linesize * 8;
2271 /* motion handling */
2272 /* decoding or more than one mb_type (MC was allready done otherwise) */
2273 if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
2274 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2275 op_pix = s->dsp.put_pixels_tab;
2276 op_qpix= s->dsp.put_qpel_pixels_tab;
2278 op_pix = s->dsp.put_no_rnd_pixels_tab;
2279 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
/* forward then backward; second direction averages into the first */
2282 if (s->mv_dir & MV_DIR_FORWARD) {
2283 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2284 op_pix = s->dsp.avg_pixels_tab;
2285 op_qpix= s->dsp.avg_qpel_pixels_tab;
2287 if (s->mv_dir & MV_DIR_BACKWARD) {
2288 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2292 /* skip dequant / idct if we are really late ;) */
2293 if(s->hurry_up>1) return;
2295 /* add dct residue */
/* codecs whose dct_unquantize was not already applied during parsing */
2296 if(s->encoding || !( s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO
2297 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2298 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2299 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2300 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2301 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2303 if(!(s->flags&CODEC_FLAG_GRAY)){
2304 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
2305 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
2307 } else if(s->codec_id != CODEC_ID_WMV2){
2308 add_dct(s, block[0], 0, dest_y, dct_linesize);
2309 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2310 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2311 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2313 if(!(s->flags&CODEC_FLAG_GRAY)){
2314 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2315 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* WMV2 has its own macroblock reconstruction */
2320 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2324 /* dct only in intra block */
2325 if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
2326 put_dct(s, block[0], 0, dest_y, dct_linesize);
2327 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2328 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2329 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2331 if(!(s->flags&CODEC_FLAG_GRAY)){
2332 put_dct(s, block[4], 4, dest_cb, uvlinesize);
2333 put_dct(s, block[5], 5, dest_cr, uvlinesize);
/* already dequantized: plain IDCT put */
2336 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2337 s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]);
2338 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2339 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2341 if(!(s->flags&CODEC_FLAG_GRAY)){
2342 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2343 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2350 #ifdef CONFIG_ENCODERS
2352 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
/* Eliminate (zero out) block n entirely when its weighted coefficient
 * "cost" stays below threshold, i.e. drop blocks containing only a few
 * small coefficients that are not worth the bits to code.
 * A negative threshold additionally allows the DC coefficient to be
 * eliminated (skip_dc handling below).
 * NOTE(review): several lines of this routine (the weight table contents
 * and the score accumulation) are not visible in this excerpt; comments
 * only describe what the visible code shows. */
2354 static const char tab[64]=
2366 DCTELEM *block= s->block[n];
2367 const int last_index= s->block_last_index[n];
/* negative threshold signals that DC may be eliminated too */
2372 threshold= -threshold;
2376 /* are all coefficients which we could set to zero already zero? */
2377 if(last_index<=skip_dc - 1) return;
/* accumulate a score over the coefficients in (permuted) scan order */
2379 for(i=0; i<=last_index; i++){
2380 const int j = s->intra_scantable.permutated[i];
2381 const int level = ABS(block[j]);
2383 if(skip_dc && i==0) continue;
/* block is "expensive" enough -> keep it unchanged */
2392 if(score >= threshold) return;
/* clear the surviving coefficients and fix up block_last_index:
 * 0 if a DC value remains, -1 if the block is now completely empty */
2393 for(i=skip_dc; i<=last_index; i++){
2394 const int j = s->intra_scantable.permutated[i];
2397 if(block[0]) s->block_last_index[n]= 0;
2398 else s->block_last_index[n]= -1;
2401 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
/* Clamp every quantized AC coefficient of the block into the codec's
 * legal range [s->min_qcoeff, s->max_qcoeff].  Called after dct_quantize
 * reported an overflow.  The intra DC coefficient (index 0) is skipped. */
2404 const int maxlevel= s->max_qcoeff;
2405 const int minlevel= s->min_qcoeff;
2408 i=1; //skip clipping of intra dc
/* walk the coefficients in (permuted) zig-zag scan order */
2412 for(;i<=last_index; i++){
2413 const int j= s->intra_scantable.permutated[i];
2414 int level = block[j];
2416 if (level>maxlevel) level=maxlevel;
2417 else if(level<minlevel) level=minlevel;
2424 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
/* Sum of absolute differences between vertically adjacent pixels over a
 * 16x8 area; a vertical-correlation measure used to choose between
 * progressive and interlaced DCT.  (SAD variant.) */
2429 for(x=0; x<16; x+=4){
2430 score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride])
2431 +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
2439 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
/* Same vertical-correlation measure as pix_vcmp16x8, but applied to the
 * difference s1 - s2 (i.e. the motion-compensated residual).
 * (SAD variant.) */
2444 for(x=0; x<16; x++){
2445 score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
2454 #define SQ(a) ((a)*(a))
/* Squared-error variant of pix_vcmp16x8 (selected by a preprocessor
 * conditional not visible in this excerpt): vertical correlation over a
 * 16x8 area using squared differences instead of absolute ones. */
2456 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2461 for(x=0; x<16; x+=4){
2462 score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride])
2463 +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2471 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
/* Squared-error variant of pix_diff_vcmp16x8: vertical correlation of
 * the residual s1 - s2 using squared differences. */
2476 for(x=0; x<16; x++){
2477 score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
2488 #endif //CONFIG_ENCODERS
2492 * @param h is the normal height, this will be reduced automatically if needed for the last row
2494 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
/* Hand a just-decoded horizontal stripe [y, y+h) to the application via
 * the avctx->draw_horiz_band() callback, choosing the source picture:
 * the current picture for B frames / low_delay output, otherwise the
 * last (reference) picture. */
2495 if ( s->avctx->draw_horiz_band
2496 && (s->last_picture_ptr || s->low_delay) ) {
2497 uint8_t *src_ptr[3];
/* clip the stripe against the bottom edge of the picture */
2499 h= FFMIN(h, s->height - y);
2501 if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME)
2504 offset = y * s->linesize;
2506 if(s->pict_type==B_TYPE || s->low_delay){
2507 src_ptr[0] = s->current_picture.data[0] + offset;
/* chroma planes: offset>>2 == (y/2) * (linesize/2)
 * (assumes uvlinesize == linesize/2 -- TODO confirm) */
2508 src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
2509 src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
2511 src_ptr[0] = s->last_picture.data[0] + offset;
2512 src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
2513 src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
2517 s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
2522 #ifdef CONFIG_ENCODERS
2524 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
/* Encode one macroblock: apply adaptive quantization, fetch (intra) or
 * motion-compensate + subtract (inter) the pixel data, run the DCT and
 * quantizer on all 6 blocks, optionally eliminate near-empty blocks,
 * then entropy-code the MB with the codec-specific routine.
 * (motion_x, motion_y) is the MV passed on to the bitstream writers. */
2526 const int mb_x= s->mb_x;
2527 const int mb_y= s->mb_y;
2530 int dct_offset = s->linesize*8; //default for progressive frames
2532 for(i=0; i<6; i++) skip_dct[i]=0;
/* ---- adaptive quantization: apply per-MB qscale delta ---- */
2534 if(s->adaptive_quant){
2535 s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;
/* H.263-style syntax only allows a dquant in [-2, 2] */
2537 if(s->out_format==FMT_H263){
2538 if (s->dquant> 2) s->dquant= 2;
2539 else if(s->dquant<-2) s->dquant=-2;
2542 if(s->codec_id==CODEC_ID_MPEG4){
/* direct-mode MBs cannot carry a quantizer change */
2544 if(s->mv_dir&MV_DIRECT)
2547 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2550 s->qscale+= s->dquant;
2551 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2552 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
/* ---- intra path: copy source pixels into the DCT blocks ---- */
2560 wrap_y = s->linesize;
2561 ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
/* MB sticks out of the picture -> replicate edges into a scratch buffer */
2563 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2564 ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2565 ptr= s->edge_emu_buffer;
/* decide progressive vs. interlaced DCT from vertical correlation;
 * the +100 bias favors the progressive (simpler) choice */
2569 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2570 int progressive_score, interlaced_score;
2572 progressive_score= pix_vcmp16x8(ptr, wrap_y ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2573 interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y , wrap_y*2);
2575 if(progressive_score > interlaced_score + 100){
2576 s->interlaced_dct=1;
2581 s->interlaced_dct=0;
2584 s->dsp.get_pixels(s->block[0], ptr , wrap_y);
2585 s->dsp.get_pixels(s->block[1], ptr + 8, wrap_y);
2586 s->dsp.get_pixels(s->block[2], ptr + dct_offset , wrap_y);
2587 s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
/* gray-only encoding: chroma blocks are skipped */
2589 if(s->flags&CODEC_FLAG_GRAY){
2593 int wrap_c = s->uvlinesize;
2594 ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2596 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2597 ptr= s->edge_emu_buffer;
2599 s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2601 ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2603 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2604 ptr= s->edge_emu_buffer;
2606 s->dsp.get_pixels(s->block[5], ptr, wrap_c);
/* ---- inter path: motion compensate, then subtract prediction ---- */
2609 op_pixels_func (*op_pix)[4];
2610 qpel_mc_func (*op_qpix)[16];
2611 uint8_t *dest_y, *dest_cb, *dest_cr;
2612 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2616 dest_y = s->current_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
2617 dest_cb = s->current_picture.data[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2618 dest_cr = s->current_picture.data[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2619 wrap_y = s->linesize;
2620 wrap_c = s->uvlinesize;
2621 ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2622 ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2623 ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
/* select rounding behavior of the MC functions (no_rounding alternates
 * on P frames for some codecs; B frames always use normal rounding) */
2625 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2626 op_pix = s->dsp.put_pixels_tab;
2627 op_qpix= s->dsp.put_qpel_pixels_tab;
2629 op_pix = s->dsp.put_no_rnd_pixels_tab;
2630 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
/* forward prediction first; if backward follows it is averaged in */
2633 if (s->mv_dir & MV_DIR_FORWARD) {
2634 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2635 op_pix = s->dsp.avg_pixels_tab;
2636 op_qpix= s->dsp.avg_qpel_pixels_tab;
2638 if (s->mv_dir & MV_DIR_BACKWARD) {
2639 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2642 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2643 ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2644 ptr_y= s->edge_emu_buffer;
/* interlaced DCT decision on the residual; larger bias (+600) than the
 * intra case */
2648 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2649 int progressive_score, interlaced_score;
2651 progressive_score= pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y )
2652 + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y );
2653 interlaced_score = pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y*2)
2654 + pix_diff_vcmp16x8(ptr_y + wrap_y , dest_y + wrap_y , wrap_y*2);
2656 if(progressive_score > interlaced_score + 600){
2657 s->interlaced_dct=1;
2662 s->interlaced_dct=0;
/* residual = source - prediction, per 8x8 block */
2665 s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
2666 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2667 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
2668 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2670 if(s->flags&CODEC_FLAG_GRAY){
2675 ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2676 ptr_cb= s->edge_emu_buffer;
2678 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2680 ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2681 ptr_cr= s->edge_emu_buffer;
2683 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2685 /* pre quantization */
/* if the MB residual is small relative to qscale, mark individual 8x8
 * blocks whose SAD is tiny so their DCT/quantization can be skipped */
2686 if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
2688 if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2689 if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2690 if(s->dsp.pix_abs8x8(ptr_y +dct_offset , dest_y +dct_offset , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2691 if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2692 if(s->dsp.pix_abs8x8(ptr_cb , dest_cb , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2693 if(s->dsp.pix_abs8x8(ptr_cr , dest_cr , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
/* (debug statistics, normally compiled out) */
2699 if(skip_dct[i]) num++;
2702 if(s->mb_x==0 && s->mb_y==0){
2704 printf("%6d %1d\n", stat[i], i);
2717 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
2718 ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2720 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
2721 (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P',
2722 s->qscale, adap_parm, s->qscale*adap_parm,
2723 s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
2726 /* DCT & quantize */
/* MJPEG always quantizes with qscale 8 (the real qscale is folded into
 * the quant matrix elsewhere) */
2727 if(s->out_format==FMT_MJPEG){
2730 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
2731 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2737 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2738 // FIXME we could decide to change to quantizer instead of clipping
2739 // JS: I don't think that would be a good idea it could lower quality instead
2740 // of improve it. Just INTRADC clipping deserves changes in quantizer
2741 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2743 s->block_last_index[i]= -1;
/* drop nearly-empty inter blocks to save bits (see
 * dct_single_coeff_elimination) */
2745 if(s->luma_elim_threshold && !s->mb_intra)
2747 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2748 if(s->chroma_elim_threshold && !s->mb_intra)
2750 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
/* gray mode: force intra chroma blocks to a flat mid-gray DC */
2753 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
2754 s->block_last_index[4]=
2755 s->block_last_index[5]= 0;
2757 s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
2760 /* huffman encode */
2761 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2762 case CODEC_ID_MPEG1VIDEO:
2763 mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
2765 case CODEC_ID_MPEG4:
2766 mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2767 case CODEC_ID_MSMPEG4V2:
2768 case CODEC_ID_MSMPEG4V3:
2770 msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2772 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
2774 case CODEC_ID_H263P:
2776 h263_encode_mb(s, s->block, motion_x, motion_y); break;
2778 case CODEC_ID_MJPEG:
2779 mjpeg_encode_mb(s, s->block); break;
2788 * combines the (truncated) bitstream to a complete frame
2789 * @returns -1 if no complete frame could be created
2791 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
/* Reassemble a complete frame from possibly-truncated input chunks.
 * 'next' is the offset of the next frame boundary inside *buf, or
 * END_NOT_FOUND when no boundary was detected yet.  On success *buf and
 * *buf_size are redirected to the internal parse buffer. */
2792 ParseContext *pc= &s->parse_context;
2796 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
2797 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
2801 /* copy overread bytes from the last frame into the buffer */
2802 for(; pc->overread>0; pc->overread--){
2803 pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
2806 pc->last_index= pc->index;
2808 /* no frame boundary found yet: buffer the whole input and return */
2809 if(next == END_NOT_FOUND){
2810 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2812 memcpy(&pc->buffer[pc->index], *buf, *buf_size);
2813 pc->index += *buf_size;
2817 pc->overread_index= pc->index + next;
2819 /* append to buffer */
2821 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2823 memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
2826 *buf_size= pc->last_index + next;
2829 /* store overread bytes */
/* a negative 'next' means bytes past the boundary were consumed; feed
 * them back into the start-code state for the following frame */
2830 for(;next < 0; next++){
2831 pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
2837 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
2838 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
2845 #ifdef CONFIG_ENCODERS
2846 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
/* Append 'length' bits from src to the bit writer pb.  src is read as
 * big-endian 16-bit words (be2me_16); whole words are written first,
 * then the remaining 1..15 bits. */
2848 int bytes= length>>4;
2849 int bits= length&15;
2852 if(length==0) return;
2854 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
2855 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
2858 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* Snapshot the encoder state that encode_mb() may modify, so a MB can be
 * tried with several coding modes: MV prediction state, DC predictors,
 * bit-count statistics and the quantizer. */
2861 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
2864 d->mb_skip_run= s->mb_skip_run;
2866 d->last_dc[i]= s->last_dc[i];
/* per-category bit statistics (used by rate control / 2-pass) */
2869 d->mv_bits= s->mv_bits;
2870 d->i_tex_bits= s->i_tex_bits;
2871 d->p_tex_bits= s->p_tex_bits;
2872 d->i_count= s->i_count;
2873 d->f_count= s->f_count;
2874 d->b_count= s->b_count;
2875 d->skip_count= s->skip_count;
2876 d->misc_bits= s->misc_bits;
2879 d->mb_skiped= s->mb_skiped;
2880 d->qscale= s->qscale;
2883 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* Counterpart of copy_context_before_encode(): copy the state that the
 * winning encode_mb() attempt produced back into the destination
 * context.  Copies strictly more than the "before" variant (MVs, MB
 * mode, partition bit writers, block_last_index). */
2886 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2887 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
2890 d->mb_skip_run= s->mb_skip_run;
2892 d->last_dc[i]= s->last_dc[i];
/* per-category bit statistics (used by rate control / 2-pass) */
2895 d->mv_bits= s->mv_bits;
2896 d->i_tex_bits= s->i_tex_bits;
2897 d->p_tex_bits= s->p_tex_bits;
2898 d->i_count= s->i_count;
2899 d->f_count= s->f_count;
2900 d->b_count= s->b_count;
2901 d->skip_count= s->skip_count;
2902 d->misc_bits= s->misc_bits;
/* chosen MB coding mode */
2904 d->mb_intra= s->mb_intra;
2905 d->mb_skiped= s->mb_skiped;
2906 d->mv_type= s->mv_type;
2907 d->mv_dir= s->mv_dir;
/* MPEG-4 data partitioning uses separate bit writers per partition */
2909 if(s->data_partitioning){
2911 d->tex_pb= s->tex_pb;
2915 d->block_last_index[i]= s->block_last_index[i];
2916 d->interlaced_dct= s->interlaced_dct;
2917 d->qscale= s->qscale;
2920 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2921 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2922 int *dmin, int *next_block, int motion_x, int motion_y)
/* Trial-encode the current MB as coding mode 'type' into the scratch
 * bitstream pb[*next_block] (plus pb2/tex_pb when data partitioning is
 * on).  If the resulting bit count beats the best so far (*dmin), the
 * winning state is saved into *best.  Used by the HQ mode decision in
 * encode_picture(). */
2926 copy_context_before_encode(s, backup, type);
2928 s->block= s->blocks[*next_block];
2929 s->pb= pb[*next_block];
2930 if(s->data_partitioning){
2931 s->pb2 = pb2 [*next_block];
2932 s->tex_pb= tex_pb[*next_block];
2935 encode_mb(s, motion_x, motion_y);
/* total cost = bits in all active partitions */
2937 bits_count= get_bit_count(&s->pb);
2938 if(s->data_partitioning){
2939 bits_count+= get_bit_count(&s->pb2);
2940 bits_count+= get_bit_count(&s->tex_pb);
2943 if(bits_count<*dmin){
2947 copy_context_after_encode(best, s, type);
2951 static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* Sum of squared errors between two w x h pixel blocks.  The common
 * 16x16 and 8x8 sizes use the optimized dsputil routines; anything else
 * (e.g. clipped edge MBs) falls back to a generic loop over the
 * square lookup table. */
2952 uint32_t *sq = squareTbl + 256;
2957 return s->dsp.sse[0](NULL, src1, src2, stride);
2958 else if(w==8 && h==8)
2959 return s->dsp.sse[1](NULL, src1, src2, stride);
2963 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2972 static void encode_picture(MpegEncContext *s, int picture_number)
/* Encode one complete picture: motion estimation, picture-type / f_code
 * decisions, rate control, picture header, then the per-MB loop with
 * slice/GOB handling and (in HQ mode) rate-distortion MB mode decision,
 * followed by codec-specific trailers. */
2974 int mb_x, mb_y, pdif = 0;
/* scratch contexts + double-buffered bit writers for trying several
 * coding modes per MB (see encode_mb_hq) */
2977 MpegEncContext best_s, backup_s;
2978 uint8_t bit_buf[2][3000];
2979 uint8_t bit_buf2[2][3000];
2980 uint8_t bit_buf_tex[2][3000];
2981 PutBitContext pb[2], pb2[2], tex_pb[2];
2984 init_put_bits(&pb [i], bit_buf [i], 3000, NULL, NULL);
2985 init_put_bits(&pb2 [i], bit_buf2 [i], 3000, NULL, NULL);
2986 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
2989 s->picture_number = picture_number;
2991 /* Reset the average MB variance */
2992 s->current_picture.mb_var_sum = 0;
2993 s->current_picture.mc_mb_var_sum = 0;
2996 /* we need to initialize some time vars before we can encode b-frames */
2997 // RAL: Condition added for MPEG1VIDEO
2998 if (s->codec_id == CODEC_ID_MPEG1VIDEO || (s->h263_pred && !s->h263_msmpeg4))
2999 ff_set_mpeg4_time(s, s->picture_number)
3002 s->scene_change_score=0;
3004 s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration
/* rounding-mode bookkeeping: MSMPEG4>=3 uses no_rounding on I frames;
 * some codecs toggle it on every non-B frame */
3006 if(s->pict_type==I_TYPE){
3007 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3008 else s->no_rounding=0;
3009 }else if(s->pict_type!=B_TYPE){
3010 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3011 s->no_rounding ^= 1;
3014 /* Estimate motion for every MB */
3015 s->mb_intra=0; //for the rate distoration & bit compare functions
3016 if(s->pict_type != I_TYPE){
3017 if(s->pict_type != B_TYPE){
/* optional pre-pass motion estimation, scanned bottom-right to
 * top-left */
3018 if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3020 s->me.dia_size= s->avctx->pre_dia_size;
3022 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3023 for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3026 ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
/* main motion estimation pass */
3033 s->me.dia_size= s->avctx->dia_size;
3034 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3035 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3036 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3037 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3038 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3039 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3042 s->block_index[0]+=2;
3043 s->block_index[1]+=2;
3044 s->block_index[2]+=2;
3045 s->block_index[3]+=2;
3047 /* compute motion vector & mb_type and store in context */
3048 if(s->pict_type==B_TYPE)
3049 ff_estimate_b_frame_motion(s, mb_x, mb_y);
3051 ff_estimate_p_frame_motion(s, mb_x, mb_y);
3054 }else /* if(s->pict_type == I_TYPE) */{
3056 //FIXME do we need to zero them?
3057 memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3058 memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3059 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3061 if(!s->fixed_qscale){
3062 /* finding spatial complexity for I-frame rate control */
3063 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3064 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3067 uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3069 int sum = s->dsp.pix_sum(pix, s->linesize);
/* variance estimate: E[x^2] - E[x]^2, rescaled */
3071 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3073 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3074 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3075 s->current_picture.mb_var_sum += varc;
/* scene change: promote the P frame to an intra frame */
3082 if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
3083 s->pict_type= I_TYPE;
3084 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3085 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* pick MV range codes (f_code/b_code) and clip over-long vectors */
3089 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3090 s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
3092 ff_fix_long_p_mvs(s);
3095 if(s->pict_type==B_TYPE){
3098 a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
3099 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
3100 s->f_code = FFMAX(a, b);
3102 a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
3103 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
3104 s->b_code = FFMAX(a, b);
3106 ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
3107 ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
3108 ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
3109 ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
/* rate control: pick the frame quantizer */
3113 if (s->fixed_qscale)
3114 s->frame_qscale = s->current_picture.quality;
3116 s->frame_qscale = ff_rate_estimate_qscale(s);
3118 if(s->adaptive_quant){
/* smooth per-MB qscales where the codec restricts dquant */
3120 switch(s->codec_id){
3121 case CODEC_ID_MPEG4:
3122 ff_clean_mpeg4_qscales(s);
3125 case CODEC_ID_H263P:
3126 ff_clean_h263_qscales(s);
3131 s->qscale= s->current_picture.qscale_table[0];
3133 s->qscale= (int)(s->frame_qscale + 0.5);
3135 if (s->out_format == FMT_MJPEG) {
3136 /* for mjpeg, we do include qscale in the matrix */
3137 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3139 int j= s->dsp.idct_permutation[i];
3141 s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3143 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3144 s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
3147 //FIXME var duplication
3148 s->current_picture.key_frame= s->pict_type == I_TYPE;
3149 s->current_picture.pict_type= s->pict_type;
3151 if(s->current_picture.key_frame)
3152 s->picture_in_gop_number=0;
/* write the codec-specific picture header and count its bits */
3154 s->last_bits= get_bit_count(&s->pb);
3155 switch(s->out_format) {
3157 mjpeg_picture_header(s);
3161 if (s->codec_id == CODEC_ID_WMV2)
3162 ff_wmv2_encode_picture_header(s, picture_number);
3163 else if (s->h263_msmpeg4)
3164 msmpeg4_encode_picture_header(s, picture_number);
3165 else if (s->h263_pred)
3166 mpeg4_encode_picture_header(s, picture_number);
3167 else if (s->h263_rv10)
3168 rv10_encode_picture_header(s, picture_number);
3170 h263_encode_picture_header(s, picture_number);
3174 mpeg1_encode_picture_header(s, picture_number);
3177 bits= get_bit_count(&s->pb);
3178 s->header_bits= bits - s->last_bits;
3190 /* init last dc values */
3191 /* note: quant matrix value (8) is implied here */
3192 s->last_dc[i] = 128;
3194 s->current_picture_ptr->error[i] = 0;
3197 s->last_mv[0][0][0] = 0;
3198 s->last_mv[0][0][1] = 0;
3199 s->last_mv[1][0][0] = 0;
3200 s->last_mv[1][0][1] = 0;
3205 if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
3206 s->gob_index = ff_h263_get_gob_height(s);
3208 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3209 ff_mpeg4_init_partitions(s);
/* ---- per-macroblock encoding loop ---- */
3214 s->first_slice_line = 1;
3215 s->ptr_lastgob = s->pb.buf;
3216 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3217 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3218 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3220 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3221 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3222 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3223 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3224 s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
3225 s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
3226 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3227 const int xy= mb_y*s->mb_stride + mb_x;
3228 int mb_type= s->mb_type[xy];
3234 s->block_index[0]+=2;
3235 s->block_index[1]+=2;
3236 s->block_index[2]+=2;
3237 s->block_index[3]+=2;
3238 s->block_index[4]++;
3239 s->block_index[5]++;
3241 /* write gob / video packet header */
3244 int current_packet_size, is_gob_start;
3246 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
/* start a new video packet / slice / GOB when the RTP payload size is
 * exceeded (codec-specific resync markers) */
3249 if(s->codec_id==CODEC_ID_MPEG4){
3250 if(current_packet_size >= s->rtp_payload_size
3251 && s->mb_y + s->mb_x>0){
3253 if(s->partitioned_frame){
3254 ff_mpeg4_merge_partitions(s);
3255 ff_mpeg4_init_partitions(s);
3257 ff_mpeg4_encode_video_packet_header(s);
3259 if(s->flags&CODEC_FLAG_PASS1){
3260 int bits= get_bit_count(&s->pb);
3261 s->misc_bits+= bits - s->last_bits;
3264 ff_mpeg4_clean_buffers(s);
3267 }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
3268 if( current_packet_size >= s->rtp_payload_size
3269 && s->mb_y + s->mb_x>0 && s->mb_skip_run==0){
3270 ff_mpeg1_encode_slice_header(s);
3271 ff_mpeg1_clean_buffers(s);
3275 if(current_packet_size >= s->rtp_payload_size
3276 && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
3278 h263_encode_gob_header(s, mb_y);
3284 s->ptr_lastgob = pbBufPtr(&s->pb);
3285 s->first_slice_line=1;
3286 s->resync_mb_x=mb_x;
3287 s->resync_mb_y=mb_y;
3292 if( (s->resync_mb_x == s->mb_x)
3293 && s->resync_mb_y+1 == s->mb_y){
3294 s->first_slice_line=0;
/* ---- HQ mode decision: more than one candidate MB type ---- */
3297 if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3299 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3301 copy_context_before_encode(&backup_s, s, -1);
3303 best_s.data_partitioning= s->data_partitioning;
3304 best_s.partitioned_frame= s->partitioned_frame;
3305 if(s->data_partitioning){
3306 backup_s.pb2= s->pb2;
3307 backup_s.tex_pb= s->tex_pb;
/* trial-encode each candidate mode; encode_mb_hq keeps the cheapest */
3310 if(mb_type&MB_TYPE_INTER){
3311 s->mv_dir = MV_DIR_FORWARD;
3312 s->mv_type = MV_TYPE_16X16;
3314 s->mv[0][0][0] = s->p_mv_table[xy][0];
3315 s->mv[0][0][1] = s->p_mv_table[xy][1];
3316 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb,
3317 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3319 if(mb_type&MB_TYPE_INTER4V){
3320 s->mv_dir = MV_DIR_FORWARD;
3321 s->mv_type = MV_TYPE_8X8;
3324 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3325 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3327 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb,
3328 &dmin, &next_block, 0, 0);
3330 if(mb_type&MB_TYPE_FORWARD){
3331 s->mv_dir = MV_DIR_FORWARD;
3332 s->mv_type = MV_TYPE_16X16;
3334 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3335 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3336 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb,
3337 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3339 if(mb_type&MB_TYPE_BACKWARD){
3340 s->mv_dir = MV_DIR_BACKWARD;
3341 s->mv_type = MV_TYPE_16X16;
3343 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3344 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3345 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3346 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3348 if(mb_type&MB_TYPE_BIDIR){
3349 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3350 s->mv_type = MV_TYPE_16X16;
3352 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3353 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3354 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3355 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3356 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb,
3357 &dmin, &next_block, 0, 0);
3359 if(mb_type&MB_TYPE_DIRECT){
3360 int mx= s->b_direct_mv_table[xy][0];
3361 int my= s->b_direct_mv_table[xy][1];
3363 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3366 ff_mpeg4_set_direct_mv(s, mx, my);
3368 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb,
3369 &dmin, &next_block, mx, my);
3371 if(mb_type&MB_TYPE_INTRA){
3373 s->mv_type = MV_TYPE_16X16;
3377 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
3378 &dmin, &next_block, 0, 0);
3379 /* force cleaning of ac/dc pred stuff if needed ... */
3380 if(s->h263_pred || s->h263_aic)
3381 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3383 copy_context_after_encode(s, &best_s, -1);
/* splice the winning trial bitstream into the real one */
3385 pb_bits_count= get_bit_count(&s->pb);
3386 flush_put_bits(&s->pb);
3387 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3390 if(s->data_partitioning){
3391 pb2_bits_count= get_bit_count(&s->pb2);
3392 flush_put_bits(&s->pb2);
3393 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3394 s->pb2= backup_s.pb2;
3396 tex_pb_bits_count= get_bit_count(&s->tex_pb);
3397 flush_put_bits(&s->tex_pb);
3398 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3399 s->tex_pb= backup_s.tex_pb;
3401 s->last_bits= get_bit_count(&s->pb);
/* ---- normal path: a single MB type was chosen ---- */
3403 int motion_x, motion_y;
3405 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
/* cheap intra/inter decision for P frames outside HQ mode */
3407 if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
3408 /* get luma score */
3409 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3410 intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
3414 int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
3417 dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
/* fill scratchpad with the MB mean and compare against the source:
 * approximates the cost of coding the MB as flat intra */
3419 for(i=0; i<16; i++){
3420 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3421 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3422 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3423 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3427 intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3429 /* printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8,
3430 s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
3431 s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
3434 /* get chroma score */
3435 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3443 if(s->out_format == FMT_H263){
3444 mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3446 mean= (s->last_dc[i] + 4)>>3;
3448 dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
3452 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean;
3453 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean;
3456 intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
/* bias the comparison per metric so intra is only picked when clearly
 * cheaper (constants are empirical) */
3461 switch(s->avctx->mb_cmp&0xFF){
3464 intra_score+= 32*s->qscale;
3467 intra_score+= 24*s->qscale*s->qscale;
3470 intra_score+= 96*s->qscale;
3473 intra_score+= 48*s->qscale;
3480 intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3484 if(intra_score < inter_score)
3485 mb_type= MB_TYPE_INTRA;
3488 s->mv_type=MV_TYPE_16X16;
3489 // only one MB-Type possible
/* set up MVs / prediction direction for the single chosen mode */
3495 motion_x= s->mv[0][0][0] = 0;
3496 motion_y= s->mv[0][0][1] = 0;
3499 s->mv_dir = MV_DIR_FORWARD;
3501 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3502 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3504 case MB_TYPE_INTER4V:
3505 s->mv_dir = MV_DIR_FORWARD;
3506 s->mv_type = MV_TYPE_8X8;
3509 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3510 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3512 motion_x= motion_y= 0;
3514 case MB_TYPE_DIRECT:
3515 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3517 motion_x=s->b_direct_mv_table[xy][0];
3518 motion_y=s->b_direct_mv_table[xy][1];
3520 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3524 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3528 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3529 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3530 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3531 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3533 case MB_TYPE_BACKWARD:
3534 s->mv_dir = MV_DIR_BACKWARD;
3536 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3537 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3539 case MB_TYPE_FORWARD:
3540 s->mv_dir = MV_DIR_FORWARD;
3542 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3543 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3544 // printf(" %d %d ", motion_x, motion_y);
3547 motion_x=motion_y=0; //gcc warning fix
3548 printf("illegal MB type\n");
3551 encode_mb(s, motion_x, motion_y);
3553 // RAL: Update last macrobloc type
3554 s->last_mv_dir = s->mv_dir;
3557 /* clean the MV table in IPS frames for direct mode in B frames */
3558 if(s->mb_intra /* && I,P,S_TYPE */){
3559 s->p_mv_table[xy][0]=0;
3560 s->p_mv_table[xy][1]=0;
/* reconstruct the MB locally (decode loop) so it can serve as a
 * reference for later frames */
3563 MPV_decode_mb(s, s->block);
3565 if(s->flags&CODEC_FLAG_PSNR){
/* accumulate per-plane SSE against the source for PSNR reporting;
 * w/h are clipped for edge MBs */
3569 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3570 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3572 s->current_picture_ptr->error[0] += sse(
3574 s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3575 s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3577 s->current_picture_ptr->error[1] += sse(
3579 s->new_picture .data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3580 s->current_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3581 w>>1, h>>1, s->uvlinesize);
3582 s->current_picture_ptr->error[2] += sse(
3584 s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3585 s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3586 w>>1, h>>1, s->uvlinesize);
3588 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
/* ---- codec-specific trailers after the MB loop ---- */
3594 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3595 ff_mpeg4_merge_partitions(s);
3597 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3598 msmpeg4_encode_ext_header(s);
3600 if(s->codec_id==CODEC_ID_MPEG4)
3601 ff_mpeg4_stuffing(&s->pb);
3604 //if (s->gob_number)
3605 // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3607 /* Send the last GOB if RTP */
3609 flush_put_bits(&s->pb);
3610 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3611 /* Call the RTP callback to send the last GOB */
3612 if (s->rtp_callback)
3613 s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3614 s->ptr_lastgob = pbBufPtr(&s->pb);
3615 //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
// Rate-distortion ("trellis") quantization of one 8x8 DCT block.
// For each scan position it considers up to two candidate quantization
// levels and picks, via dynamic programming over (run, level) VLC code
// lengths, the combination minimizing distortion + lambda * bits.
// Returns the index of the last non-zero coefficient (a value below the
// start index means the block quantized to all zeros); *overflow reports
// whether any level exceeded s->max_qcoeff.
3619 static int dct_quantize_trellis_c(MpegEncContext *s,
3620 DCTELEM *block, int n,
3621 int qscale, int *overflow){
3623 const uint8_t *scantable= s->intra_scantable.scantable;
3625 unsigned int threshold1, threshold2;
3635 int coeff_count[64];
3636 int lambda, qmul, qadd, start_i, last_non_zero, i;
3637 const int esc_length= s->ac_esc_length;
3639 uint8_t * last_length;
// forward DCT of the block in place
3643 s->dsp.fdct (block);
// qadd is forced odd
3646 qadd= ((qscale-1)|1)*8;
3657 /* For AIC we skip quant/dequant of INTRADC */
3662 /* note: block[0] is assumed to be positive */
3663 block[0] = (block[0] + (q >> 1)) / q;
// intra path: intra quant matrix + intra AC VLC length tables
3666 qmat = s->q_intra_matrix[qscale];
3667 if(s->mpeg_quant || s->codec_id== CODEC_ID_MPEG1VIDEO)
3668 bias= 1<<(QMAT_SHIFT-1);
3669 length = s->intra_ac_vlc_length;
3670 last_length= s->intra_ac_vlc_last_length;
// inter path: inter quant matrix + inter AC VLC length tables
3674 qmat = s->q_inter_matrix[qscale];
3675 length = s->inter_ac_vlc_length;
3676 last_length= s->inter_ac_vlc_last_length;
3679 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3680 threshold2= (threshold1<<1);
// first pass: for each scan position k store up to 2 candidate levels
// (the rounded level and level-1); coeff_count[k] = number of candidates
3682 for(i=start_i; i<64; i++) {
3683 const int j = scantable[i];
3684 const int k= i-start_i;
3685 int level = block[j];
3686 level = level * qmat[j];
3688 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3689 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3690 if(((unsigned)(level+threshold1))>threshold2){
3692 level= (bias + level)>>QMAT_SHIFT;
3694 coeff[1][k]= level-1;
3695 // coeff[2][k]= level-2;
3697 level= (bias - level)>>QMAT_SHIFT;
3698 coeff[0][k]= -level;
3699 coeff[1][k]= -level+1;
3700 // coeff[2][k]= -level+2;
3702 coeff_count[k]= FFMIN(level, 2);
// below-threshold coefficient: single +/-1 candidate via sign trick
3706 coeff[0][k]= (level>>31)|1;
3711 *overflow= s->max_qcoeff < max; //overflow might have happend
// everything quantized to zero: clear the block and bail out early
3713 if(last_non_zero < start_i){
3714 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3715 return last_non_zero;
// lagrange multiplier trading distortion against bits
3718 lambda= (qscale*qscale*64*105 + 64)>>7; //FIXME finetune
// dynamic programming over scan positions: score_tab[i] holds the best
// rate-distortion cost of coding all coefficients before position i
3721 for(i=0; i<=last_non_zero - start_i; i++){
3722 int level_index, run, j;
3723 const int dct_coeff= block[ scantable[i + start_i] ];
// "distoration"[sic]: squared error if this coefficient is forced to zero
3724 const int zero_distoration= dct_coeff*dct_coeff;
3725 int best_score=256*256*256*120;
3727 last_score += zero_distoration;
3728 for(level_index=0; level_index < coeff_count[i]; level_index++){
3730 int level= coeff[level_index][i];
// reconstruct (dequantize) the candidate to measure its distortion
3735 if(s->out_format == FMT_H263){
3737 unquant_coeff= level*qmul + qadd;
3739 unquant_coeff= level*qmul - qadd;
3742 j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
// mpeg-style intra dequant; (x-1)|1 forces the result odd
3745 unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3;
3746 unquant_coeff = -((unquant_coeff - 1) | 1);
3748 unquant_coeff = (int)( level * qscale * s->intra_matrix[j]) >> 3;
3749 unquant_coeff = (unquant_coeff - 1) | 1;
// mpeg-style inter dequant
3753 unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3754 unquant_coeff = -((unquant_coeff - 1) | 1);
3756 unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3757 unquant_coeff = (unquant_coeff - 1) | 1;
3763 distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff);
// levels representable by the regular (run,level) VLC tables
3765 if((level&(~127)) == 0){
3766 for(run=0; run<=i - left_limit; run++){
3767 int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3768 score += score_tab[i-run];
3770 if(score < best_score){
3772 score_tab[i+1]= score;
3774 level_tab[i+1]= level-64;
// h263-style streams cost the final coefficient with a separate table
3778 if(s->out_format == FMT_H263){
3779 for(run=0; run<=i - left_limit; run++){
3780 int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3781 score += score_tab[i-run];
3782 if(score < last_score){
3785 last_level= level-64;
// level outside VLC range: cost it as an escape code
3791 distoration += esc_length*lambda;
3792 for(run=0; run<=i - left_limit; run++){
3793 int score= distoration + score_tab[i-run];
3795 if(score < best_score){
3797 score_tab[i+1]= score;
3799 level_tab[i+1]= level-64;
3803 if(s->out_format == FMT_H263){
3804 for(run=0; run<=i - left_limit; run++){
3805 int score= distoration + score_tab[i-run];
3806 if(score < last_score){
3809 last_level= level-64;
// charge the zero-coefficient distortion to all still-open paths
3817 for(j=left_limit; j<=i; j++){
3818 score_tab[j] += zero_distoration;
3820 score_limit+= zero_distoration;
3821 if(score_tab[i+1] < score_limit)
3822 score_limit= score_tab[i+1];
3824 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
// prune start positions that can no longer beat the current best
3825 while(score_tab[ left_limit ] > score_limit + lambda) left_limit++;
3828 //FIXME add some cbp penalty
// non-h263 formats: explicitly search for the best end position
3830 if(s->out_format != FMT_H263){
3831 last_score= 256*256*256*120;
3832 for(i= left_limit; i<=last_non_zero - start_i + 1; i++){
3833 int score= score_tab[i];
3834 if(i) score += lambda*2; //FIXME exacter?
3836 if(score < last_score){
3839 last_level= level_tab[i];
3840 last_run= run_tab[i];
3845 last_non_zero= last_i - 1 + start_i;
3846 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3848 if(last_non_zero < start_i)
3849 return last_non_zero;
// backtrack: write the chosen levels into the (IDCT-permuted) block
3853 //FIXME use permutated scantable
3854 block[ s->dsp.idct_permutation[ scantable[last_non_zero] ] ]= last_level;
3857 for(;i>0 ; i -= run_tab[i] + 1){
3858 const int j= s->dsp.idct_permutation[ scantable[i - 1 + start_i] ];
3860 block[j]= level_tab[i];
3864 return last_non_zero;
// Plain (non-trellis) quantization of one 8x8 DCT block, C reference.
// Performs the forward DCT, divides by the per-qscale quant matrix with a
// rounding bias, and returns the last non-zero index in scan order.
// *overflow is set when a quantized level exceeds s->max_qcoeff.
3867 static int dct_quantize_c(MpegEncContext *s,
3868 DCTELEM *block, int n,
3869 int qscale, int *overflow)
3871 int i, j, level, last_non_zero, q;
3873 const uint8_t *scantable= s->intra_scantable.scantable;
3876 unsigned int threshold1, threshold2;
3878 s->dsp.fdct (block);
3888 /* For AIC we skip quant/dequant of INTRADC */
3891 /* note: block[0] is assumed to be positive */
3892 block[0] = (block[0] + (q >> 1)) / q;
// intra vs inter pick different matrices and rounding biases
3895 qmat = s->q_intra_matrix[qscale];
3896 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3900 qmat = s->q_inter_matrix[qscale];
3901 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3903 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3904 threshold2= (threshold1<<1);
3909 level = level * qmat[j];
3911 // if( bias+level >= (1<<QMAT_SHIFT)
3912 // || bias-level >= (1<<QMAT_SHIFT)){
// single unsigned compare covers both the +level and -level thresholds
3913 if(((unsigned)(level+threshold1))>threshold2){
3915 level= (bias + level)>>QMAT_SHIFT;
3918 level= (bias - level)>>QMAT_SHIFT;
3927 *overflow= s->max_qcoeff < max; //overflow might have happend
3929 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
3930 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
3931 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
3933 return last_non_zero;
3936 #endif //CONFIG_ENCODERS
// MPEG-1 inverse quantization, C reference implementation.
// Dequantizes block n in place up to block_last_index[n]. Intra DC is
// scaled by y_dc_scale/c_dc_scale; AC levels are scaled by qscale and the
// quant matrix, with "(level - 1) | 1" forcing results odd (MPEG-1
// mismatch control).
3938 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
3939 DCTELEM *block, int n, int qscale)
3941 int i, level, nCoeffs;
3942 const uint16_t *quant_matrix;
3944 nCoeffs= s->block_last_index[n];
// intra DC: separate scale for luma vs chroma blocks
3948 block[0] = block[0] * s->y_dc_scale;
3950 block[0] = block[0] * s->c_dc_scale;
3951 /* XXX: only mpeg1 */
3952 quant_matrix = s->intra_matrix;
3953 for(i=1;i<=nCoeffs;i++) {
3954 int j= s->intra_scantable.permutated[i];
// one branch per sign so the shift rounds symmetrically
3959 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3960 level = (level - 1) | 1;
3963 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3964 level = (level - 1) | 1;
3967 if (level < -2048 || level > 2047)
3968 fprintf(stderr, "unquant error %d %d\n", i, level);
// inter path: (2*level + 1) * qscale * matrix / 16
3975 quant_matrix = s->inter_matrix;
3976 for(;i<=nCoeffs;i++) {
// NOTE(review): this inter loop also indexes via intra_scantable.permutated;
// presumably both scan tables share the same permutation — confirm.
3977 int j= s->intra_scantable.permutated[i];
3982 level = (((level << 1) + 1) * qscale *
3983 ((int) (quant_matrix[j]))) >> 4;
3984 level = (level - 1) | 1;
3987 level = (((level << 1) + 1) * qscale *
3988 ((int) (quant_matrix[j]))) >> 4;
3989 level = (level - 1) | 1;
3992 if (level < -2048 || level > 2047)
3993 fprintf(stderr, "unquant error %d %d\n", i, level);
// MPEG-2 inverse quantization, C reference implementation.
// Same structure as the MPEG-1 variant above, but levels are NOT forced
// odd here, and with alternate_scan all 63 AC coefficients are processed
// regardless of block_last_index.
4001 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
4002 DCTELEM *block, int n, int qscale)
4004 int i, level, nCoeffs;
4005 const uint16_t *quant_matrix;
4007 if(s->alternate_scan) nCoeffs= 63;
4008 else nCoeffs= s->block_last_index[n];
// intra DC: separate scale for luma vs chroma blocks
4012 block[0] = block[0] * s->y_dc_scale;
4014 block[0] = block[0] * s->c_dc_scale;
4015 quant_matrix = s->intra_matrix;
4016 for(i=1;i<=nCoeffs;i++) {
4017 int j= s->intra_scantable.permutated[i];
// one branch per sign so the shift rounds symmetrically
4022 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4025 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4028 if (level < -2048 || level > 2047)
4029 fprintf(stderr, "unquant error %d %d\n", i, level);
// inter path: (2*level + 1) * qscale * matrix / 16
4037 quant_matrix = s->inter_matrix;
4038 for(;i<=nCoeffs;i++) {
// NOTE(review): inter loop also indexes via intra_scantable.permutated;
// presumably both scan tables share the same permutation — confirm.
4039 int j= s->intra_scantable.permutated[i];
4044 level = (((level << 1) + 1) * qscale *
4045 ((int) (quant_matrix[j]))) >> 4;
4048 level = (((level << 1) + 1) * qscale *
4049 ((int) (quant_matrix[j]))) >> 4;
4052 if (level < -2048 || level > 2047)
4053 fprintf(stderr, "unquant error %d %d\n", i, level);
// H.263/MPEG-4 style inverse quantization: reconstruction is
// level*qmul +/- qadd (offset sign matches the level's sign).
// Dequantizes block n in place.
4064 static void dct_unquantize_h263_c(MpegEncContext *s,
4065 DCTELEM *block, int n, int qscale)
4067 int i, level, qmul, qadd;
4070 assert(s->block_last_index[n]>=0);
// qadd is forced odd
4072 qadd = (qscale - 1) | 1;
// intra DC: separate scale for luma vs chroma blocks
4078 block[0] = block[0] * s->y_dc_scale;
4080 block[0] = block[0] * s->c_dc_scale;
4084 nCoeffs= 63; //does not allways use zigzag table
4087 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
4090 for(;i<=nCoeffs;i++) {
// negative levels get -qadd, positive ones +qadd
4094 level = level * qmul - qadd;
4096 level = level * qmul + qadd;
4099 if (level < -2048 || level > 2047)
4100 fprintf(stderr, "unquant error %d %d\n", i, level);
// AVOption table shared by the MPEG-4 family of encoders (mpeg4,
// msmpeg4v1-3, wmv1). Each entry maps an option name onto a field of the
// codec context, with min/max range and default value.
// Fixes vs previous revision:
//  - "i_quant_offset" wrote to the i_quant_factor field (copy-paste bug);
//  - duplicate "me_range" entry removed.
4108 static const AVOption mpeg4_options[] =
4110 AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
4111 AVOPTION_CODEC_FLAG("vhq", "very high quality", flags, CODEC_FLAG_HQ, 0),
4112 AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
4113 "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
4114 bit_rate_tolerance, 4, 240000000, 8000),
4115 AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
4116 AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
4117 AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
4118 rc_eq, "tex^qComp,option1,options2", 0),
4119 AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
4120 rc_min_rate, 4, 24000000, 0),
4121 AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
4122 rc_max_rate, 4, 24000000, 0),
4123 AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
4124 rc_buffer_aggressivity, 4, 24000000, 0),
4125 AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
4126 rc_initial_cplx, 0., 9999999., 0),
// NOTE(review): min==max==0. looks wrong for a quant factor — confirm range
4127 AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
4128 i_quant_factor, 0., 0., 0),
4129 AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
4130 i_quant_offset, -999999., 999999., 0),
4131 AVOPTION_CODEC_INT("dct_algo", "dct alghorithm",
4132 dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
4133 AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
4134 lumi_masking, 0., 999999., 0),
4135 AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
4136 temporal_cplx_masking, 0., 999999., 0),
4137 AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
4138 spatial_cplx_masking, 0., 999999., 0),
4139 AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
4140 p_masking, 0., 999999., 0),
4141 AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
4142 dark_masking, 0., 999999., 0),
4143 AVOPTION_CODEC_INT("idct_algo", "idct alghorithm",
4144 idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
4146 AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
4148 AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
4151 AVOPTION_CODEC_INT("me_cmp", "ME compare function",
4152 me_cmp, 0, 24000000, 0),
4153 AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
4154 me_sub_cmp, 0, 24000000, 0),
4157 AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
4158 dia_size, 0, 24000000, 0),
4159 AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
4160 last_predictor_count, 0, 24000000, 0),
4162 AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
4163 pre_me, 0, 24000000, 0),
4164 AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
4165 me_pre_cmp, 0, 24000000, 0),
4167 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
4168 me_range, 0, 24000000, 0),
4169 AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape",
4170 pre_dia_size, 0, 24000000, 0),
4171 AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
4172 me_subpel_quality, 0, 24000000, 0),
4175 AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
4176 flags, CODEC_FLAG_PSNR, 0),
4177 AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
4179 AVOPTION_SUB(avoptions_common),
4183 #ifdef CONFIG_ENCODERS
// Encoder registration table: one AVCodec per supported output format.
// All share MpegEncContext as their private context; the mpeg4-family
// entries additionally expose the mpeg4_options AVOption table above.
4185 AVCodec mpeg1video_encoder = {
4188 CODEC_ID_MPEG1VIDEO,
4189 sizeof(MpegEncContext),
4197 AVCodec h263_encoder = {
4201 sizeof(MpegEncContext),
4207 AVCodec h263p_encoder = {
4211 sizeof(MpegEncContext),
4217 AVCodec rv10_encoder = {
4221 sizeof(MpegEncContext),
4227 AVCodec mpeg4_encoder = {
4231 sizeof(MpegEncContext),
4235 .options = mpeg4_options,
4238 AVCodec msmpeg4v1_encoder = {
4242 sizeof(MpegEncContext),
4246 .options = mpeg4_options,
4249 AVCodec msmpeg4v2_encoder = {
4253 sizeof(MpegEncContext),
4257 .options = mpeg4_options,
4260 AVCodec msmpeg4v3_encoder = {
4264 sizeof(MpegEncContext),
4268 .options = mpeg4_options,
4271 AVCodec wmv1_encoder = {
4275 sizeof(MpegEncContext),
4279 .options = mpeg4_options,
4284 AVCodec mjpeg_encoder = {
4288 sizeof(MpegEncContext),
4294 #endif //CONFIG_ENCODERS