2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
24 * The simplest mpeg encoder (well, it was the simplest!).
31 #include "mpegvideo.h"
34 #include "fastmemcpy.h"
/* Forward declarations. Encoder-only helpers are compiled out unless
   CONFIG_ENCODERS is defined. */
40 #ifdef CONFIG_ENCODERS
41 static void encode_picture(MpegEncContext *s, int picture_number);
42 #endif //CONFIG_ENCODERS
/* C reference implementations of the per-standard inverse quantizers;
   platform init code may replace the corresponding function pointers. */
43 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
44 DCTELEM *block, int n, int qscale);
45 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_h263_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
50 #ifdef CONFIG_ENCODERS
51 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
52 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
53 #endif //CONFIG_ENCODERS
/* Function pointer so optimized (platform-specific) edge drawing can be
   installed at runtime; defaults to the portable C version. */
55 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
58 /* enable all paranoid tests for rounding, overflows, etc... */
64 /* for jpeg fast DCT */
/* AAN (Arai/Agui/Nakajima) fast-DCT post-scale factors for each of the
   64 coefficient positions; folded into the quantizer tables by
   convert_matrix() when the fast DCT (fdct_ifast) is used. */
67 static const uint16_t aanscales[64] = {
68 /* precomputed values scaled up by 14 bits */
69 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
70 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
71 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
72 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
73 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
74 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
75 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446,
76 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247
/* Rounding table for deriving the H.263 chroma motion vector from the sum
   of the four luma MVs (indexed by that sum modulo 16). */
79 static const uint8_t h263_chroma_roundtab[16] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
84 #ifdef CONFIG_ENCODERS
/* Shared (lazily allocated) default motion-vector penalty table and the
   default f_code lookup table used by the motion estimator. */
85 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
86 static uint8_t default_fcode_tab[MAX_MV*2+1];
/* Pixel formats supported by default: YUV420P only, -1 terminated. */
88 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/**
 * Precomputes the quantization multiplier tables for every qscale in
 * [qmin, qmax] from the given quantization matrix.
 * The formula chosen depends on which forward DCT is in use: the fast
 * (AAN) DCT leaves its scale factors in the coefficients, so aanscales[]
 * is folded into the divisor for that path.
 * qmat: 32-bit multipliers; qmat16/qmat16_bias: 16-bit variants
 * (see QMAT_SHIFT_MMX) for the SIMD quantizer.
 */
90 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
91 const uint16_t *quant_matrix, int bias, int qmin, int qmax)
95 for(qscale=qmin; qscale<=qmax; qscale++){
97 if (s->dsp.fdct == ff_jpeg_fdct_islow) {
99 const int j= s->dsp.idct_permutation[i];
100 /* 16 <= qscale * quant_matrix[i] <= 7905 */
101 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
102 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
103 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
105 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
106 (qscale * quant_matrix[j]));
108 } else if (s->dsp.fdct == fdct_ifast) {
110 const int j= s->dsp.idct_permutation[i];
111 /* 16 <= qscale * quant_matrix[i] <= 7905 */
112 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
113 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
114 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
116 qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
117 (aanscales[i] * qscale * quant_matrix[j]));
121 const int j= s->dsp.idct_permutation[i];
122 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
123 So 16 <= qscale * quant_matrix[i] <= 7905
124 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
125 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
127 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
128 // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
129 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
/* Clamp so the 16-bit multiplier never becomes 0 or exactly 128*256,
   both of which would break the fixed-point quantizer. */
131 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
132 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
137 #endif //CONFIG_ENCODERS
/**
 * Initializes a ScanTable: keeps a pointer to the raw scan order and
 * builds a copy permutated for the active IDCT's coefficient layout.
 * raster_end[] is derived from the permutated order (presumably the last
 * raster position reached per scan index — NOTE(review): elided lines,
 * confirm against the full source).
 */
139 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
143 st->scantable= src_scantable;
147 j = src_scantable[i];
148 st->permutated[i] = permutation[j];
157 j = st->permutated[i];
159 st->raster_end[i]= end;
163 /* init common dct for both encoder and decoder */
164 int DCT_common_init(MpegEncContext *s)
/* Install the portable C (un)quantizers first; the platform-specific
   MPV_common_init_* calls below may override them. */
166 s->dct_unquantize_h263 = dct_unquantize_h263_c;
167 s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
168 s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
170 #ifdef CONFIG_ENCODERS
171 s->dct_quantize= dct_quantize_c;
175 MPV_common_init_mmx(s);
178 MPV_common_init_axp(s);
181 MPV_common_init_mlib(s);
184 MPV_common_init_mmi(s);
187 MPV_common_init_armv4l(s);
190 MPV_common_init_ppc(s);
193 #ifdef CONFIG_ENCODERS
/* Keep the (possibly SIMD) quantizer reachable as fast_dct_quantize even
   when trellis quantization replaces the default below. */
194 s->fast_dct_quantize= s->dct_quantize;
196 if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
197 s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
200 #endif //CONFIG_ENCODERS
202 /* load & permutate scantables
203 note: only wmv uses different ones
205 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
206 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
207 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
208 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
210 s->picture_structure= PICT_FRAME;
216 * allocates a Picture
217 * The pixels are allocated/set by calling get_buffer() if shared=0
219 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
220 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
221 const int mb_array_size= s->mb_stride*s->mb_height;
/* Shared pictures arrive with pixel data already set by the caller. */
225 assert(pic->data[0]);
226 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
227 pic->type= FF_BUFFER_TYPE_SHARED;
231 assert(!pic->data[0]);
233 r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
/* Sanity-check what the application's get_buffer() returned. */
235 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
236 fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
/* The stride must stay constant for the lifetime of the context. */
240 if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
241 fprintf(stderr, "get_buffer() failed (stride changed)\n");
245 if(pic->linesize[1] != pic->linesize[2]){
246 fprintf(stderr, "get_buffer() failed (uv stride missmatch)\n");
250 s->linesize = pic->linesize[0];
251 s->uvlinesize= pic->linesize[1];
/* Allocate the per-macroblock side tables only once per Picture. */
254 if(pic->qscale_table==NULL){
256 CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
257 CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
258 CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
259 CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
262 CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
263 CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
264 CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(int))
265 pic->mb_type= pic->mb_type_base + s->mb_stride+1;
266 if(s->out_format == FMT_H264){
268 CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
269 CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
272 pic->qstride= s->mb_stride;
275 //it might be nicer if the application would keep track of these but it would require an API change
276 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
277 s->prev_pict_types[0]= s->pict_type;
278 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
279 pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and it's a bit tricky to skip them anyway
282 fail: //for the CHECKED_ALLOCZ macro
287 * deallocates a picture
289 static void free_picture(MpegEncContext *s, Picture *pic){
/* Pixel buffers we obtained via get_buffer() are handed back to the
   application; SHARED pictures are owned by the caller. */
292 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
293 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
/* Free the per-macroblock side tables allocated in alloc_picture(). */
296 av_freep(&pic->mb_var);
297 av_freep(&pic->mc_mb_var);
298 av_freep(&pic->mb_mean);
299 av_freep(&pic->mb_cmp_score);
300 av_freep(&pic->mbskip_table);
301 av_freep(&pic->qscale_table);
302 av_freep(&pic->mb_type_base);
305 av_freep(&pic->motion_val[i]);
306 av_freep(&pic->ref_index[i]);
309 if(pic->type == FF_BUFFER_TYPE_SHARED){
318 /* init common structure for both encoder and decoder */
319 int MPV_common_init(MpegEncContext *s)
321 int y_size, c_size, yc_size, i, mb_array_size, x, y;
323 dsputil_init(&s->dsp, s->avctx);
326 s->flags= s->avctx->flags;
/* Macroblock grid geometry; mb_stride has one extra column so edge
   macroblocks can index one past the right border safely. */
328 s->mb_width = (s->width + 15) / 16;
329 s->mb_height = (s->height + 15) / 16;
330 s->mb_stride = s->mb_width + 1;
331 mb_array_size= s->mb_height * s->mb_stride;
333 /* set default edge pos, will be overridden in decode_header if needed */
334 s->h_edge_pos= s->mb_width*16;
335 s->v_edge_pos= s->mb_height*16;
337 s->mb_num = s->mb_width * s->mb_height;
342 s->block_wrap[3]= s->mb_width*2 + 2;
344 s->block_wrap[5]= s->mb_width + 2;
/* Sizes (in blocks) including a one-block border on each side. */
346 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
347 c_size = (s->mb_width + 2) * (s->mb_height + 2);
348 yc_size = y_size + 2 * c_size;
350 /* convert fourcc to upper case */
351 s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
352 + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
353 + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
354 + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
356 CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
357 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
359 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* mb index -> raster (x,y) position lookup, used all over. */
361 CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
362 for(y=0; y<s->mb_height; y++){
363 for(x=0; x<s->mb_width; x++){
364 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
367 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
370 int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;
372 /* Allocate MV tables */
373 CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
374 CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
375 CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
376 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
377 CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
378 CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
/* Working pointers skip the one-MB top/left border of each base table. */
379 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
380 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
381 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
382 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
383 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
384 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
386 //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
387 CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
389 CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t))
390 CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
392 if(s->codec_id==CODEC_ID_MPEG4){
393 CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
394 CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE);
397 if(s->msmpeg4_version){
398 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
400 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
402 /* Allocate MB type table */
403 CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint8_t)) //needed for encoding
406 CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
408 if (s->out_format == FMT_H263 || s->encoding) {
412 size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
413 CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));
416 if(s->codec_id==CODEC_ID_MPEG4){
417 /* interlaced direct mode decoding tables */
418 CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
419 CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
421 if (s->out_format == FMT_H263) {
/* ac_val holds AC prediction values, one plane after the other. */
423 CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
424 s->ac_val[1] = s->ac_val[0] + y_size;
425 s->ac_val[2] = s->ac_val[1] + c_size;
428 CHECKED_ALLOCZ(s->coded_block, y_size);
430 /* divx501 bitstream reorder buffer */
431 CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
433 /* cbp, ac_pred, pred_dir */
434 CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
435 CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
438 if (s->h263_pred || s->h263_plus || !s->encoding) {
440 //MN: we need these for error resilience of intra-frames
441 CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
442 s->dc_val[1] = s->dc_val[0] + y_size;
443 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the reset/default DC predictor value. */
444 for(i=0;i<yc_size;i++)
445 s->dc_val[0][i] = 1024;
448 /* which mb is a intra block */
449 CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
450 memset(s->mbintra_table, 1, mb_array_size);
452 /* default structure is frame */
453 s->picture_structure = PICT_FRAME;
455 /* init macroblock skip table */
456 CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
457 //Note the +1 is for a quicker mpeg4 slice_end detection
458 CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
460 s->block= s->blocks[0];
462 s->parse_context.state= -1;
464 s->context_initialized = 1;
474 /* free the common structure allocated by MPV_common_init (encoder and decoder) */
475 void MPV_common_end(MpegEncContext *s)
479 av_freep(&s->mb_type);
480 av_freep(&s->p_mv_table_base);
481 av_freep(&s->b_forw_mv_table_base);
482 av_freep(&s->b_back_mv_table_base);
483 av_freep(&s->b_bidir_forw_mv_table_base);
484 av_freep(&s->b_bidir_back_mv_table_base);
485 av_freep(&s->b_direct_mv_table_base);
/* The working pointers above pointed into the freed base tables; clear
   them so nothing dereferences stale addresses. */
487 s->b_forw_mv_table= NULL;
488 s->b_back_mv_table= NULL;
489 s->b_bidir_forw_mv_table= NULL;
490 s->b_bidir_back_mv_table= NULL;
491 s->b_direct_mv_table= NULL;
493 av_freep(&s->motion_val);
494 av_freep(&s->dc_val[0]);
495 av_freep(&s->ac_val[0]);
496 av_freep(&s->coded_block);
497 av_freep(&s->mbintra_table);
498 av_freep(&s->cbp_table);
499 av_freep(&s->pred_dir_table);
500 av_freep(&s->me.scratchpad);
501 av_freep(&s->me.map);
502 av_freep(&s->me.score_map);
504 av_freep(&s->mbskip_table);
505 av_freep(&s->prev_pict_types);
506 av_freep(&s->bitstream_buffer);
507 av_freep(&s->tex_pb_buffer);
508 av_freep(&s->pb2_buffer);
509 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
510 av_freep(&s->field_mv_table);
511 av_freep(&s->field_select_table);
512 av_freep(&s->avctx->stats_out);
513 av_freep(&s->ac_stats);
514 av_freep(&s->error_status_table);
515 av_freep(&s->mb_index2xy);
517 for(i=0; i<MAX_PICTURE_COUNT; i++){
518 free_picture(s, &s->picture[i]);
520 avcodec_default_free_buffers(s->avctx);
521 s->context_initialized = 0;
524 #ifdef CONFIG_ENCODERS
526 /* init video encoder */
527 int MPV_encode_init(AVCodecContext *avctx)
529 MpegEncContext *s = avctx->priv_data;
531 int chroma_h_shift, chroma_v_shift;
/* Copy the user-visible encoder settings from AVCodecContext into the
   internal MpegEncContext. */
533 s->bit_rate = avctx->bit_rate;
534 s->bit_rate_tolerance = avctx->bit_rate_tolerance;
535 s->width = avctx->width;
536 s->height = avctx->height;
537 if(avctx->gop_size > 600){
538 fprintf(stderr, "Warning keyframe interval too large! reducing it ...\n");
541 s->gop_size = avctx->gop_size;
542 s->rtp_mode = avctx->rtp_mode;
543 s->rtp_payload_size = avctx->rtp_payload_size;
544 if (avctx->rtp_callback)
545 s->rtp_callback = avctx->rtp_callback;
546 s->max_qdiff= avctx->max_qdiff;
547 s->qcompress= avctx->qcompress;
548 s->qblur= avctx->qblur;
550 s->flags= avctx->flags;
551 s->max_b_frames= avctx->max_b_frames;
552 s->b_frame_strategy= avctx->b_frame_strategy;
553 s->codec_id= avctx->codec->id;
554 s->luma_elim_threshold = avctx->luma_elim_threshold;
555 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
556 s->strict_std_compliance= avctx->strict_std_compliance;
557 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
558 s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
559 s->mpeg_quant= avctx->mpeg_quant;
561 if (s->gop_size <= 1) {
568 s->me_method = avctx->me_method;
571 s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is enabled if any masking option is set. */
573 s->adaptive_quant= ( s->avctx->lumi_masking
574 || s->avctx->dark_masking
575 || s->avctx->temporal_cplx_masking
576 || s->avctx->spatial_cplx_masking
577 || s->avctx->p_masking)
580 s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
/* Reject option/codec combinations that the codec cannot represent. */
582 if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
583 fprintf(stderr, "4MV not supporetd by codec\n");
587 if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
588 fprintf(stderr, "qpel not supporetd by codec\n");
592 if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
593 fprintf(stderr, "data partitioning not supporetd by codec\n");
597 if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO){
598 fprintf(stderr, "b frames not supporetd by codec\n");
602 if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
603 fprintf(stderr, "mpeg2 style quantization not supporetd by codec\n");
/* Per-codec default quantizer rounding biases (see QUANT_BIAS_SHIFT). */
607 if(s->codec_id==CODEC_ID_MJPEG){
608 s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
609 s->inter_quant_bias= 0;
610 }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO){
611 s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
612 s->inter_quant_bias= 0;
614 s->intra_quant_bias=0;
615 s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
/* Explicit user-provided biases override the defaults above. */
618 if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
619 s->intra_quant_bias= avctx->intra_quant_bias;
620 if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
621 s->inter_quant_bias= avctx->inter_quant_bias;
623 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
/* Per-codec setup: output format, delay, sampling factors, etc. */
625 switch(avctx->codec->id) {
626 case CODEC_ID_MPEG1VIDEO:
627 s->out_format = FMT_MPEG1;
628 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
629 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
633 s->out_format = FMT_MJPEG;
634 s->intra_only = 1; /* force intra only for jpeg */
635 s->mjpeg_write_tables = 1; /* write all tables */
636 s->mjpeg_data_only_frames = 0; /* write all the needed headers */
637 s->mjpeg_vsample[0] = 1<<chroma_v_shift;
638 s->mjpeg_vsample[1] = 1;
639 s->mjpeg_vsample[2] = 1;
640 s->mjpeg_hsample[0] = 1<<chroma_h_shift;
641 s->mjpeg_hsample[1] = 1;
642 s->mjpeg_hsample[2] = 1;
643 if (mjpeg_init(s) < 0)
650 if (h263_get_picture_format(s->width, s->height) == 7) {
651 printf("Input picture size isn't suitable for h263 codec! try h263+\n");
654 s->out_format = FMT_H263;
659 s->out_format = FMT_H263;
662 s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
663 s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
665 /* These are just to be sure */
671 s->out_format = FMT_H263;
677 s->out_format = FMT_H263;
679 s->unrestricted_mv = 1;
680 s->low_delay= s->max_b_frames ? 0 : 1;
681 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
683 case CODEC_ID_MSMPEG4V1:
684 s->out_format = FMT_H263;
687 s->unrestricted_mv = 1;
688 s->msmpeg4_version= 1;
692 case CODEC_ID_MSMPEG4V2:
693 s->out_format = FMT_H263;
696 s->unrestricted_mv = 1;
697 s->msmpeg4_version= 2;
701 case CODEC_ID_MSMPEG4V3:
702 s->out_format = FMT_H263;
705 s->unrestricted_mv = 1;
706 s->msmpeg4_version= 3;
707 s->flipflop_rounding=1;
712 s->out_format = FMT_H263;
715 s->unrestricted_mv = 1;
716 s->msmpeg4_version= 4;
717 s->flipflop_rounding=1;
722 s->out_format = FMT_H263;
725 s->unrestricted_mv = 1;
726 s->msmpeg4_version= 5;
727 s->flipflop_rounding=1;
736 { /* set up some sane defaults, some codecs might override them later */
742 default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
743 memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
744 memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
746 for(i=-16; i<16; i++){
747 default_fcode_tab[i + MAX_MV]= 1;
751 s->me.mv_penalty= default_mv_penalty;
752 s->fcode_tab= default_fcode_tab;
754 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
756 /* don't use mv_penalty table for crap MV as it would be confused */
757 //FIXME remove after fixing / removing old ME
758 if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
763 if (MPV_common_init(s) < 0)
768 #ifdef CONFIG_ENCODERS
770 if (s->out_format == FMT_H263)
772 if(s->msmpeg4_version)
773 ff_msmpeg4_encode_init(s);
775 if (s->out_format == FMT_MPEG1)
776 ff_mpeg1_encode_init(s);
779 /* init default q matrix */
781 int j= s->dsp.idct_permutation[i];
783 if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
784 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
785 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
786 }else if(s->out_format == FMT_H263){
788 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
792 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
793 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
797 /* precompute matrix */
798 /* for mjpeg, we do include qscale in the matrix */
799 if (s->out_format != FMT_MJPEG) {
800 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias,
801 s->intra_matrix, s->intra_quant_bias, 1, 31);
802 convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias,
803 s->inter_matrix, s->inter_quant_bias, 1, 31);
806 if(ff_rate_control_init(s) < 0)
809 s->picture_number = 0;
810 s->picture_in_gop_number = 0;
811 s->fake_picture_number = 0;
812 /* motion detector init */
/* Tears down the encoder: rate control state and MJPEG tables. */
819 int MPV_encode_end(AVCodecContext *avctx)
821 MpegEncContext *s = avctx->priv_data;
827 ff_rate_control_uninit(s);
830 if (s->out_format == FMT_MJPEG)
836 #endif //CONFIG_ENCODERS
/* Derives max_level[], max_run[] and index_run[] for a run-level VLC
   table, once for "not last" and once for "last" coefficients, and
   stores heap-allocated copies in the RLTable. */
838 void init_rl(RLTable *rl)
840 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
841 uint8_t index_run[MAX_RUN+1];
842 int last, run, level, start, end, i;
844 /* compute max_level[], max_run[] and index_run[] */
845 for(last=0;last<2;last++) {
/* rl->n marks "no entry" in index_run. */
854 memset(max_level, 0, MAX_RUN + 1);
855 memset(max_run, 0, MAX_LEVEL + 1);
856 memset(index_run, rl->n, MAX_RUN + 1);
857 for(i=start;i<end;i++) {
858 run = rl->table_run[i];
859 level = rl->table_level[i];
860 if (index_run[run] == rl->n)
862 if (level > max_level[run])
863 max_level[run] = level;
864 if (run > max_run[level])
865 max_run[level] = run;
/* NOTE(review): av_malloc results are not checked here — follows the
   file's convention for these small static tables. */
867 rl->max_level[last] = av_malloc(MAX_RUN + 1);
868 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
869 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
870 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
871 rl->index_run[last] = av_malloc(MAX_RUN + 1);
872 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
876 /* draw the edges of width 'w' of an image of size width, height */
877 //FIXME check that this is ok for mpeg4 interlaced
878 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
880 uint8_t *ptr, *last_line;
883 last_line = buf + (height - 1) * wrap;
/* Replicate the first and last rows upward/downward. */
886 memcpy(buf - (i + 1) * wrap, buf, width);
887 memcpy(last_line + (i + 1) * wrap, last_line, width);
/* Replicate the leftmost and rightmost pixels of every row sideways. */
891 for(i=0;i<height;i++) {
892 memset(ptr - w, ptr[0], w);
893 memset(ptr + width, ptr[width-1], w);
/* Fill the four corner regions from the nearest corner pixel. */
898 memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
899 memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
900 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
901 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
/* Returns the index of a free slot in s->picture[]; preference order is
   visible in the successive scans below (untyped empty slot first). */
905 static int find_unused_picture(MpegEncContext *s, int shared){
909 for(i=0; i<MAX_PICTURE_COUNT; i++){
910 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
913 for(i=0; i<MAX_PICTURE_COUNT; i++){
914 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
916 for(i=0; i<MAX_PICTURE_COUNT; i++){
917 if(s->picture[i].data[0]==NULL) break;
/* Running out of slots is a programming error, not a runtime condition. */
921 assert(i<MAX_PICTURE_COUNT);
925 /* generic function for encode/decode called before a frame is coded/decoded */
926 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
933 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
935 /* mark&release old frames */
936 if (s->pict_type != B_TYPE && s->last_picture_ptr) {
937 avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
939 /* release forgotten pictures */
940 /* if(mpeg124/h263) */
942 for(i=0; i<MAX_PICTURE_COUNT; i++){
943 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
944 fprintf(stderr, "releasing zombie picture\n");
945 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
952 /* release non reference frames */
953 for(i=0; i<MAX_PICTURE_COUNT; i++){
954 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
955 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
/* Pick a free slot and allocate the new current picture into it. */
959 i= find_unused_picture(s, 0);
961 pic= (AVFrame*)&s->picture[i];
962 pic->reference= s->pict_type != B_TYPE ? 3 : 0;
964 if(s->current_picture_ptr)
965 pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
967 alloc_picture(s, (Picture*)pic, 0);
969 s->current_picture_ptr= &s->picture[i];
972 s->current_picture_ptr->pict_type= s->pict_type;
973 s->current_picture_ptr->quality= s->qscale;
974 s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
976 s->current_picture= *s->current_picture_ptr;
/* Shift the reference picture pointers (except for H.264, which manages
   references itself; SVQ3 reuses this path). */
978 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
979 if (s->pict_type != B_TYPE) {
980 s->last_picture_ptr= s->next_picture_ptr;
981 s->next_picture_ptr= s->current_picture_ptr;
984 if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
985 if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
986 if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
/* For field pictures, adjust data pointers/strides to address one field. */
988 if(s->picture_structure!=PICT_FRAME){
991 if(s->picture_structure == PICT_BOTTOM_FIELD){
992 s->current_picture.data[i] += s->current_picture.linesize[i];
994 s->current_picture.linesize[i] *= 2;
995 s->last_picture.linesize[i] *=2;
996 s->next_picture.linesize[i] *=2;
1000 if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
1001 fprintf(stderr, "warning: first frame is no keyframe\n");
1002 assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
1007 s->hurry_up= s->avctx->hurry_up;
1008 s->error_resilience= avctx->error_resilience;
1010 /* set dequantizer, we can't do it during init as it might change for mpeg4
1011 and we can't do it in the header decode as init isn't called for mpeg4 there yet */
1012 if(s->out_format == FMT_H263){
1014 s->dct_unquantize = s->dct_unquantize_mpeg2;
1016 s->dct_unquantize = s->dct_unquantize_h263;
1018 s->dct_unquantize = s->dct_unquantize_mpeg1;
1023 /* generic function for encode/decode called after a frame has been coded/decoded */
1024 void MPV_frame_end(MpegEncContext *s)
1027 /* draw edge for correct motion prediction if outside */
1028 if(s->codec_id!=CODEC_ID_SVQ1){
1029 if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1030 draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
1031 draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1032 draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1037 s->last_pict_type = s->pict_type;
1038 if(s->pict_type!=B_TYPE){
1039 s->last_non_b_pict_type= s->pict_type;
1042 /* copy back current_picture variables */
1043 for(i=0; i<MAX_PICTURE_COUNT; i++){
1044 if(s->picture[i].data[0] == s->current_picture.data[0]){
1045 s->picture[i]= s->current_picture;
1049 assert(i<MAX_PICTURE_COUNT);
1053 /* release non reference frames */
1054 for(i=0; i<MAX_PICTURE_COUNT; i++){
1055 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1056 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1060 // clear copies, to avoid confusion
1062 memset(&s->last_picture, 0, sizeof(Picture));
1063 memset(&s->next_picture, 0, sizeof(Picture));
1064 memset(&s->current_picture, 0, sizeof(Picture));
1069 * Draws a line from (ex, ey) -> (sx, sy), clipped to the image.
1070 * @param w width of the image
1071 * @param h height of the image
1072 * @param stride stride/linesize of the image
1073 * @param color color (added to the pixel values) of the line
1075 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1078 sx= clip(sx, 0, w-1);
1079 sy= clip(sy, 0, h-1);
1080 ex= clip(ex, 0, w-1);
1081 ey= clip(ey, 0, h-1);
1083 buf[sy*stride + sx]+= color;
/* Step along the longer axis; f is the 16.16 fixed-point slope. */
1085 if(ABS(ex - sx) > ABS(ey - sy)){
1090 buf+= sx + sy*stride;
1092 f= ((ey-sy)<<16)/ex;
1093 for(x= 0; x <= ex; x++){
1094 y= ((x*f) + (1<<15))>>16;
1095 buf[y*stride + x]+= color;
1102 buf+= sx + sy*stride;
1104 if(ey) f= ((ex-sx)<<16)/ey;
1106 for(y= 0; y <= ey; y++){
1107 x= ((y*f) + (1<<15))>>16;
1108 buf[y*stride + x]+= color;
1114 * Draws an arrow from (ex, ey) -> (sx, sy).
1115 * @param w width of the image
1116 * @param h height of the image
1117 * @param stride stride/linesize of the image
1118 * @param color color of the arrow
1120 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Loose clipping; draw_line() does the exact per-pixel clipping. */
1123 sx= clip(sx, -100, w+100);
1124 sy= clip(sy, -100, h+100);
1125 ex= clip(ex, -100, w+100);
1126 ey= clip(ey, -100, h+100);
/* Only draw the arrow head if the shaft is longer than 3 pixels. */
1131 if(dx*dx + dy*dy > 3*3){
1134 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1136 //FIXME subpixel accuracy
1137 rx= ROUNDED_DIV(rx*3<<4, length);
1138 ry= ROUNDED_DIV(ry*3<<4, length);
1140 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1141 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1143 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1147 * prints debugging info for the given picture.
1149 void ff_print_debug_info(MpegEncContext *s, Picture *pict){
1151 if(!pict || !pict->mb_type) return;
/* Textual per-macroblock dump: skip counts, qscale and/or MB type. */
1153 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1156 for(y=0; y<s->mb_height; y++){
1157 for(x=0; x<s->mb_width; x++){
1158 if(s->avctx->debug&FF_DEBUG_SKIP){
1159 int count= s->mbskip_table[x + y*s->mb_stride];
1160 if(count>9) count=9;
1161 printf("%1d", count);
1163 if(s->avctx->debug&FF_DEBUG_QP){
1164 printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
1166 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1167 int mb_type= pict->mb_type[x + y*s->mb_stride];
1169 //Type & MV direction
1172 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1174 else if(IS_INTRA4x4(mb_type))
1176 else if(IS_INTRA16x16(mb_type))
1178 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1180 else if(IS_DIRECT(mb_type))
1182 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1184 else if(IS_GMC(mb_type))
1186 else if(IS_SKIP(mb_type))
1188 else if(!USES_LIST(mb_type, 1))
1190 else if(!USES_LIST(mb_type, 0))
1193 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1200 else if(IS_16X8(mb_type))
1202 else if(IS_8X16(mb_type))
1204 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1210 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
/* Visual motion-vector overlay, drawn directly into the picture. */
1221 if((s->avctx->debug&FF_DEBUG_VIS_MV) && s->motion_val){
1222 const int shift= 1 + s->quarter_sample;
1224 uint8_t *ptr= pict->data[0];
1225 s->low_delay=0; //needed to see the vectors without trashing the buffers
1227 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1229 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1230 const int mb_index= mb_x + mb_y*s->mb_stride;
/* 4MV macroblocks get one arrow per 8x8 block, otherwise one per MB. */
1231 if(IS_8X8(s->current_picture.mb_type[mb_index])){
1234 int sx= mb_x*16 + 4 + 8*(i&1);
1235 int sy= mb_y*16 + 4 + 8*(i>>1);
1236 int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1237 int mx= (s->motion_val[xy][0]>>shift) + sx;
1238 int my= (s->motion_val[xy][1]>>shift) + sy;
1239 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1242 int sx= mb_x*16 + 8;
1243 int sy= mb_y*16 + 8;
1244 int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1245 int mx= (s->motion_val[xy][0]>>shift) + sx;
1246 int my= (s->motion_val[xy][1]>>shift) + sy;
1247 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1249 s->mbskip_table[mb_index]=0;
1255 #ifdef CONFIG_ENCODERS
/* Sums the absolute difference between each pixel of a 16x16 block and a
 * constant reference value `ref` (i.e. SAE against the block mean when ref
 * is the mean). Declarations, accumulator init and the return are missing
 * from this view. */
1257 static int get_sae(uint8_t *src, int ref, int stride){
1261 for(y=0; y<16; y++){
1262 for(x=0; x<16; x++){
1263 acc+= ABS(src[x+y*stride] - ref);
/* Counts how many 16x16 blocks of the frame look cheaper to code as intra
 * than as inter: for each block it compares the inter SAD against `ref`
 * with the intra-like SAE against the block mean (+500 bias). Used by
 * b_frame_strategy==1 to score B-frame candidates.
 * NOTE(review): w/h/acc declarations and the return are missing from this
 * view. */
1270 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1277 for(y=0; y<h; y+=16){
1278 for(x=0; x<w; x+=16){
1279 int offset= x + y*stride;
/* inter cost: SAD against the co-located block of the reference frame */
1280 int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
/* block mean, rounded: pix_sum over 256 pixels, +128 for rounding, >>8 */
1281 int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
/* intra cost proxy: deviation from the block's own mean */
1282 int sae = get_sae(src + offset, mean, stride);
/* +500 biases the decision towards inter coding */
1284 acc+= sae + 500 < sad;
/* Copies (or directly references) the user-supplied input frame into the
 * encoder's internal picture FIFO (s->input_picture[]), which is
 * max_b_frames deep so B-frame reordering can be decided later.
 * NOTE(review): several control-flow lines (direct init, loop headers,
 * else branches, return) are missing from this view. */
1291 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1294 const int encoding_delay= s->max_b_frames;
/* "direct" mode (reusing the caller's buffer without a copy) is only
 * possible when the caller preserves its buffer and the strides match */
1297 if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1298 if(pic_arg->linesize[0] != s->linesize) direct=0;
1299 if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1300 if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1302 // printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* direct path: share the caller's plane pointers (shared picture) */
1305 i= find_unused_picture(s, 1);
1307 pic= (AVFrame*)&s->picture[i];
1311 pic->data[i]= pic_arg->data[i];
1312 pic->linesize[i]= pic_arg->linesize[i];
1314 alloc_picture(s, (Picture*)pic, 1);
/* copy path: allocate an internal buffer and memcpy plane by plane */
1316 i= find_unused_picture(s, 0);
1318 pic= (AVFrame*)&s->picture[i];
1321 alloc_picture(s, (Picture*)pic, 0);
1323 /* the input will be 16 pixels to the right relative to the actual buffer start
1324 * and the current_pic, so the buffer can be reused, yes its not beatifull
/* if the freshly allocated buffer happens to alias the caller's data,
 * no copy is needed */
1329 if( pic->data[0] == pic_arg->data[0]
1330 && pic->data[1] == pic_arg->data[1]
1331 && pic->data[2] == pic_arg->data[2]){
1334 int h_chroma_shift, v_chroma_shift;
1336 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* per-plane copy; chroma planes use the subsampled width/height */
1339 int src_stride= pic_arg->linesize[i];
1340 int dst_stride= i ? s->uvlinesize : s->linesize;
1341 int h_shift= i ? h_chroma_shift : 0;
1342 int v_shift= i ? v_chroma_shift : 0;
1343 int w= s->width >>h_shift;
1344 int h= s->height>>v_shift;
1345 uint8_t *src= pic_arg->data[i];
1346 uint8_t *dst= pic->data[i];
/* equal strides: one big memcpy; otherwise (elided) a per-row loop */
1348 if(src_stride==dst_stride)
1349 memcpy(dst, src, src_stride*h);
1352 memcpy(dst, src, w);
/* carry the user's per-frame metadata into the internal picture */
1360 pic->quality= pic_arg->quality;
1361 pic->pict_type= pic_arg->pict_type;
1362 pic->pts = pic_arg->pts;
/* display order is one past the newest queued frame */
1364 if(s->input_picture[encoding_delay])
1365 pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1367 /* shift buffer entries */
1368 for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1369 s->input_picture[i-1]= s->input_picture[i];
/* newest frame goes into the slot at the encoding-delay depth */
1371 s->input_picture[encoding_delay]= (Picture*)pic;
/* Decides the coding type (I/P/B) and coding order of queued input frames,
 * moving them from s->input_picture[] into s->reordered_input_picture[],
 * then promotes the head of the reorder queue to s->new_picture /
 * s->current_picture for encoding.
 * NOTE(review): numerous lines (loop headers, else branches, assignments)
 * are missing from this view; comments describe only the visible logic. */
1376 static void select_input_picture(MpegEncContext *s){
1378 const int encoding_delay= s->max_b_frames;
1379 int coded_pic_num=0;
/* continue the coded-order numbering from the previous head */
1381 if(s->reordered_input_picture[0])
1382 coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
/* pop the head of the reorder FIFO */
1384 for(i=1; i<MAX_PICTURE_COUNT; i++)
1385 s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1386 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1388 /* set next picture types & ordering */
1389 if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
/* force an I frame when there is no reference yet or intra-only coding */
1390 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1391 s->reordered_input_picture[0]= s->input_picture[0];
1392 s->reordered_input_picture[0]->pict_type= I_TYPE;
1393 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
/* two-pass: take picture types from the first-pass ratecontrol log */
1397 if(s->flags&CODEC_FLAG_PASS2){
1398 for(i=0; i<s->max_b_frames+1; i++){
1399 int pict_num= s->input_picture[0]->display_picture_number + i;
1400 int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1401 s->input_picture[i]->pict_type= pict_type;
1403 if(i + 1 >= s->rc_context.num_entries) break;
/* pict_type already set (by PASS2 above or by the user on the AVFrame) */
1407 if(s->input_picture[0]->pict_type){
1408 /* user selected pict_type */
/* count leading B frames until the next non-B anchor */
1409 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1410 if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1413 if(b_frames > s->max_b_frames){
1414 fprintf(stderr, "warning, too many bframes in a row\n");
1415 b_frames = s->max_b_frames;
/* strategy 0: always use the maximum number of B frames */
1417 }else if(s->b_frame_strategy==0){
1418 b_frames= s->max_b_frames;
/* strategy 1: pick B-frame count from intra-block scores between
 * consecutive frames (cached in b_frame_score) */
1419 }else if(s->b_frame_strategy==1){
1420 for(i=1; i<s->max_b_frames+1; i++){
1421 if(s->input_picture[i]->b_frame_score==0){
1422 s->input_picture[i]->b_frame_score=
1423 get_intra_count(s, s->input_picture[i ]->data[0],
1424 s->input_picture[i-1]->data[0], s->linesize) + 1;
/* stop at the first frame that differs too much from its predecessor */
1427 for(i=0; i<s->max_b_frames; i++){
1428 if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1431 b_frames= FFMAX(0, i-1);
/* invalidate cached scores for the frames about to be coded */
1434 for(i=0; i<b_frames+1; i++){
1435 s->input_picture[i]->b_frame_score=0;
1438 fprintf(stderr, "illegal b frame strategy\n");
1443 //static int b_count=0;
1444 //b_count+= b_frames;
1445 //printf("b_frames: %d\n", b_count);
/* the frame after the B run becomes the anchor (I or P) and is coded first */
1447 s->reordered_input_picture[0]= s->input_picture[b_frames];
1448 if( s->picture_in_gop_number + b_frames >= s->gop_size
1449 || s->reordered_input_picture[0]->pict_type== I_TYPE)
1450 s->reordered_input_picture[0]->pict_type= I_TYPE;
1452 s->reordered_input_picture[0]->pict_type= P_TYPE;
1453 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
/* the skipped frames follow the anchor in coded order, all as B frames */
1454 for(i=0; i<b_frames; i++){
1456 s->reordered_input_picture[i+1]= s->input_picture[i];
1457 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1458 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
/* promote the head of the reorder queue to the picture being encoded */
1463 if(s->reordered_input_picture[0]){
/* reference=3 marks I/P frames as reference pictures; B frames are not */
1464 s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1466 s->new_picture= *s->reordered_input_picture[0];
1468 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1469 // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
1471 int i= find_unused_picture(s, 0);
1472 Picture *pic= &s->picture[i];
1474 /* mark us unused / free shared pic */
1476 s->reordered_input_picture[0]->data[i]= NULL;
1477 s->reordered_input_picture[0]->type= 0;
1479 //FIXME bad, copy * except
/* copy the coding-relevant fields onto the freshly allocated picture */
1480 pic->pict_type = s->reordered_input_picture[0]->pict_type;
1481 pic->quality = s->reordered_input_picture[0]->quality;
1482 pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1483 pic->reference = s->reordered_input_picture[0]->reference;
1485 alloc_picture(s, pic, 0);
1487 s->current_picture_ptr= pic;
1489 // input is not a shared pix -> reuse buffer for current_pix
1491 assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1492 || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1494 s->current_picture_ptr= s->reordered_input_picture[0];
1496 //reverse the +16 we did before storing the input
1497 s->current_picture_ptr->data[i]-=16;
1500 s->current_picture= *s->current_picture_ptr;
1502 s->picture_number= s->new_picture.display_picture_number;
1503 //printf("dpn:%d\n", s->picture_number);
/* no frame available: clear new_picture so the caller emits nothing */
1505 memset(&s->new_picture, 0, sizeof(Picture));
/* Top-level encode entry point: queues the input frame, selects/reorders
 * the next picture to code, runs the actual encoder, updates statistics on
 * the AVCodecContext, and returns the number of bytes written to buf.
 * NOTE(review): some lines (loop headers, closing braces) are missing from
 * this view. */
1509 int MPV_encode_picture(AVCodecContext *avctx,
1510 unsigned char *buf, int buf_size, void *data)
1512 MpegEncContext *s = avctx->priv_data;
1513 AVFrame *pic_arg = data;
/* (re)point the bitstream writer at the caller's output buffer */
1516 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1518 s->picture_in_gop_number++;
/* stage the input frame, then pick which queued frame to code now */
1520 load_input_picture(s, pic_arg);
1522 select_input_picture(s);
/* new_picture.data[0]==NULL means nothing to encode yet (B-frame delay) */
1525 if(s->new_picture.data[0]){
1527 s->pict_type= s->new_picture.pict_type;
1528 if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we dont touch it for CBR */
1529 s->qscale= (int)(s->new_picture.quality+0.5);
1533 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1534 MPV_frame_start(s, avctx);
1536 encode_picture(s, s->picture_number);
/* export per-frame bit statistics for the ratecontrol / the user */
1538 avctx->real_pict_num = s->picture_number;
1539 avctx->header_bits = s->header_bits;
1540 avctx->mv_bits = s->mv_bits;
1541 avctx->misc_bits = s->misc_bits;
1542 avctx->i_tex_bits = s->i_tex_bits;
1543 avctx->p_tex_bits = s->p_tex_bits;
1544 avctx->i_count = s->i_count;
1545 avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1546 avctx->skip_count = s->skip_count;
1550 if (s->out_format == FMT_MJPEG)
1551 mjpeg_picture_trailer(s);
/* first pass of two-pass encoding: log stats for the second pass */
1553 if(s->flags&CODEC_FLAG_PASS1)
1554 ff_write_pass1_stats(s);
/* accumulate PSNR/error sums per plane */
1557 avctx->error[i] += s->current_picture_ptr->error[i];
1561 s->input_picture_number++;
/* finalize the bitstream and report the frame size in bits and bytes */
1563 flush_put_bits(&s->pb);
1564 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1566 s->total_bits += s->frame_bits;
1567 avctx->frame_bits = s->frame_bits;
1569 return pbBufPtr(&s->pb) - s->pb.buf;
1572 #endif //CONFIG_ENCODERS
/* Global motion compensation for the 1-warp-point case (MPEG-4 GMC with a
 * single sprite offset): translates the whole macroblock by
 * s->sprite_offset with 1/16-pel accuracy, for luma and both chroma planes.
 * NOTE(review): several else branches / closing braces are missing from
 * this view. */
1574 static inline void gmc1_motion(MpegEncContext *s,
1575 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1577 uint8_t **ref_picture, int src_offset)
1580 int offset, src_x, src_y, linesize, uvlinesize;
1581 int motion_x, motion_y;
/* luma: integer part of the sprite offset selects the source block,
 * the fractional part goes to the gmc1 interpolation below */
1584 motion_x= s->sprite_offset[0][0];
1585 motion_y= s->sprite_offset[0][1];
1586 src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1587 src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
/* rescale the fractional part to 1/16-pel regardless of warping accuracy */
1588 motion_x<<=(3-s->sprite_warping_accuracy);
1589 motion_y<<=(3-s->sprite_warping_accuracy);
/* clip so the 17x17 source read stays near the padded picture */
1590 src_x = clip(src_x, -16, s->width);
1591 if (src_x == s->width)
1593 src_y = clip(src_y, -16, s->height);
1594 if (src_y == s->height)
1597 linesize = s->linesize;
1598 uvlinesize = s->uvlinesize;
1600 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1602 dest_y+=dest_offset;
/* when edges are not padded, copy through the edge-emulation buffer */
1603 if(s->flags&CODEC_FLAG_EMU_EDGE){
1604 if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
1605 || src_y + 17 >= s->v_edge_pos){
1606 ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1607 ptr= s->edge_emu_buffer;
/* fractional offset present -> gmc1 interpolation on both 8-wide halves */
1611 if((motion_x|motion_y)&7){
1612 s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1613 s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* pure half-pel offset -> cheaper put_pixels path */
1617 dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1618 if (s->no_rounding){
1619 s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1621 s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
/* grayscale-only decoding: skip chroma entirely */
1625 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma: same scheme at half resolution using sprite_offset[1] */
1627 motion_x= s->sprite_offset[1][0];
1628 motion_y= s->sprite_offset[1][1];
1629 src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1630 src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1631 motion_x<<=(3-s->sprite_warping_accuracy);
1632 motion_y<<=(3-s->sprite_warping_accuracy);
1633 src_x = clip(src_x, -8, s->width>>1);
1634 if (src_x == s->width>>1)
1636 src_y = clip(src_y, -8, s->height>>1);
1637 if (src_y == s->height>>1)
1640 offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1641 ptr = ref_picture[1] + offset;
1642 if(s->flags&CODEC_FLAG_EMU_EDGE){
1643 if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
1644 || src_y + 9 >= s->v_edge_pos>>1){
1645 ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1646 ptr= s->edge_emu_buffer;
1650 s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* Cr plane: same offset as Cb */
1652 ptr = ref_picture[2] + offset;
1654 ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1655 ptr= s->edge_emu_buffer;
1657 s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* General global motion compensation (affine warp via sprite_delta matrix)
 * for the multi-warp-point MPEG-4 GMC case. Computes the warp origin
 * (ox,oy) per macroblock and lets dsp.gmc do the per-pixel warp for luma
 * (as two 8-wide halves) and both chroma planes.
 * NOTE(review): a few lines (e.g. the ox/oy arguments of the first gmc
 * calls) are missing from this view. */
1662 static inline void gmc_motion(MpegEncContext *s,
1663 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1665 uint8_t **ref_picture, int src_offset)
1668 int linesize, uvlinesize;
1669 const int a= s->sprite_warping_accuracy;
1672 linesize = s->linesize;
1673 uvlinesize = s->uvlinesize;
1675 ptr = ref_picture[0] + src_offset;
1677 dest_y+=dest_offset;
/* warp origin for this macroblock: offset + delta-matrix * MB position */
1679 ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1680 oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
/* left 8x16 half; (1<<(2*a+1)) - no_rounding is the rounding constant */
1682 s->dsp.gmc(dest_y, ptr, linesize, 16,
1685 s->sprite_delta[0][0], s->sprite_delta[0][1],
1686 s->sprite_delta[1][0], s->sprite_delta[1][1],
1687 a+1, (1<<(2*a+1)) - s->no_rounding,
1688 s->h_edge_pos, s->v_edge_pos);
/* right 8x16 half: advance the origin by 8 columns through the matrix */
1689 s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1690 ox + s->sprite_delta[0][0]*8,
1691 oy + s->sprite_delta[1][0]*8,
1692 s->sprite_delta[0][0], s->sprite_delta[0][1],
1693 s->sprite_delta[1][0], s->sprite_delta[1][1],
1694 a+1, (1<<(2*a+1)) - s->no_rounding,
1695 s->h_edge_pos, s->v_edge_pos);
1697 if(s->flags&CODEC_FLAG_GRAY) return;
1700 dest_cb+=dest_offset>>1;
1701 dest_cr+=dest_offset>>1;
/* chroma warp origin at half resolution, using sprite_offset[1] */
1703 ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1704 oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1706 ptr = ref_picture[1] + (src_offset>>1);
1707 s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1710 s->sprite_delta[0][0], s->sprite_delta[0][1],
1711 s->sprite_delta[1][0], s->sprite_delta[1][1],
1712 a+1, (1<<(2*a+1)) - s->no_rounding,
1713 s->h_edge_pos>>1, s->v_edge_pos>>1);
1715 ptr = ref_picture[2] + (src_offset>>1);
1716 s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1719 s->sprite_delta[0][0], s->sprite_delta[0][1],
1720 s->sprite_delta[1][0], s->sprite_delta[1][1],
1721 a+1, (1<<(2*a+1)) - s->no_rounding,
1722 s->h_edge_pos>>1, s->v_edge_pos>>1);
/* Emulates edge padding for motion compensation: copies the part of the
 * block_w x block_h source block that lies inside the w x h picture into
 * s->edge_emu_buffer and replicates the nearest edge pixels into the
 * out-of-picture region (top/bottom rows first, then left/right columns).
 * NOTE(review): some clamping lines for the fully-outside cases are missing
 * from this view. */
1726 void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int block_w, int block_h,
1727 int src_x, int src_y, int w, int h){
1729 int start_y, start_x, end_y, end_x;
1730 uint8_t *buf= s->edge_emu_buffer;
/* block entirely below the picture: move src so row replication works */
1733 src+= (h-1-src_y)*linesize;
/* block entirely above the picture */
1735 }else if(src_y<=-block_h){
1736 src+= (1-block_h-src_y)*linesize;
/* block entirely left of the picture (horizontal analogue) */
1742 }else if(src_x<=-block_w){
1743 src+= (1-block_w-src_x);
/* intersection of the requested block with the valid picture area */
1747 start_y= FFMAX(0, -src_y);
1748 start_x= FFMAX(0, -src_x);
1749 end_y= FFMIN(block_h, h-src_y);
1750 end_x= FFMIN(block_w, w-src_x);
1752 // copy existing part
1753 for(y=start_y; y<end_y; y++){
1754 for(x=start_x; x<end_x; x++){
1755 buf[x + y*linesize]= src[x + y*linesize];
/* replicate the first valid row upwards */
1760 for(y=0; y<start_y; y++){
1761 for(x=start_x; x<end_x; x++){
1762 buf[x + y*linesize]= buf[x + start_y*linesize];
/* replicate the last valid row downwards */
1767 for(y=end_y; y<block_h; y++){
1768 for(x=start_x; x<end_x; x++){
1769 buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
/* finally replicate left and right columns across the full block height */
1773 for(y=0; y<block_h; y++){
1775 for(x=0; x<start_x; x++){
1776 buf[x + y*linesize]= buf[start_x + y*linesize];
1780 for(x=end_x; x<block_w; x++){
1781 buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1787 /* apply one mpeg motion vector to the three components */
1787 /* apply one mpeg motion vector to the three components */
/* Half-pel motion compensation for one 16xh luma block plus both 8x(h/2)
 * chroma blocks. field_based selects field (interlaced) addressing; pix_op
 * is the put/avg halfpel function table.
 * NOTE(review): a number of lines (mx/my derivation, emu-edge guards for
 * chroma, closing braces) are missing from this view. */
1788 static inline void mpeg_motion(MpegEncContext *s,
1789 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1791 uint8_t **ref_picture, int src_offset,
1792 int field_based, op_pixels_func (*pix_op)[4],
1793 int motion_x, int motion_y, int h)
1796 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
/* quarter-pel streams take the qpel path elsewhere (body elided here) */
1799 if(s->quarter_sample)
/* dxy encodes the half-pel fraction: bit0 = horizontal, bit1 = vertical */
1805 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1806 src_x = s->mb_x * 16 + (motion_x >> 1);
1807 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1809 /* WARNING: do not forget half pels */
1810 height = s->height >> field_based;
1811 v_edge_pos = s->v_edge_pos >> field_based;
1812 src_x = clip(src_x, -16, s->width);
1813 if (src_x == s->width)
1815 src_y = clip(src_y, -16, height);
1816 if (src_y == height)
/* field pictures double the effective stride */
1818 linesize = s->current_picture.linesize[0] << field_based;
1819 uvlinesize = s->current_picture.linesize[1] << field_based;
1820 ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1821 dest_y += dest_offset;
1823 if(s->flags&CODEC_FLAG_EMU_EDGE){
1824 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1825 || src_y + (motion_y&1) + h > v_edge_pos){
1826 ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
1827 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1828 ptr= s->edge_emu_buffer + src_offset;
1832 pix_op[0][dxy](dest_y, ptr, linesize, h);
1834 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma vector derivation: H.263 uses special /2 rounding (elided),
 * MPEG-1/2 divides with the visible masking below */
1836 if (s->out_format == FMT_H263) {
1838 if ((motion_x & 3) != 0)
1840 if ((motion_y & 3) != 0)
1847 dxy = ((my & 1) << 1) | (mx & 1);
1852 src_x = s->mb_x * 8 + mx;
1853 src_y = s->mb_y * (8 >> field_based) + my;
1854 src_x = clip(src_x, -8, s->width >> 1);
1855 if (src_x == (s->width >> 1))
1857 src_y = clip(src_y, -8, height >> 1);
1858 if (src_y == (height >> 1))
1860 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1861 ptr = ref_picture[1] + offset;
/* edge emulation for Cb (the guarding condition is elided in this view) */
1863 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
1864 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1865 ptr= s->edge_emu_buffer + (src_offset >> 1);
1867 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/* Cr plane: identical addressing to Cb */
1869 ptr = ref_picture[2] + offset;
1871 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
1872 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1873 ptr= s->edge_emu_buffer + (src_offset >> 1);
1875 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/* Quarter-pel motion compensation for one 16xh luma block plus both chroma
 * blocks. Luma uses qpix_op (quarter-pel functions); chroma is compensated
 * at half-pel accuracy with pix_op after rounding the vector down.
 * NOTE(review): several lines (default chroma rounding branch, emu-edge
 * guards for chroma, closing braces) are missing from this view. */
1878 static inline void qpel_motion(MpegEncContext *s,
1879 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1881 uint8_t **ref_picture, int src_offset,
1882 int field_based, op_pixels_func (*pix_op)[4],
1883 qpel_mc_func (*qpix_op)[16],
1884 int motion_x, int motion_y, int h)
1887 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
/* dxy encodes the quarter-pel fraction: low 2 bits horizontal, next 2 vertical */
1890 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1891 src_x = s->mb_x * 16 + (motion_x >> 2);
1892 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1894 height = s->height >> field_based;
1895 v_edge_pos = s->v_edge_pos >> field_based;
1896 src_x = clip(src_x, -16, s->width);
1897 if (src_x == s->width)
1899 src_y = clip(src_y, -16, height);
1900 if (src_y == height)
1902 linesize = s->linesize << field_based;
1903 uvlinesize = s->uvlinesize << field_based;
1904 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1905 dest_y += dest_offset;
1906 //printf("%d %d %d\n", src_x, src_y, dxy);
1908 if(s->flags&CODEC_FLAG_EMU_EDGE){
1909 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1910 || src_y + (motion_y&3) + h > v_edge_pos){
1911 ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based,
1912 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1913 ptr= s->edge_emu_buffer + src_offset;
/* progressive: one 16-wide qpel op; interlaced: two 8-wide ops below */
1918 qpix_op[0][dxy](dest_y, ptr, linesize);
1920 //damn interlaced mode
1921 //FIXME boundary mirroring is not exactly correct here
1922 qpix_op[1][dxy](dest_y , ptr , linesize);
1923 qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1926 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma vector: different buggy-encoder workarounds select the rounding */
1931 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
1932 static const int rtab[8]= {0,0,1,1,0,0,0,1};
1933 mx= (motion_x>>1) + rtab[motion_x&7];
1934 my= (motion_y>>1) + rtab[motion_y&7];
1935 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
1936 mx= (motion_x>>1)|(motion_x&1);
1937 my= (motion_y>>1)|(motion_y&1);
1945 dxy= (mx&1) | ((my&1)<<1);
1949 src_x = s->mb_x * 8 + mx;
1950 src_y = s->mb_y * (8 >> field_based) + my;
1951 src_x = clip(src_x, -8, s->width >> 1);
1952 if (src_x == (s->width >> 1))
1954 src_y = clip(src_y, -8, height >> 1);
1955 if (src_y == (height >> 1))
1958 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1959 ptr = ref_picture[1] + offset;
/* edge emulation for Cb (guard condition elided in this view) */
1961 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
1962 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1963 ptr= s->edge_emu_buffer + (src_offset >> 1);
1965 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/* Cr plane: identical addressing to Cb */
1967 ptr = ref_picture[2] + offset;
1969 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
1970 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1971 ptr= s->edge_emu_buffer + (src_offset >> 1);
1973 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/* H.263-style chroma vector rounding via h263_chroma_roundtab lookup; the
 * sign handling (if/else for negative x, including the x=-x step) is
 * missing from this view. */
1976 inline int ff_h263_round_chroma(int x){
1978 return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1981 return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1986 * motion compensation of a single macroblock
1988 * @param dest_y luma destination pointer
1989 * @param dest_cb chroma cb/u destination pointer
1990 * @param dest_cr chroma cr/v destination pointer
1991 * @param dir direction (0->forward, 1->backward)
1992 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1993 * @param pix_op halfpel motion compensation function (average or put normally)
1994 * @param qpix_op qpel motion compensation function (average or put normally)
1995 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatches per s->mv_type: 16x16 (with GMC/qpel/mspel variants), 8x8
 * (four luma sub-blocks plus one averaged chroma vector), and field MC.
 * NOTE(review): many case labels, else branches and closing braces are
 * missing from this view; comments describe only the visible structure. */
1997 static inline void MPV_motion(MpegEncContext *s,
1998 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1999 int dir, uint8_t **ref_picture,
2000 op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2002 int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
2004 uint8_t *ptr, *dest;
2010 switch(s->mv_type) {
/* 16x16: GMC (1 warp point -> gmc1, else full warp), qpel, mspel (WMV)
 * or plain half-pel paths */
2014 if(s->real_sprite_warping_points==1){
2015 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2018 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2021 }else if(s->quarter_sample){
2022 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2025 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2027 ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2028 ref_picture, pix_op,
2029 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2033 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2036 s->mv[dir][0][0], s->mv[dir][0][1], 16);
/* 8x8: four independent luma vectors; quarter-pel variant first */
2042 if(s->quarter_sample){
2044 motion_x = s->mv[dir][i][0];
2045 motion_y = s->mv[dir][i][1];
2047 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2048 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2049 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2051 /* WARNING: do not forget half pels */
2052 src_x = clip(src_x, -16, s->width);
2053 if (src_x == s->width)
2055 src_y = clip(src_y, -16, s->height);
2056 if (src_y == s->height)
2059 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2060 if(s->flags&CODEC_FLAG_EMU_EDGE){
2061 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
2062 || src_y + (motion_y&3) + 8 > s->v_edge_pos){
2063 ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2064 ptr= s->edge_emu_buffer;
/* i selects the 8x8 quadrant inside the 16x16 destination */
2067 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2068 qpix_op[1][dxy](dest, ptr, s->linesize);
/* accumulate the four vectors (halved to half-pel) for the chroma MV */
2070 mx += s->mv[dir][i][0]/2;
2071 my += s->mv[dir][i][1]/2;
/* half-pel variant of the same 8x8 loop */
2075 motion_x = s->mv[dir][i][0];
2076 motion_y = s->mv[dir][i][1];
2078 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2079 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
2080 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
2082 /* WARNING: do not forget half pels */
2083 src_x = clip(src_x, -16, s->width);
2084 if (src_x == s->width)
2086 src_y = clip(src_y, -16, s->height);
2087 if (src_y == s->height)
2090 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2091 if(s->flags&CODEC_FLAG_EMU_EDGE){
2092 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
2093 || src_y + (motion_y&1) + 8 > s->v_edge_pos){
2094 ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2095 ptr= s->edge_emu_buffer;
2098 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2099 pix_op[1][dxy](dest, ptr, s->linesize, 8);
2101 mx += s->mv[dir][i][0];
2102 my += s->mv[dir][i][1];
2106 if(s->flags&CODEC_FLAG_GRAY) break;
2107 /* In case of 8X8, we construct a single chroma motion vector
2108 with a special rounding */
2109 mx= ff_h263_round_chroma(mx);
2110 my= ff_h263_round_chroma(my);
2111 dxy = ((my & 1) << 1) | (mx & 1);
2115 src_x = mb_x * 8 + mx;
2116 src_y = mb_y * 8 + my;
2117 src_x = clip(src_x, -8, s->width/2);
2118 if (src_x == s->width/2)
2120 src_y = clip(src_y, -8, s->height/2);
2121 if (src_y == s->height/2)
2124 offset = (src_y * (s->uvlinesize)) + src_x;
2125 ptr = ref_picture[1] + offset;
2126 if(s->flags&CODEC_FLAG_EMU_EDGE){
2127 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
2128 || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
2129 ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2130 ptr= s->edge_emu_buffer;
2134 pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
2136 ptr = ref_picture[2] + offset;
2138 ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2139 ptr= s->edge_emu_buffer;
2141 pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
/* field MC: frame pictures compensate top and bottom fields separately;
 * field_select chooses which parity of the reference to read */
2144 if (s->picture_structure == PICT_FRAME) {
2145 if(s->quarter_sample){
2147 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2148 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2150 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2152 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2153 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2155 s->mv[dir][1][0], s->mv[dir][1][1], 8);
2158 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2159 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2161 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2163 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2164 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2166 s->mv[dir][1][0], s->mv[dir][1][1], 8);
/* field pictures: may need to read from the current picture when the
 * referenced field belongs to the same frame */
2170 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2171 offset= s->field_select[dir][0] ? s->linesize : 0;
2173 ref_picture= s->current_picture.data;
2174 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2177 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2178 ref_picture, offset,
2180 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2187 /* put block[] to dest[] */
/* Dequantizes block i with the current qscale and writes the IDCT result
 * into dest (overwrite, used for intra blocks). Braces are missing from
 * this view. */
2188 static inline void put_dct(MpegEncContext *s,
2189 DCTELEM *block, int i, uint8_t *dest, int line_size)
2191 s->dct_unquantize(s, block, i, s->qscale);
2192 s->dsp.idct_put (dest, line_size, block);
2195 /* add block[] to dest[] */
/* Adds the IDCT of an already-dequantized block i to dest, but only when
 * the block has coded coefficients (block_last_index >= 0). Braces are
 * missing from this view. */
2196 static inline void add_dct(MpegEncContext *s,
2197 DCTELEM *block, int i, uint8_t *dest, int line_size)
2199 if (s->block_last_index[i] >= 0) {
2200 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct, but dequantizes the block first: used where the bitstream
 * coefficients have not yet been dequantized. Skipped entirely when the
 * block has no coded coefficients. Braces are missing from this view. */
2204 static inline void add_dequant_dct(MpegEncContext *s,
2205 DCTELEM *block, int i, uint8_t *dest, int line_size)
2207 if (s->block_last_index[i] >= 0) {
2208 s->dct_unquantize(s, block, i, s->qscale);
2210 s->dsp.idct_add (dest, line_size, block);
2215 * cleans dc, ac, coded_block for the current non intra MB
/* Resets the intra prediction state (DC predictors to 1024, AC predictors
 * to 0, coded_block flags) for the current macroblock so a following intra
 * MB does not predict from stale values. Several lines (e.g. the first
 * luma dc_val assignment, chroma dc_val[1]) are missing from this view. */
2217 void ff_clean_intra_table_entries(MpegEncContext *s)
2219 int wrap = s->block_wrap[0];
2220 int xy = s->block_index[0];
/* luma DC predictors of the four 8x8 blocks back to the reset value 1024 */
2223 s->dc_val[0][xy + 1 ] =
2224 s->dc_val[0][xy + wrap] =
2225 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC predictors: two rows of two blocks, 16 int16_t each */
2227 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2228 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* coded_block flags only exist for MSMPEG4 v3 and later */
2229 if (s->msmpeg4_version>=3) {
2230 s->coded_block[xy ] =
2231 s->coded_block[xy + 1 ] =
2232 s->coded_block[xy + wrap] =
2233 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one block per plane at the chroma block grid position */
2236 wrap = s->block_wrap[4];
2237 xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2239 s->dc_val[2][xy] = 1024;
2241 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2242 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark this MB as non-intra in the per-MB table */
2244 s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2247 /* generic function called after a macroblock has been parsed by the
2248 decoder or after it has been encoded by the encoder.
2250 Important variables used:
2251 s->mb_intra : true if intra macroblock
2252 s->mv_dir : motion vector direction
2253 s->mv_type : motion vector type
2254 s->mv : motion vector
2255 s->interlaced_dct : true if interlaced dct used (mpeg2)
2257 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2260 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2265 s->current_picture.qscale_table[mb_xy]= s->qscale;
2267 /* update DC predictors for P macroblocks */
2269 if (s->h263_pred || s->h263_aic) {
2270 if(s->mbintra_table[mb_xy])
2271 ff_clean_intra_table_entries(s);
2275 s->last_dc[2] = 128 << s->intra_dc_precision;
2278 else if (s->h263_pred || s->h263_aic)
2279 s->mbintra_table[mb_xy]=1;
2281 /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
2282 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here
2283 //FIXME a lot of thet is only needed for !low_delay
2284 const int wrap = s->block_wrap[0];
2285 const int xy = s->block_index[0];
2286 if(s->mv_type != MV_TYPE_8X8){
2287 int motion_x, motion_y;
2291 } else if (s->mv_type == MV_TYPE_16X16) {
2292 motion_x = s->mv[0][0][0];
2293 motion_y = s->mv[0][0][1];
2294 } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
2296 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
2297 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
2298 motion_x = (motion_x>>1) | (motion_x&1);
2300 s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
2301 s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
2302 s->field_select_table[mb_xy][i]= s->field_select[0][i];
2306 /* no update if 8X8 because it has been done during parsing */
2307 s->motion_val[xy][0] = motion_x;
2308 s->motion_val[xy][1] = motion_y;
2309 s->motion_val[xy + 1][0] = motion_x;
2310 s->motion_val[xy + 1][1] = motion_y;
2311 s->motion_val[xy + wrap][0] = motion_x;
2312 s->motion_val[xy + wrap][1] = motion_y;
2313 s->motion_val[xy + 1 + wrap][0] = motion_x;
2314 s->motion_val[xy + 1 + wrap][1] = motion_y;
2317 if(s->encoding){ //FIXME encoding MUST be cleaned up
2318 if (s->mv_type == MV_TYPE_8X8)
2319 s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
2321 s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
2325 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2326 uint8_t *dest_y, *dest_cb, *dest_cr;
2327 int dct_linesize, dct_offset;
2328 op_pixels_func (*op_pix)[4];
2329 qpel_mc_func (*op_qpix)[16];
2330 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
2331 const int uvlinesize= s->current_picture.linesize[1];
2333 /* avoid copy if macroblock skipped in last frame too */
2334 if (s->pict_type != B_TYPE) {
2335 s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
2338 /* skip only during decoding as we might trash the buffers during encoding a bit */
2340 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2341 const int age= s->current_picture.age;
2347 assert(s->pict_type!=I_TYPE);
2349 (*mbskip_ptr) ++; /* indicate that this time we skiped it */
2350 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2352 /* if previous was skipped too, then nothing to do ! */
2353 if (*mbskip_ptr >= age && s->current_picture.reference){
2356 } else if(!s->current_picture.reference){
2357 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2358 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2360 *mbskip_ptr = 0; /* not skipped */
2365 if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){ //FIXME precalc
2366 dest_y = s->current_picture.data[0] + mb_x * 16;
2367 dest_cb = s->current_picture.data[1] + mb_x * 8;
2368 dest_cr = s->current_picture.data[2] + mb_x * 8;
2370 dest_y = s->current_picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
2371 dest_cb = s->current_picture.data[1] + (mb_y * 8 * uvlinesize) + mb_x * 8;
2372 dest_cr = s->current_picture.data[2] + (mb_y * 8 * uvlinesize) + mb_x * 8;
2375 if (s->interlaced_dct) {
2376 dct_linesize = linesize * 2;
2377 dct_offset = linesize;
2379 dct_linesize = linesize;
2380 dct_offset = linesize * 8;
2384 /* motion handling */
2385 /* decoding or more than one mb_type (MC was allready done otherwise) */
2386 if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
2387 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2388 op_pix = s->dsp.put_pixels_tab;
2389 op_qpix= s->dsp.put_qpel_pixels_tab;
2391 op_pix = s->dsp.put_no_rnd_pixels_tab;
2392 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2395 if (s->mv_dir & MV_DIR_FORWARD) {
2396 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2397 op_pix = s->dsp.avg_pixels_tab;
2398 op_qpix= s->dsp.avg_qpel_pixels_tab;
2400 if (s->mv_dir & MV_DIR_BACKWARD) {
2401 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2405 /* skip dequant / idct if we are really late ;) */
2406 if(s->hurry_up>1) return;
2408 /* add dct residue */
2409 if(s->encoding || !( s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO
2410 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2411 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2412 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2413 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2414 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2416 if(!(s->flags&CODEC_FLAG_GRAY)){
2417 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
2418 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
2420 } else if(s->codec_id != CODEC_ID_WMV2){
2421 add_dct(s, block[0], 0, dest_y, dct_linesize);
2422 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2423 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2424 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2426 if(!(s->flags&CODEC_FLAG_GRAY)){
2427 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2428 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2433 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2437 /* dct only in intra block */
2438 if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
2439 put_dct(s, block[0], 0, dest_y, dct_linesize);
2440 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2441 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2442 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2444 if(!(s->flags&CODEC_FLAG_GRAY)){
2445 put_dct(s, block[4], 4, dest_cb, uvlinesize);
2446 put_dct(s, block[5], 5, dest_cr, uvlinesize);
2449 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2450 s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]);
2451 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2452 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2454 if(!(s->flags&CODEC_FLAG_GRAY)){
2455 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2456 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2463 #ifdef CONFIG_ENCODERS
/*
 * Eliminate (zero out) a quantized DCT block that carries only a few small
 * coefficients, since coding it would cost more bits than it is worth.
 * NOTE(review): several lines are elided in this excerpt (the tab[] contents,
 * score accumulation, skip_dc setup); comments describe only the visible logic.
 * @param n         block index into s->block[] (0-3 luma, 4-5 chroma)
 * @param threshold elimination threshold; sign appears to select a variant
 */
2465 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2467 static const char tab[64]=
2479 DCTELEM *block= s->block[n];
2480 const int last_index= s->block_last_index[n];
/* fold the variant selection into the threshold's sign */
2485 threshold= -threshold;
2489 /* are all which we could set to zero are allready zero? */
2490 if(last_index<=skip_dc - 1) return;
/* pass 1: accumulate a significance score over the coded coefficients
   in scan order (permutated[] maps scan position -> block offset) */
2492 for(i=0; i<=last_index; i++){
2493 const int j = s->intra_scantable.permutated[i];
2494 const int level = ABS(block[j]);
2496 if(skip_dc && i==0) continue;
/* block is significant enough: keep it untouched */
2505 if(score >= threshold) return;
/* pass 2: clear every coefficient (DC excluded when skip_dc is set) */
2506 for(i=skip_dc; i<=last_index; i++){
2507 const int j = s->intra_scantable.permutated[i];
/* keep last_index consistent: 0 if DC survived, -1 if block is now empty */
2510 if(block[0]) s->block_last_index[n]= 0;
2511 else s->block_last_index[n]= -1;
/*
 * Clip the quantized coefficients of one block into the encoder's legal
 * range [s->min_qcoeff, s->max_qcoeff]; called after dct_quantize reports
 * an overflow. NOTE(review): the store of the clipped value back into
 * block[j] is elided in this excerpt — confirm against the full source.
 */
2514 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2517 const int maxlevel= s->max_qcoeff;
2518 const int minlevel= s->min_qcoeff;
/* intra DC has its own coding range, so clipping starts at coefficient 1 */
2521 i=1; //skip clipping of intra dc
2525 for(;i<=last_index; i++){
2526 const int j= s->intra_scantable.permutated[i];
2527 int level = block[j];
2529 if (level>maxlevel) level=maxlevel;
2530 else if(level<minlevel) level=minlevel;
/*
 * Sum of absolute vertical differences over a 16x8 pixel area (SAD-style
 * interlace metric, compiled when the SAD comparison mode is selected).
 * Used by encode_mb to decide between progressive and interlaced DCT.
 * NOTE(review): the row loop, score init and return are elided here.
 */
2537 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
/* compare each pixel with the one directly below it, 4 columns at a time */
2542 for(x=0; x<16; x+=4){
2543 score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride])
2544 +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
/*
 * Same vertical-gradient metric as pix_vcmp16x8, but applied to the
 * residual (s1 - s2), i.e. source minus prediction — used for inter
 * macroblocks when choosing between progressive and interlaced DCT.
 * NOTE(review): the row loop, score init and return are elided here.
 */
2552 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2557 for(x=0; x<16; x++){
2558 score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
/* square helper for the SSE-based variants below; argument fully
   parenthesized but evaluated twice — do not pass expressions with
   side effects */
2567 #define SQ(a) ((a)*(a))
/*
 * Sum-of-squared vertical differences over a 16x8 area (SSE variant of
 * pix_vcmp16x8, compiled for the squared-error comparison mode).
 * NOTE(review): the row loop, score init and return are elided here.
 */
2569 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2574 for(x=0; x<16; x+=4){
2575 score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride])
2576 +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
/*
 * SSE variant of pix_diff_vcmp16x8: squared vertical gradient of the
 * residual (s1 - s2) over a 16x8 area.
 * NOTE(review): the row loop, score init and return are elided here.
 */
2584 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2589 for(x=0; x<16; x++){
2590 score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
2601 #endif //CONFIG_ENCODERS
/*
 * Hand a horizontal band of the decoded picture to the application's
 * draw_horiz_band() callback (slice-level display/processing hook).
 */
2605 * @param h is the normal height, this will be reduced automatically if needed for the last row
2607 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
/* only meaningful if the app installed a callback and we have something
   displayable (a reference picture, or low_delay output) */
2608 if ( s->avctx->draw_horiz_band
2609 && (s->last_picture_ptr || s->low_delay) ) {
2610 uint8_t *src_ptr[3];
/* clamp band height at the bottom picture edge */
2612 h= FFMIN(h, s->height - y);
2614 if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME)
2617 offset = y * s->linesize;
/* B frames (and low_delay) are displayed immediately from the current
   picture; otherwise the previously decoded reference is shown.
   offset>>2 = (y/2) * (linesize/2) for 4:2:0 chroma planes. */
2619 if(s->pict_type==B_TYPE || s->low_delay){
2620 src_ptr[0] = s->current_picture.data[0] + offset;
2621 src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
2622 src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
2624 src_ptr[0] = s->last_picture.data[0] + offset;
2625 src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
2626 src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
2630 s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
2635 #ifdef CONFIG_ENCODERS
/*
 * Encode one 16x16 macroblock: adaptive-quant dquant handling, pixel
 * fetch (intra) or motion compensation + residual (inter), optional
 * interlaced-DCT decision, forward DCT + quantization, coefficient
 * elimination, and finally codec-specific entropy coding.
 * NOTE(review): many lines are elided in this excerpt (variable
 * declarations, else branches, closing braces, loop headers); comments
 * below describe only the visible logic.
 */
2637 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2639 const int mb_x= s->mb_x;
2640 const int mb_y= s->mb_y;
2643 int dct_offset = s->linesize*8; //default for progressive frames
2645 for(i=0; i<6; i++) skip_dct[i]=0;
/* per-MB quantizer delta from the rate controller's qscale map */
2647 if(s->adaptive_quant){
2648 s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;
/* H.263-family syntax only allows dquant in [-2, 2] */
2650 if(s->out_format==FMT_H263){
2651 if (s->dquant> 2) s->dquant= 2;
2652 else if(s->dquant<-2) s->dquant=-2;
/* MPEG-4: direct-mode and 8x8 MVs cannot carry a dquant */
2655 if(s->codec_id==CODEC_ID_MPEG4){
2657 if(s->mv_dir&MV_DIRECT)
2660 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2663 s->qscale+= s->dquant;
2664 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2665 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
/* ---- intra path: fetch source pixels straight into the DCT blocks ---- */
2673 wrap_y = s->linesize;
2674 ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
/* MB sticks out of the picture: read via the edge-replicating buffer */
2676 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2677 ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2678 ptr= s->edge_emu_buffer;
/* decide frame vs field DCT by comparing vertical-gradient scores;
   the +100 bias favors progressive on near ties */
2682 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2683 int progressive_score, interlaced_score;
2685 progressive_score= pix_vcmp16x8(ptr, wrap_y ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2686 interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y , wrap_y*2);
2688 if(progressive_score > interlaced_score + 100){
2689 s->interlaced_dct=1;
2694 s->interlaced_dct=0;
/* copy the four 8x8 luma blocks */
2697 s->dsp.get_pixels(s->block[0], ptr , wrap_y);
2698 s->dsp.get_pixels(s->block[1], ptr + 8, wrap_y);
2699 s->dsp.get_pixels(s->block[2], ptr + dct_offset , wrap_y);
2700 s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
/* gray-only mode skips chroma fetch entirely */
2702 if(s->flags&CODEC_FLAG_GRAY){
2706 int wrap_c = s->uvlinesize;
2707 ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2709 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2710 ptr= s->edge_emu_buffer;
2712 s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2714 ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2716 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2717 ptr= s->edge_emu_buffer;
2719 s->dsp.get_pixels(s->block[5], ptr, wrap_c);
/* ---- inter path: motion compensate, then take the residual ---- */
2722 op_pixels_func (*op_pix)[4];
2723 qpel_mc_func (*op_qpix)[16];
2724 uint8_t *dest_y, *dest_cb, *dest_cr;
2725 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2729 dest_y = s->current_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
2730 dest_cb = s->current_picture.data[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2731 dest_cr = s->current_picture.data[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2732 wrap_y = s->linesize;
2733 wrap_c = s->uvlinesize;
2734 ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2735 ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2736 ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
/* rounding mode alternates on P frames (no_rounding); B frames always round */
2738 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2739 op_pix = s->dsp.put_pixels_tab;
2740 op_qpix= s->dsp.put_qpel_pixels_tab;
2742 op_pix = s->dsp.put_no_rnd_pixels_tab;
2743 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
/* forward prediction writes, backward then averages on top (bidir) */
2746 if (s->mv_dir & MV_DIR_FORWARD) {
2747 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2748 op_pix = s->dsp.avg_pixels_tab;
2749 op_qpix= s->dsp.avg_qpel_pixels_tab;
2751 if (s->mv_dir & MV_DIR_BACKWARD) {
2752 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2755 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2756 ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2757 ptr_y= s->edge_emu_buffer;
/* same frame/field DCT decision but on the residual; larger bias (600) */
2761 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2762 int progressive_score, interlaced_score;
2764 progressive_score= pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y )
2765 + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y );
2766 interlaced_score = pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y*2)
2767 + pix_diff_vcmp16x8(ptr_y + wrap_y , dest_y + wrap_y , wrap_y*2);
2769 if(progressive_score > interlaced_score + 600){
2770 s->interlaced_dct=1;
2775 s->interlaced_dct=0;
/* residual = source - prediction, per 8x8 block */
2778 s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
2779 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2780 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
2781 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2783 if(s->flags&CODEC_FLAG_GRAY){
2788 ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2789 ptr_cb= s->edge_emu_buffer;
2791 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2793 ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2794 ptr_cr= s->edge_emu_buffer;
2796 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2798 /* pre quantization */
/* cheap skip heuristic: if the MB's MC variance is tiny, mark blocks
   whose SAD is under 20*qscale so their DCT can be skipped */
2799 if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
2801 if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2802 if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2803 if(s->dsp.pix_abs8x8(ptr_y +dct_offset , dest_y +dct_offset , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2804 if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2805 if(s->dsp.pix_abs8x8(ptr_cb , dest_cb , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2806 if(s->dsp.pix_abs8x8(ptr_cr , dest_cr , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
/* (debug-only statistics/printf section) */
2812 if(skip_dct[i]) num++;
2815 if(s->mb_x==0 && s->mb_y==0){
2817 printf("%6d %1d\n", stat[i], i);
2830 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
2831 ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2833 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
2834 (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P',
2835 s->qscale, adap_parm, s->qscale*adap_parm,
2836 s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
2839 /* DCT & quantize */
/* MJPEG always quantizes with qscale 8 (qscale is folded into the matrix) */
2840 if(s->out_format==FMT_MJPEG){
2843 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
2844 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2850 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2851 // FIXME we could decide to change to quantizer instead of clipping
2852 // JS: I don't think that would be a good idea it could lower quality instead
2853 // of improve it. Just INTRADC clipping deserves changes in quantizer
2854 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
/* skipped blocks are marked empty */
2856 s->block_last_index[i]= -1;
/* drop nearly-empty inter blocks (bit-cost heuristic, see
   dct_single_coeff_elimination) */
2858 if(s->luma_elim_threshold && !s->mb_intra)
2860 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2861 if(s->chroma_elim_threshold && !s->mb_intra)
2863 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
/* gray mode: force intra chroma to a flat mid-gray DC-only block */
2866 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
2867 s->block_last_index[4]=
2868 s->block_last_index[5]= 0;
2870 s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
2873 /* huffman encode */
2874 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2875 case CODEC_ID_MPEG1VIDEO:
2876 mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
2878 case CODEC_ID_MPEG4:
2879 mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2880 case CODEC_ID_MSMPEG4V2:
2881 case CODEC_ID_MSMPEG4V3:
2883 msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2885 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
2887 case CODEC_ID_H263P:
2889 h263_encode_mb(s, s->block, motion_x, motion_y); break;
2891 case CODEC_ID_MJPEG:
2892 mjpeg_encode_mb(s, s->block); break;
2898 #endif //CONFIG_ENCODERS
2901 * combines the (truncated) bitstream to a complete frame
2902 * @returns -1 if no complete frame could be created
/*
 * Parser helper: accumulate input chunks in pc->buffer until a complete
 * frame is available. On END_NOT_FOUND the chunk is appended and -1 is
 * (presumably) returned; otherwise *buf/*buf_size are adjusted to the
 * completed frame. Bytes read past the frame end ("overread") are saved
 * and replayed at the start of the next call.
 */
2904 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
2905 ParseContext *pc= &s->parse_context;
/* (debug tracing) */
2909 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
2910 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
2914 /* copy overreaded byes from last frame into buffer */
2915 for(; pc->overread>0; pc->overread--){
2916 pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
2919 pc->last_index= pc->index;
2921 /* copy into buffer end return */
/* frame end not found yet: stash the whole chunk and wait for more data */
2922 if(next == END_NOT_FOUND){
2923 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2925 memcpy(&pc->buffer[pc->index], *buf, *buf_size);
2926 pc->index += *buf_size;
/* remember where the next frame's data begins inside this chunk */
2931 pc->overread_index= pc->index + next;
2933 /* append to buffer */
2935 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2937 memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
2942 /* store overread bytes */
/* negative next: we consumed bytes belonging to the next frame — fold
   them back into the sync state */
2943 for(;next < 0; next++){
2944 pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
/* (debug tracing) */
2950 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
2951 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
2958 #ifdef CONFIG_ENCODERS
/*
 * Append `length` bits from src to the bitstream writer pb.
 * Copies 16 bits at a time (big-endian order via be2me_16), then the
 * remaining 0-15 bits from the top of the next 16-bit word.
 * NOTE(review): src is read as uint16_t* — assumes src is suitably
 * aligned and padded to a 16-bit boundary; confirm against callers.
 */
2959 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
2961 int bytes= length>>4;
2962 int bits= length&15;
2965 if(length==0) return;
2967 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
2968 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
/*
 * Snapshot the encoder state that encode_mb() mutates, so a macroblock
 * can be trial-encoded several ways (see encode_mb_hq) and the state
 * restored between attempts. Mirrors copy_context_after_encode below —
 * keep the two field lists in sync.
 */
2971 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2974 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
/* H.263/MPEG-1 run-length skip state */
2977 d->mb_skip_run= s->mb_skip_run;
2979 d->last_dc[i]= s->last_dc[i];
/* rate-control statistics */
2982 d->mv_bits= s->mv_bits;
2983 d->i_tex_bits= s->i_tex_bits;
2984 d->p_tex_bits= s->p_tex_bits;
2985 d->i_count= s->i_count;
2986 d->f_count= s->f_count;
2987 d->b_count= s->b_count;
2988 d->skip_count= s->skip_count;
2989 d->misc_bits= s->misc_bits;
2992 d->mb_skiped= s->mb_skiped;
2993 d->qscale= s->qscale;
/*
 * Counterpart of copy_context_before_encode: after a trial encode, copy
 * the resulting state (including the outputs — MVs, mb_intra, mv_type,
 * bitstream writers, block indices) from s into d. Used both to record
 * the best candidate and to restore it afterwards.
 */
2996 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* also copies the chosen motion vectors, unlike the "before" variant */
2999 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
3000 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
3003 d->mb_skip_run= s->mb_skip_run;
3005 d->last_dc[i]= s->last_dc[i];
/* rate-control statistics */
3008 d->mv_bits= s->mv_bits;
3009 d->i_tex_bits= s->i_tex_bits;
3010 d->p_tex_bits= s->p_tex_bits;
3011 d->i_count= s->i_count;
3012 d->f_count= s->f_count;
3013 d->b_count= s->b_count;
3014 d->skip_count= s->skip_count;
3015 d->misc_bits= s->misc_bits;
/* mode decision results */
3017 d->mb_intra= s->mb_intra;
3018 d->mb_skiped= s->mb_skiped;
3019 d->mv_type= s->mv_type;
3020 d->mv_dir= s->mv_dir;
/* data partitioning uses separate texture bitstream writers */
3022 if(s->data_partitioning){
3024 d->tex_pb= s->tex_pb;
3028 d->block_last_index[i]= s->block_last_index[i];
3029 d->interlaced_dct= s->interlaced_dct;
3030 d->qscale= s->qscale;
/*
 * Trial-encode one macroblock as `type` into a scratch bit buffer and
 * keep it if it beats the best attempt so far (fewest bits).
 * @param backup     encoder state before any attempt (restored first)
 * @param best       receives the state of the winning attempt
 * @param pb/pb2/tex_pb  double-buffered scratch bitstream writers;
 *                   *next_block selects which half is written
 * @param dmin       in/out: bit count of the best attempt so far
 */
3033 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
3034 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3035 int *dmin, int *next_block, int motion_x, int motion_y)
/* restore pristine state, then point the writers at the scratch half */
3039 copy_context_before_encode(s, backup, type);
3041 s->block= s->blocks[*next_block];
3042 s->pb= pb[*next_block];
3043 if(s->data_partitioning){
3044 s->pb2 = pb2 [*next_block];
3045 s->tex_pb= tex_pb[*next_block];
3048 encode_mb(s, motion_x, motion_y);
/* cost = total bits across all partitions */
3050 bits_count= get_bit_count(&s->pb);
3051 if(s->data_partitioning){
3052 bits_count+= get_bit_count(&s->pb2);
3053 bits_count+= get_bit_count(&s->tex_pb);
/* new best: record it (the buffer-half flip is elided in this excerpt) */
3056 if(bits_count<*dmin){
3060 copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two w x h pixel areas.
 * Uses the optimized dsputil routines for the common 16x16 and 8x8
 * sizes, and a generic table-driven loop otherwise (squareTbl is
 * offset by 256 so negative differences index correctly).
 */
3064 static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3065 uint32_t *sq = squareTbl + 256;
3070 return s->dsp.sse[0](NULL, src1, src2, stride);
3071 else if(w==8 && h==8)
3072 return s->dsp.sse[1](NULL, src1, src2, stride);
3076 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
3085 static void encode_picture(MpegEncContext *s, int picture_number)
3087 int mb_x, mb_y, pdif = 0;
3090 MpegEncContext best_s, backup_s;
3091 uint8_t bit_buf[2][3000];
3092 uint8_t bit_buf2[2][3000];
3093 uint8_t bit_buf_tex[2][3000];
3094 PutBitContext pb[2], pb2[2], tex_pb[2];
3097 init_put_bits(&pb [i], bit_buf [i], 3000, NULL, NULL);
3098 init_put_bits(&pb2 [i], bit_buf2 [i], 3000, NULL, NULL);
3099 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
3102 s->picture_number = picture_number;
3104 /* Reset the average MB variance */
3105 s->current_picture.mb_var_sum = 0;
3106 s->current_picture.mc_mb_var_sum = 0;
3109 /* we need to initialize some time vars before we can encode b-frames */
3110 // RAL: Condition added for MPEG1VIDEO
3111 if (s->codec_id == CODEC_ID_MPEG1VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3112 ff_set_mpeg4_time(s, s->picture_number);
3115 s->scene_change_score=0;
3117 s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration
3119 if(s->pict_type==I_TYPE){
3120 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3121 else s->no_rounding=0;
3122 }else if(s->pict_type!=B_TYPE){
3123 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3124 s->no_rounding ^= 1;
3127 /* Estimate motion for every MB */
3128 s->mb_intra=0; //for the rate distoration & bit compare functions
3129 if(s->pict_type != I_TYPE){
3130 if(s->pict_type != B_TYPE){
3131 if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3133 s->me.dia_size= s->avctx->pre_dia_size;
3135 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3136 for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3139 ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
3146 s->me.dia_size= s->avctx->dia_size;
3147 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3148 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3149 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3150 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3151 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3152 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3155 s->block_index[0]+=2;
3156 s->block_index[1]+=2;
3157 s->block_index[2]+=2;
3158 s->block_index[3]+=2;
3160 /* compute motion vector & mb_type and store in context */
3161 if(s->pict_type==B_TYPE)
3162 ff_estimate_b_frame_motion(s, mb_x, mb_y);
3164 ff_estimate_p_frame_motion(s, mb_x, mb_y);
3167 }else /* if(s->pict_type == I_TYPE) */{
3169 //FIXME do we need to zero them?
3170 memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3171 memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3172 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3174 if(!s->fixed_qscale){
3175 /* finding spatial complexity for I-frame rate control */
3176 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3177 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3180 uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3182 int sum = s->dsp.pix_sum(pix, s->linesize);
3184 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3186 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3187 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3188 s->current_picture.mb_var_sum += varc;
3195 if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
3196 s->pict_type= I_TYPE;
3197 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3198 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3202 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3203 s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
3205 ff_fix_long_p_mvs(s);
3208 if(s->pict_type==B_TYPE){
3211 a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
3212 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
3213 s->f_code = FFMAX(a, b);
3215 a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
3216 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
3217 s->b_code = FFMAX(a, b);
3219 ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
3220 ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
3221 ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
3222 ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
3226 if (s->fixed_qscale)
3227 s->frame_qscale = s->current_picture.quality;
3229 s->frame_qscale = ff_rate_estimate_qscale(s);
3231 if(s->adaptive_quant){
3233 switch(s->codec_id){
3234 case CODEC_ID_MPEG4:
3235 ff_clean_mpeg4_qscales(s);
3238 case CODEC_ID_H263P:
3239 ff_clean_h263_qscales(s);
3244 s->qscale= s->current_picture.qscale_table[0];
3246 s->qscale= (int)(s->frame_qscale + 0.5);
3248 if (s->out_format == FMT_MJPEG) {
3249 /* for mjpeg, we do include qscale in the matrix */
3250 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3252 int j= s->dsp.idct_permutation[i];
3254 s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3256 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3257 s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
3260 //FIXME var duplication
3261 s->current_picture.key_frame= s->pict_type == I_TYPE;
3262 s->current_picture.pict_type= s->pict_type;
3264 if(s->current_picture.key_frame)
3265 s->picture_in_gop_number=0;
3267 s->last_bits= get_bit_count(&s->pb);
3268 switch(s->out_format) {
3270 mjpeg_picture_header(s);
3274 if (s->codec_id == CODEC_ID_WMV2)
3275 ff_wmv2_encode_picture_header(s, picture_number);
3276 else if (s->h263_msmpeg4)
3277 msmpeg4_encode_picture_header(s, picture_number);
3278 else if (s->h263_pred)
3279 mpeg4_encode_picture_header(s, picture_number);
3280 else if (s->h263_rv10)
3281 rv10_encode_picture_header(s, picture_number);
3283 h263_encode_picture_header(s, picture_number);
3287 mpeg1_encode_picture_header(s, picture_number);
3290 bits= get_bit_count(&s->pb);
3291 s->header_bits= bits - s->last_bits;
3303 /* init last dc values */
3304 /* note: quant matrix value (8) is implied here */
3305 s->last_dc[i] = 128;
3307 s->current_picture_ptr->error[i] = 0;
3310 s->last_mv[0][0][0] = 0;
3311 s->last_mv[0][0][1] = 0;
3312 s->last_mv[1][0][0] = 0;
3313 s->last_mv[1][0][1] = 0;
3318 if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
3319 s->gob_index = ff_h263_get_gob_height(s);
3321 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3322 ff_mpeg4_init_partitions(s);
3327 s->first_slice_line = 1;
3328 s->ptr_lastgob = s->pb.buf;
3329 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3330 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3331 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3333 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3334 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3335 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3336 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3337 s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
3338 s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
3339 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3340 const int xy= mb_y*s->mb_stride + mb_x;
3341 int mb_type= s->mb_type[xy];
3347 s->block_index[0]+=2;
3348 s->block_index[1]+=2;
3349 s->block_index[2]+=2;
3350 s->block_index[3]+=2;
3351 s->block_index[4]++;
3352 s->block_index[5]++;
3354 /* write gob / video packet header */
3357 int current_packet_size, is_gob_start;
3359 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3362 if(s->codec_id==CODEC_ID_MPEG4){
3363 if(current_packet_size >= s->rtp_payload_size
3364 && s->mb_y + s->mb_x>0){
3366 if(s->partitioned_frame){
3367 ff_mpeg4_merge_partitions(s);
3368 ff_mpeg4_init_partitions(s);
3370 ff_mpeg4_encode_video_packet_header(s);
3372 if(s->flags&CODEC_FLAG_PASS1){
3373 int bits= get_bit_count(&s->pb);
3374 s->misc_bits+= bits - s->last_bits;
3377 ff_mpeg4_clean_buffers(s);
3380 }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
3381 if( current_packet_size >= s->rtp_payload_size
3382 && s->mb_y + s->mb_x>0 && s->mb_skip_run==0){
3383 ff_mpeg1_encode_slice_header(s);
3384 ff_mpeg1_clean_buffers(s);
3388 if(current_packet_size >= s->rtp_payload_size
3389 && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
3391 h263_encode_gob_header(s, mb_y);
3397 s->ptr_lastgob = pbBufPtr(&s->pb);
3398 s->first_slice_line=1;
3399 s->resync_mb_x=mb_x;
3400 s->resync_mb_y=mb_y;
3405 if( (s->resync_mb_x == s->mb_x)
3406 && s->resync_mb_y+1 == s->mb_y){
3407 s->first_slice_line=0;
3410 if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3412 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3414 copy_context_before_encode(&backup_s, s, -1);
3416 best_s.data_partitioning= s->data_partitioning;
3417 best_s.partitioned_frame= s->partitioned_frame;
3418 if(s->data_partitioning){
3419 backup_s.pb2= s->pb2;
3420 backup_s.tex_pb= s->tex_pb;
3423 if(mb_type&MB_TYPE_INTER){
3424 s->mv_dir = MV_DIR_FORWARD;
3425 s->mv_type = MV_TYPE_16X16;
3427 s->mv[0][0][0] = s->p_mv_table[xy][0];
3428 s->mv[0][0][1] = s->p_mv_table[xy][1];
3429 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb,
3430 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3432 if(mb_type&MB_TYPE_INTER4V){
3433 s->mv_dir = MV_DIR_FORWARD;
3434 s->mv_type = MV_TYPE_8X8;
3437 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3438 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3440 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb,
3441 &dmin, &next_block, 0, 0);
3443 if(mb_type&MB_TYPE_FORWARD){
3444 s->mv_dir = MV_DIR_FORWARD;
3445 s->mv_type = MV_TYPE_16X16;
3447 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3448 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3449 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb,
3450 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3452 if(mb_type&MB_TYPE_BACKWARD){
3453 s->mv_dir = MV_DIR_BACKWARD;
3454 s->mv_type = MV_TYPE_16X16;
3456 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3457 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3458 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3459 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3461 if(mb_type&MB_TYPE_BIDIR){
3462 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3463 s->mv_type = MV_TYPE_16X16;
3465 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3466 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3467 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3468 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3469 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb,
3470 &dmin, &next_block, 0, 0);
3472 if(mb_type&MB_TYPE_DIRECT){
3473 int mx= s->b_direct_mv_table[xy][0];
3474 int my= s->b_direct_mv_table[xy][1];
3476 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3479 ff_mpeg4_set_direct_mv(s, mx, my);
3481 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb,
3482 &dmin, &next_block, mx, my);
3484 if(mb_type&MB_TYPE_INTRA){
3486 s->mv_type = MV_TYPE_16X16;
3490 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
3491 &dmin, &next_block, 0, 0);
3492 /* force cleaning of ac/dc pred stuff if needed ... */
3493 if(s->h263_pred || s->h263_aic)
3494 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3496 copy_context_after_encode(s, &best_s, -1);
3498 pb_bits_count= get_bit_count(&s->pb);
3499 flush_put_bits(&s->pb);
3500 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3503 if(s->data_partitioning){
3504 pb2_bits_count= get_bit_count(&s->pb2);
3505 flush_put_bits(&s->pb2);
3506 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3507 s->pb2= backup_s.pb2;
3509 tex_pb_bits_count= get_bit_count(&s->tex_pb);
3510 flush_put_bits(&s->tex_pb);
3511 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3512 s->tex_pb= backup_s.tex_pb;
3514 s->last_bits= get_bit_count(&s->pb);
3516 int motion_x, motion_y;
3518 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
3520 if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
3521 /* get luma score */
3522 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3523 intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
3527 int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
3530 dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
3532 for(i=0; i<16; i++){
3533 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3534 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3535 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3536 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3540 intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3542 /* printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8,
3543 s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
3544 s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
3547 /* get chroma score */
3548 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3556 if(s->out_format == FMT_H263){
3557 mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3559 mean= (s->last_dc[i] + 4)>>3;
3561 dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
3565 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean;
3566 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean;
3569 intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3574 switch(s->avctx->mb_cmp&0xFF){
3577 intra_score+= 32*s->qscale;
3580 intra_score+= 24*s->qscale*s->qscale;
3583 intra_score+= 96*s->qscale;
3586 intra_score+= 48*s->qscale;
3593 intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3597 if(intra_score < inter_score)
3598 mb_type= MB_TYPE_INTRA;
3601 s->mv_type=MV_TYPE_16X16;
3602 // only one MB-Type possible
3608 motion_x= s->mv[0][0][0] = 0;
3609 motion_y= s->mv[0][0][1] = 0;
3612 s->mv_dir = MV_DIR_FORWARD;
3614 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3615 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3617 case MB_TYPE_INTER4V:
3618 s->mv_dir = MV_DIR_FORWARD;
3619 s->mv_type = MV_TYPE_8X8;
3622 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3623 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3625 motion_x= motion_y= 0;
3627 case MB_TYPE_DIRECT:
3628 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3630 motion_x=s->b_direct_mv_table[xy][0];
3631 motion_y=s->b_direct_mv_table[xy][1];
3633 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3637 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3641 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3642 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3643 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3644 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3646 case MB_TYPE_BACKWARD:
3647 s->mv_dir = MV_DIR_BACKWARD;
3649 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3650 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3652 case MB_TYPE_FORWARD:
3653 s->mv_dir = MV_DIR_FORWARD;
3655 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3656 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3657 // printf(" %d %d ", motion_x, motion_y);
3660 motion_x=motion_y=0; //gcc warning fix
3661 printf("illegal MB type\n");
3664 encode_mb(s, motion_x, motion_y);
3666 // RAL: Update last macrobloc type
3667 s->last_mv_dir = s->mv_dir;
3670 /* clean the MV table in IPS frames for direct mode in B frames */
3671 if(s->mb_intra /* && I,P,S_TYPE */){
3672 s->p_mv_table[xy][0]=0;
3673 s->p_mv_table[xy][1]=0;
3676 MPV_decode_mb(s, s->block);
3678 if(s->flags&CODEC_FLAG_PSNR){
3682 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3683 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3685 s->current_picture_ptr->error[0] += sse(
3687 s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3688 s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3690 s->current_picture_ptr->error[1] += sse(
3692 s->new_picture .data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3693 s->current_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3694 w>>1, h>>1, s->uvlinesize);
3695 s->current_picture_ptr->error[2] += sse(
3697 s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3698 s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3699 w>>1, h>>1, s->uvlinesize);
3701 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
3707 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3708 ff_mpeg4_merge_partitions(s);
3710 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3711 msmpeg4_encode_ext_header(s);
3713 if(s->codec_id==CODEC_ID_MPEG4)
3714 ff_mpeg4_stuffing(&s->pb);
3717 //if (s->gob_number)
3718 // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3720 /* Send the last GOB if RTP */
3722 flush_put_bits(&s->pb);
3723 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3724 /* Call the RTP callback to send the last GOB */
3725 if (s->rtp_callback)
3726 s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3727 s->ptr_lastgob = pbBufPtr(&s->pb);
3728 //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
/**
 * Rate-distortion ("trellis") quantization of one 8x8 DCT block.
 * Quantizes in place and returns the index of the last non-zero
 * coefficient in scan order (or a value < start_i if the block is empty).
 * *overflow is set when a quantized level exceeds s->max_qcoeff.
 * NOTE(review): this chunk appears truncated (several statements between
 * the visible lines are missing); comments describe only the visible code.
 */
3732 static int dct_quantize_trellis_c(MpegEncContext *s,
3733 DCTELEM *block, int n,
3734 int qscale, int *overflow){
3736 const uint8_t *scantable= s->intra_scantable.scantable;
3738 unsigned int threshold1, threshold2;
3748 int coeff_count[64];
3749 int lambda, qmul, qadd, start_i, last_non_zero, i;
3750 const int esc_length= s->ac_esc_length;
3752 uint8_t * last_length;
/* forward DCT in place */
3756 s->dsp.fdct (block);
3759 qadd= ((qscale-1)|1)*8;
3770 /* For AIC we skip quant/dequant of INTRADC */
3775 /* note: block[0] is assumed to be positive */
3776 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: pick intra quant matrix and intra AC VLC bit-length tables */
3779 qmat = s->q_intra_matrix[qscale];
3780 if(s->mpeg_quant || s->codec_id== CODEC_ID_MPEG1VIDEO)
3781 bias= 1<<(QMAT_SHIFT-1);
3782 length = s->intra_ac_vlc_length;
3783 last_length= s->intra_ac_vlc_last_length;
/* inter path: inter matrix and inter VLC length tables */
3787 qmat = s->q_inter_matrix[qscale];
3788 length = s->inter_ac_vlc_length;
3789 last_length= s->inter_ac_vlc_last_length;
3792 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3793 threshold2= (threshold1<<1);
/* pass 1: quantize each coefficient in scan order, recording up to two
 * candidate levels per position (coeff[0..1][k]) in coeff_count[k] */
3795 for(i=start_i; i<64; i++) {
3796 const int j = scantable[i];
3797 const int k= i-start_i;
3798 int level = block[j];
3799 level = level * qmat[j];
3801 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3802 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3803 if(((unsigned)(level+threshold1))>threshold2){
3805 level= (bias + level)>>QMAT_SHIFT;
3807 coeff[1][k]= level-1;
3808 // coeff[2][k]= level-2;
3810 level= (bias - level)>>QMAT_SHIFT;
3811 coeff[0][k]= -level;
3812 coeff[1][k]= -level+1;
3813 // coeff[2][k]= -level+2;
3815 coeff_count[k]= FFMIN(level, 2);
/* below-threshold coefficient: only candidate is +/-1 ((level>>31)|1 is the sign) */
3819 coeff[0][k]= (level>>31)|1;
3824 *overflow= s->max_qcoeff < max; //overflow might have happend
/* all-zero block: clear and bail out early */
3826 if(last_non_zero < start_i){
3827 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3828 return last_non_zero;
3831 lambda= (qscale*qscale*64*105 + 64)>>7; //FIXME finetune
/* pass 2: dynamic programming over (run, level) choices; score = squared
 * reconstruction error + lambda * VLC bits, tabulated in score_tab[] */
3834 for(i=0; i<=last_non_zero - start_i; i++){
3835 int level_index, run, j;
3836 const int dct_coeff= block[ scantable[i + start_i] ];
3837 const int zero_distoration= dct_coeff*dct_coeff;
3838 int best_score=256*256*256*120;
3840 last_score += zero_distoration;
3841 for(level_index=0; level_index < coeff_count[i]; level_index++){
3843 int level= coeff[level_index][i];
/* reconstruct the candidate exactly as the decoder would, so the
 * distortion term matches real output (H.263 vs MPEG paths differ) */
3848 if(s->out_format == FMT_H263){
3850 unquant_coeff= level*qmul + qadd;
3852 unquant_coeff= level*qmul - qadd;
3855 j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
3858 unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3;
3859 unquant_coeff = -((unquant_coeff - 1) | 1);
3861 unquant_coeff = (int)( level * qscale * s->intra_matrix[j]) >> 3;
3862 unquant_coeff = (unquant_coeff - 1) | 1;
3866 unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3867 unquant_coeff = -((unquant_coeff - 1) | 1);
3869 unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3870 unquant_coeff = (unquant_coeff - 1) | 1;
3876 distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff);
/* levels that fit the regular VLC table (|level| < 128 after offset) */
3878 if((level&(~127)) == 0){
3879 for(run=0; run<=i - left_limit; run++){
3880 int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3881 score += score_tab[i-run];
3883 if(score < best_score){
3885 score_tab[i+1]= score;
3887 level_tab[i+1]= level-64;
/* H.263-family also tracks the best "last coefficient" code separately */
3891 if(s->out_format == FMT_H263){
3892 for(run=0; run<=i - left_limit; run++){
3893 int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3894 score += score_tab[i-run];
3895 if(score < last_score){
3898 last_level= level-64;
/* large levels must use the escape code: fixed esc_length bit cost */
3904 distoration += esc_length*lambda;
3905 for(run=0; run<=i - left_limit; run++){
3906 int score= distoration + score_tab[i-run];
3908 if(score < best_score){
3910 score_tab[i+1]= score;
3912 level_tab[i+1]= level-64;
3916 if(s->out_format == FMT_H263){
3917 for(run=0; run<=i - left_limit; run++){
3918 int score= distoration + score_tab[i-run];
3919 if(score < last_score){
3922 last_level= level-64;
/* charge the cost of zeroing this coefficient to all shorter paths */
3930 for(j=left_limit; j<=i; j++){
3931 score_tab[j] += zero_distoration;
3933 score_limit+= zero_distoration;
3934 if(score_tab[i+1] < score_limit)
3935 score_limit= score_tab[i+1];
3937 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
/* prune clearly-hopeless path starts to keep the run loops short */
3938 while(score_tab[ left_limit ] > score_limit + lambda) left_limit++;
3941 //FIXME add some cbp penalty
/* non-H.263 formats: choose the best end position here instead */
3943 if(s->out_format != FMT_H263){
3944 last_score= 256*256*256*120;
3945 for(i= left_limit; i<=last_non_zero - start_i + 1; i++){
3946 int score= score_tab[i];
3947 if(i) score += lambda*2; //FIXME exacter?
3949 if(score < last_score){
3952 last_level= level_tab[i];
3953 last_run= run_tab[i];
3958 last_non_zero= last_i - 1 + start_i;
3959 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3961 if(last_non_zero < start_i)
3962 return last_non_zero;
3966 //FIXME use permutated scantable
/* pass 3: walk the chosen path backwards, writing levels into block[]
 * at IDCT-permuted positions */
3967 block[ s->dsp.idct_permutation[ scantable[last_non_zero] ] ]= last_level;
3970 for(;i>0 ; i -= run_tab[i] + 1){
3971 const int j= s->dsp.idct_permutation[ scantable[i - 1 + start_i] ];
3973 block[j]= level_tab[i];
3977 return last_non_zero;
/**
 * Plain (non-trellis) quantization of one 8x8 DCT block.
 * Forward-DCTs the block, quantizes it in place using the per-qscale
 * matrix, and returns the index of the last non-zero coefficient.
 * *overflow is set when a level exceeds s->max_qcoeff.
 * NOTE(review): this chunk appears truncated (several statements between
 * the visible lines are missing); comments describe only the visible code.
 */
3980 static int dct_quantize_c(MpegEncContext *s,
3981 DCTELEM *block, int n,
3982 int qscale, int *overflow)
3984 int i, j, level, last_non_zero, q;
3986 const uint8_t *scantable= s->intra_scantable.scantable;
3989 unsigned int threshold1, threshold2;
/* forward DCT in place */
3991 s->dsp.fdct (block);
4001 /* For AIC we skip quant/dequant of INTRADC */
4004 /* note: block[0] is assumed to be positive */
4005 block[0] = (block[0] + (q >> 1)) / q;
/* pick quant matrix and rounding bias for intra vs inter blocks */
4008 qmat = s->q_intra_matrix[qscale];
4009 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4013 qmat = s->q_inter_matrix[qscale];
4014 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4016 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4017 threshold2= (threshold1<<1);
4022 level = level * qmat[j];
4024 // if( bias+level >= (1<<QMAT_SHIFT)
4025 // || bias-level >= (1<<QMAT_SHIFT)){
/* single branchless magnitude test for "level quantizes to non-zero" */
4026 if(((unsigned)(level+threshold1))>threshold2){
4028 level= (bias + level)>>QMAT_SHIFT;
4031 level= (bias - level)>>QMAT_SHIFT;
4040 *overflow= s->max_qcoeff < max; //overflow might have happend
4042 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4043 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4044 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4046 return last_non_zero;
4049 #endif //CONFIG_ENCODERS
/**
 * MPEG-1 inverse quantization of one 8x8 block, in place.
 * n selects the block (luma vs chroma DC scale); coefficients after
 * block_last_index are untouched. Uses the MPEG-1 "(x-1)|1" oddification
 * rule on every reconstructed level.
 * NOTE(review): this chunk appears truncated; comments describe only the
 * visible code.
 */
4051 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
4052 DCTELEM *block, int n, int qscale)
4054 int i, level, nCoeffs;
4055 const uint16_t *quant_matrix;
4057 nCoeffs= s->block_last_index[n];
/* DC term: simple scale, no matrix (luma vs chroma scale factor) */
4061 block[0] = block[0] * s->y_dc_scale;
4063 block[0] = block[0] * s->c_dc_scale;
4064 /* XXX: only mpeg1 */
4065 quant_matrix = s->intra_matrix;
/* intra AC terms: level * qscale * matrix >> 3, then force odd */
4066 for(i=1;i<=nCoeffs;i++) {
4067 int j= s->intra_scantable.permutated[i];
4072 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4073 level = (level - 1) | 1;
4076 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4077 level = (level - 1) | 1;
4080 if (level < -2048 || level > 2047)
4081 fprintf(stderr, "unquant error %d %d\n", i, level);
/* inter blocks: (2*level+1) * qscale * matrix >> 4, then force odd */
4088 quant_matrix = s->inter_matrix;
4089 for(;i<=nCoeffs;i++) {
4090 int j= s->intra_scantable.permutated[i];
4095 level = (((level << 1) + 1) * qscale *
4096 ((int) (quant_matrix[j]))) >> 4;
4097 level = (level - 1) | 1;
4100 level = (((level << 1) + 1) * qscale *
4101 ((int) (quant_matrix[j]))) >> 4;
4102 level = (level - 1) | 1;
4105 if (level < -2048 || level > 2047)
4106 fprintf(stderr, "unquant error %d %d\n", i, level);
/**
 * MPEG-2 inverse quantization of one 8x8 block, in place.
 * Same structure as the MPEG-1 variant, but without the "(x-1)|1"
 * oddification (MPEG-2 uses mismatch control instead), and with the
 * alternate-scan case always processing all 64 coefficients.
 * NOTE(review): this chunk appears truncated; comments describe only the
 * visible code.
 */
4114 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
4115 DCTELEM *block, int n, int qscale)
4117 int i, level, nCoeffs;
4118 const uint16_t *quant_matrix;
/* alternate scan cannot rely on block_last_index, so do the whole block */
4120 if(s->alternate_scan) nCoeffs= 63;
4121 else nCoeffs= s->block_last_index[n];
/* DC term: plain scale, luma vs chroma */
4125 block[0] = block[0] * s->y_dc_scale;
4127 block[0] = block[0] * s->c_dc_scale;
4128 quant_matrix = s->intra_matrix;
/* intra AC terms: level * qscale * matrix >> 3 */
4129 for(i=1;i<=nCoeffs;i++) {
4130 int j= s->intra_scantable.permutated[i];
4135 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4138 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4141 if (level < -2048 || level > 2047)
4142 fprintf(stderr, "unquant error %d %d\n", i, level);
/* inter blocks: (2*level+1) * qscale * matrix >> 4 */
4150 quant_matrix = s->inter_matrix;
4151 for(;i<=nCoeffs;i++) {
4152 int j= s->intra_scantable.permutated[i];
4157 level = (((level << 1) + 1) * qscale *
4158 ((int) (quant_matrix[j]))) >> 4;
4161 level = (((level << 1) + 1) * qscale *
4162 ((int) (quant_matrix[j]))) >> 4;
4165 if (level < -2048 || level > 2047)
4166 fprintf(stderr, "unquant error %d %d\n", i, level);
/**
 * H.263-family inverse quantization of one 8x8 block, in place.
 * No quant matrix: reconstruction is level*qmul +/- qadd depending on
 * sign, with qadd forced odd. n selects luma/chroma DC scaling.
 * NOTE(review): this chunk appears truncated; comments describe only the
 * visible code.
 */
4177 static void dct_unquantize_h263_c(MpegEncContext *s,
4178 DCTELEM *block, int n, int qscale)
4180 int i, level, qmul, qadd;
4183 assert(s->block_last_index[n]>=0);
/* qadd is forced odd per the H.263 reconstruction formula */
4185 qadd = (qscale - 1) | 1;
4191 block[0] = block[0] * s->y_dc_scale;
4193 block[0] = block[0] * s->c_dc_scale;
4197 nCoeffs= 63; //does not allways use zigzag table
4200 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
/* negative levels subtract qadd, positive levels add it */
4203 for(;i<=nCoeffs;i++) {
4207 level = level * qmul - qadd;
4209 level = level * qmul + qadd;
4212 if (level < -2048 || level > 2047)
4213 fprintf(stderr, "unquant error %d %d\n", i, level);
/**
 * Encoder option table shared by the MPEG-4/MSMPEG4/WMV1 encoders below.
 * Each entry maps a user-visible option name to a MpegEncContext/AVCodecContext
 * field with (min, max, default).
 * Fixes in this revision:
 *  - "i_quant_offset" was bound to the i_quant_factor field (copy-paste bug),
 *    so setting the offset silently overwrote the factor; now bound to
 *    i_quant_offset.
 *  - duplicate "me_range" entry removed (it was listed twice with identical
 *    binding).
 *  - typos in help strings fixed ("alghorithm", "diamod").
 */
4221 static const AVOption mpeg4_options[] =
4223 AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
4224 AVOPTION_CODEC_FLAG("vhq", "very high quality", flags, CODEC_FLAG_HQ, 0),
4225 AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
4226 "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
4227 bit_rate_tolerance, 4, 240000000, 8000),
4228 AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
4229 AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
4230 AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
4231 rc_eq, "tex^qComp,option1,options2", 0),
4232 AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
4233 rc_min_rate, 4, 24000000, 0),
4234 AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
4235 rc_max_rate, 4, 24000000, 0),
4236 AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
4237 rc_buffer_aggressivity, 4, 24000000, 0),
4238 AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
4239 rc_initial_cplx, 0., 9999999., 0),
4240 AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
4241 i_quant_factor, 0., 0., 0),
4242 AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
4243 i_quant_offset, -999999., 999999., 0), /* was i_quant_factor: wrong field */
4244 AVOPTION_CODEC_INT("dct_algo", "dct algorithm",
4245 dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
4246 AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
4247 lumi_masking, 0., 999999., 0),
4248 AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
4249 temporal_cplx_masking, 0., 999999., 0),
4250 AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
4251 spatial_cplx_masking, 0., 999999., 0),
4252 AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
4253 p_masking, 0., 999999., 0),
4254 AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
4255 dark_masking, 0., 999999., 0),
4256 AVOPTION_CODEC_INT("idct_algo", "idct algorithm",
4257 idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
4259 AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
4261 AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
4264 AVOPTION_CODEC_INT("me_cmp", "ME compare function",
4265 me_cmp, 0, 24000000, 0),
4266 AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
4267 me_sub_cmp, 0, 24000000, 0),
4270 AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
4271 dia_size, 0, 24000000, 0),
4272 AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
4273 last_predictor_count, 0, 24000000, 0),
4275 AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
4276 pre_me, 0, 24000000, 0),
4277 AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
4278 me_pre_cmp, 0, 24000000, 0),
4280 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
4281 me_range, 0, 24000000, 0),
4282 AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamond size & shape",
4283 pre_dia_size, 0, 24000000, 0),
4284 AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
4285 me_subpel_quality, 0, 24000000, 0),
4288 AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
4289 flags, CODEC_FLAG_PSNR, 0),
4290 AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
4292 AVOPTION_SUB(avoptions_common),
/*
 * Encoder registration tables: one AVCodec descriptor per supported
 * encoder, all backed by the shared MpegEncContext. The MPEG-4/MSMPEG4/
 * WMV1 encoders expose the mpeg4_options table above.
 * NOTE(review): the struct initializers are heavily truncated in this
 * chunk (name/type/function-pointer fields are missing); only the visible
 * fields are shown.
 */
4296 #ifdef CONFIG_ENCODERS
4298 AVCodec mpeg1video_encoder = {
4301 CODEC_ID_MPEG1VIDEO,
4302 sizeof(MpegEncContext),
4310 AVCodec h263_encoder = {
4314 sizeof(MpegEncContext),
4320 AVCodec h263p_encoder = {
4324 sizeof(MpegEncContext),
4330 AVCodec rv10_encoder = {
4334 sizeof(MpegEncContext),
4340 AVCodec mpeg4_encoder = {
4344 sizeof(MpegEncContext),
4348 .options = mpeg4_options,
4351 AVCodec msmpeg4v1_encoder = {
4355 sizeof(MpegEncContext),
4359 .options = mpeg4_options,
4362 AVCodec msmpeg4v2_encoder = {
4366 sizeof(MpegEncContext),
4370 .options = mpeg4_options,
4373 AVCodec msmpeg4v3_encoder = {
4377 sizeof(MpegEncContext),
4381 .options = mpeg4_options,
4384 AVCodec wmv1_encoder = {
4388 sizeof(MpegEncContext),
4392 .options = mpeg4_options,
4397 AVCodec mjpeg_encoder = {
4401 sizeof(MpegEncContext),
4407 #endif //CONFIG_ENCODERS