2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
24 * The simplest mpeg encoder (well, it was the simplest!).
31 #include "mpegvideo.h"
34 #include "fastmemcpy.h"
/* Forward declarations of the portable C reference kernels.
 * NOTE(review): this chunk appears truncated — some surrounding lines are
 * missing; declarations below are kept byte-identical. */
40 #ifdef CONFIG_ENCODERS
41 static void encode_picture(MpegEncContext *s, int picture_number);
42 #endif //CONFIG_ENCODERS
/* De-quantizers for the three coding families handled by this file. */
43 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
44 DCTELEM *block, int n, int qscale);
45 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_h263_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
50 #ifdef CONFIG_ENCODERS
51 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
52 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
53 #endif //CONFIG_ENCODERS
/* Exported as a function pointer, presumably so arch-specific init can
 * replace the C version — TODO confirm against MPV_common_init_* code. */
55 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
58 /* enable all paranoid tests for rounding, overflows, etc... */
64 /* for jpeg fast DCT */
/* AAN (Arai/Agui/Nakajima) post-scaling factors, 8x8, row-major; consumed
 * by convert_matrix() when the fast ifast FDCT is selected. */
67 static const uint16_t aanscales[64] = {
68 /* precomputed values scaled up by 14 bits */
69 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
70 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
71 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
72 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
73 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
74 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
75 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446,
76 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247
/* Rounding table indexed by (chroma MV & 15); usage is not visible in this
 * chunk — presumably for h.263 chroma motion-vector rounding. */
79 static const uint8_t h263_chroma_roundtab[16] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
84 #ifdef CONFIG_ENCODERS
/* Default MV-penalty / f_code tables, filled lazily in MPV_encode_init
 * (see the av_mallocz/memset there); shared by all encoder instances. */
85 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
86 static uint8_t default_fcode_tab[MAX_MV*2+1];
/* -1 terminated list of the single pixel format the encoder accepts. */
88 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/* Builds the per-qscale quantization multiplier tables (qmat, and for the
 * default path also the 16-bit qmat16/qmat16_bias variants) from a quant
 * matrix, choosing the scaling that matches the selected FDCT:
 * islow gets plain reciprocals, ifast folds in the AAN post-scales.
 * NOTE(review): several interior lines (loop headers, braces) are missing
 * from this chunk; code kept byte-identical. */
90 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
91 const uint16_t *quant_matrix, int bias, int qmin, int qmax)
95 for(qscale=qmin; qscale<=qmax; qscale++){
97 if (s->dsp.fdct == ff_jpeg_fdct_islow) {
99 const int j= s->dsp.idct_permutation[i];
100 /* 16 <= qscale * quant_matrix[i] <= 7905 */
101 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
102 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
103 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
105 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
106 (qscale * quant_matrix[j]));
108 } else if (s->dsp.fdct == fdct_ifast) {
110 const int j= s->dsp.idct_permutation[i];
111 /* 16 <= qscale * quant_matrix[i] <= 7905 */
112 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
113 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
114 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
/* ifast FDCT output is pre-scaled by aanscales (14 extra bits), so the
 * reciprocal must divide them back out. */
116 qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
117 (aanscales[i] * qscale * quant_matrix[j]));
121 const int j= s->dsp.idct_permutation[i];
122 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
123 So 16 <= qscale * quant_matrix[i] <= 7905
124 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
125 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
127 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
128 // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
129 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
/* clamp so the 16-bit multiplier never becomes 0 or exactly 128*256 */
131 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
132 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
137 #endif //CONFIG_ENCODERS
/* Initializes a ScanTable: stores the raw scan order and its permutated
 * form (src order remapped through the IDCT permutation), plus raster_end
 * markers. NOTE(review): most of the body is missing from this chunk. */
139 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
143 st->scantable= src_scantable;
147 j = src_scantable[i];
148 st->permutated[i] = permutation[j];
157 j = st->permutated[i];
159 st->raster_end[i]= end;
163 /* init common dct for both encoder and decoder */
/* Installs the C reference (un)quantizers, lets each arch-specific
 * MPV_common_init_* override them, then builds the permutated scantables.
 * Returns 0 on success (full body not visible in this chunk). */
164 int DCT_common_init(MpegEncContext *s)
166 s->dct_unquantize_h263 = dct_unquantize_h263_c;
167 s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
168 s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
170 #ifdef CONFIG_ENCODERS
171 s->dct_quantize= dct_quantize_c;
/* Architecture-specific initializers may replace the C kernels set above. */
175 MPV_common_init_mmx(s);
178 MPV_common_init_axp(s);
181 MPV_common_init_mlib(s);
184 MPV_common_init_mmi(s);
187 MPV_common_init_armv4l(s);
190 MPV_common_init_ppc(s);
193 #ifdef CONFIG_ENCODERS
/* keep the (possibly arch-optimized) quantizer reachable even when the
 * slower trellis quantizer is selected below */
194 s->fast_dct_quantize= s->dct_quantize;
196 if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
197 s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
200 #endif //CONFIG_ENCODERS
202 /* load & permute scantables
203 note: only wmv uses different ones
205 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
206 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
207 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
208 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
210 s->picture_structure= PICT_FRAME;
216 * allocates a Picture
217 * The pixels are allocated/set by calling get_buffer() if shared=0
/* Returns 0 on success, negative on failure (via the CHECKED_ALLOCZ `fail`
 * label). Validates strides from get_buffer() and allocates the per-MB
 * side tables on first use. Body partially missing in this chunk. */
219 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
220 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
221 const int mb_array_size= s->mb_stride*s->mb_height;
/* shared path: caller already owns the pixel buffers */
225 assert(pic->data[0]);
226 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
227 pic->type= FF_BUFFER_TYPE_SHARED;
231 assert(!pic->data[0]);
233 r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
235 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
236 fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
/* strides must stay constant across frames once known */
240 if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
241 fprintf(stderr, "get_buffer() failed (stride changed)\n");
245 if(pic->linesize[1] != pic->linesize[2]){
246 fprintf(stderr, "get_buffer() failed (uv stride missmatch)\n");
250 s->linesize = pic->linesize[0];
251 s->uvlinesize= pic->linesize[1];
/* first use of this Picture: allocate the per-macroblock side tables */
254 if(pic->qscale_table==NULL){
256 CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
257 CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
258 CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
259 CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
262 CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
263 CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
264 CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(int))
265 pic->mb_type= pic->mb_type_base + s->mb_stride+1;
266 if(s->out_format == FMT_H264){
268 CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
269 CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
272 pic->qstride= s->mb_stride;
275 //it might be nicer if the application would keep track of these but it would require an API change
276 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
277 s->prev_pict_types[0]= s->pict_type;
278 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
279 pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and it's a bit tricky to skip them anyway
282 fail: //for the CHECKED_ALLOCZ macro
287 * deallocates a picture
/* Releases the avctx-owned pixel buffer (unless shared) and frees all the
 * per-MB side tables allocated in alloc_picture(). Body partially missing
 * in this chunk. */
289 static void free_picture(MpegEncContext *s, Picture *pic){
292 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
293 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
296 av_freep(&pic->mb_var);
297 av_freep(&pic->mc_mb_var);
298 av_freep(&pic->mb_mean);
299 av_freep(&pic->mb_cmp_score);
300 av_freep(&pic->mbskip_table);
301 av_freep(&pic->qscale_table);
302 av_freep(&pic->mb_type_base);
305 av_freep(&pic->motion_val[i]);
306 av_freep(&pic->ref_index[i]);
309 if(pic->type == FF_BUFFER_TYPE_SHARED){
318 /* init common structure for both encoder and decoder */
/* Computes macroblock geometry from width/height, allocates every shared
 * table (MV tables, error-status, AC/DC prediction, skip tables, ...) via
 * CHECKED_ALLOCZ, and marks the context initialized. Returns 0 on success;
 * allocation failures jump to the (not visible here) `fail` label.
 * NOTE(review): many interior lines are missing from this chunk. */
319 int MPV_common_init(MpegEncContext *s)
321 int y_size, c_size, yc_size, i, mb_array_size, x, y;
323 dsputil_init(&s->dsp, s->avctx);
326 s->flags= s->avctx->flags;
/* macroblock geometry: 16x16 MBs, stride has one extra column */
328 s->mb_width = (s->width + 15) / 16;
329 s->mb_height = (s->height + 15) / 16;
330 s->mb_stride = s->mb_width + 1;
331 mb_array_size= s->mb_height * s->mb_stride;
333 /* set default edge pos, will be overridden in decode_header if needed */
334 s->h_edge_pos= s->mb_width*16;
335 s->v_edge_pos= s->mb_height*16;
337 s->mb_num = s->mb_width * s->mb_height;
342 s->block_wrap[3]= s->mb_width*2 + 2;
344 s->block_wrap[5]= s->mb_width + 2;
346 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
347 c_size = (s->mb_width + 2) * (s->mb_height + 2);
348 yc_size = y_size + 2 * c_size;
350 /* convert fourcc to upper case */
351 s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
352 + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
353 + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
354 + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
356 CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
357 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
359 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
361 CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
362 for(y=0; y<s->mb_height; y++){
363 for(x=0; x<s->mb_width; x++){
364 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
367 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
370 int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;
372 /* Allocate MV tables */
373 CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
374 CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
375 CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
376 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
377 CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
378 CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
/* working pointers skip the first padding row/column of each base table */
379 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
380 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
381 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
382 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
383 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
384 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
386 //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
387 CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
389 CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t))
390 CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
392 if(s->codec_id==CODEC_ID_MPEG4){
393 CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
394 CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE);
397 if(s->msmpeg4_version){
398 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
400 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
402 /* Allocate MB type table */
403 CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint8_t)) //needed for encoding
406 CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
408 if (s->out_format == FMT_H263 || s->encoding) {
412 size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
413 CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));
416 if(s->codec_id==CODEC_ID_MPEG4){
417 /* interlaced direct mode decoding tables */
418 CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
419 CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
421 if (s->out_format == FMT_H263) {
423 CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
424 s->ac_val[1] = s->ac_val[0] + y_size;
425 s->ac_val[2] = s->ac_val[1] + c_size;
428 CHECKED_ALLOCZ(s->coded_block, y_size);
430 /* divx501 bitstream reorder buffer */
431 CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
433 /* cbp, ac_pred, pred_dir */
434 CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
435 CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
438 if (s->h263_pred || s->h263_plus || !s->encoding) {
440 //MN: we need these for error resilience of intra-frames
441 CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
442 s->dc_val[1] = s->dc_val[0] + y_size;
443 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value */
444 for(i=0;i<yc_size;i++)
445 s->dc_val[0][i] = 1024;
448 /* which mb is an intra block */
449 CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
450 memset(s->mbintra_table, 1, mb_array_size);
452 /* default structure is frame */
453 s->picture_structure = PICT_FRAME;
455 /* init macroblock skip table */
456 CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
457 //Note the +1 is for a quicker mpeg4 slice_end detection
458 CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
460 s->block= s->blocks[0];
462 s->parse_context.state= -1;
464 s->context_initialized = 1;
474 /* init common structure for both encoder and decoder */
/* Frees everything MPV_common_init() allocated (av_freep NULLs each
 * pointer, so double-free is safe), releases all Pictures and clears the
 * initialized flag. */
475 void MPV_common_end(MpegEncContext *s)
479 av_freep(&s->mb_type);
480 av_freep(&s->p_mv_table_base);
481 av_freep(&s->b_forw_mv_table_base);
482 av_freep(&s->b_back_mv_table_base);
483 av_freep(&s->b_bidir_forw_mv_table_base);
484 av_freep(&s->b_bidir_back_mv_table_base);
485 av_freep(&s->b_direct_mv_table_base);
/* working pointers were offsets into the freed bases — clear them too */
487 s->b_forw_mv_table= NULL;
488 s->b_back_mv_table= NULL;
489 s->b_bidir_forw_mv_table= NULL;
490 s->b_bidir_back_mv_table= NULL;
491 s->b_direct_mv_table= NULL;
493 av_freep(&s->motion_val);
494 av_freep(&s->dc_val[0]);
495 av_freep(&s->ac_val[0]);
496 av_freep(&s->coded_block);
497 av_freep(&s->mbintra_table);
498 av_freep(&s->cbp_table);
499 av_freep(&s->pred_dir_table);
500 av_freep(&s->me.scratchpad);
501 av_freep(&s->me.map);
502 av_freep(&s->me.score_map);
504 av_freep(&s->mbskip_table);
505 av_freep(&s->prev_pict_types);
506 av_freep(&s->bitstream_buffer);
507 av_freep(&s->tex_pb_buffer);
508 av_freep(&s->pb2_buffer);
509 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
510 av_freep(&s->field_mv_table);
511 av_freep(&s->field_select_table);
512 av_freep(&s->avctx->stats_out);
513 av_freep(&s->ac_stats);
514 av_freep(&s->error_status_table);
515 av_freep(&s->mb_index2xy);
517 for(i=0; i<MAX_PICTURE_COUNT; i++){
518 free_picture(s, &s->picture[i]);
520 avcodec_default_free_buffers(s->avctx);
521 s->context_initialized = 0;
524 #ifdef CONFIG_ENCODERS
526 /* init video encoder */
/* Copies encoder settings from AVCodecContext, validates flag/codec
 * combinations, selects quant biases and out_format per codec, builds the
 * default quant matrices and rate control. Returns 0 on success, negative
 * on an unsupported combination (error paths mostly missing from this
 * chunk — the switch arms and returns are heavily truncated). */
527 int MPV_encode_init(AVCodecContext *avctx)
529 MpegEncContext *s = avctx->priv_data;
532 avctx->pix_fmt = PIX_FMT_YUV420P;
534 s->bit_rate = avctx->bit_rate;
535 s->bit_rate_tolerance = avctx->bit_rate_tolerance;
536 s->width = avctx->width;
537 s->height = avctx->height;
538 if(avctx->gop_size > 600){
539 fprintf(stderr, "Warning keyframe interval too large! reducing it ...\n");
542 s->gop_size = avctx->gop_size;
543 s->rtp_mode = avctx->rtp_mode;
544 s->rtp_payload_size = avctx->rtp_payload_size;
545 if (avctx->rtp_callback)
546 s->rtp_callback = avctx->rtp_callback;
547 s->max_qdiff= avctx->max_qdiff;
548 s->qcompress= avctx->qcompress;
549 s->qblur= avctx->qblur;
551 s->flags= avctx->flags;
552 s->max_b_frames= avctx->max_b_frames;
553 s->b_frame_strategy= avctx->b_frame_strategy;
554 s->codec_id= avctx->codec->id;
555 s->luma_elim_threshold = avctx->luma_elim_threshold;
556 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
557 s->strict_std_compliance= avctx->strict_std_compliance;
558 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
559 s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
560 s->mpeg_quant= avctx->mpeg_quant;
562 if (s->gop_size <= 1) {
569 s->me_method = avctx->me_method;
572 s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
/* adaptive quant is enabled when any perceptual masking option is set */
574 s->adaptive_quant= ( s->avctx->lumi_masking
575 || s->avctx->dark_masking
576 || s->avctx->temporal_cplx_masking
577 || s->avctx->spatial_cplx_masking
578 || s->avctx->p_masking)
581 s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
/* reject feature/codec combinations that only MPEG-4 (or MPEG-1) supports */
583 if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
584 fprintf(stderr, "4MV not supporetd by codec\n");
588 if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
589 fprintf(stderr, "qpel not supporetd by codec\n");
593 if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
594 fprintf(stderr, "data partitioning not supporetd by codec\n");
598 if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO){
599 fprintf(stderr, "b frames not supporetd by codec\n");
603 if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
604 fprintf(stderr, "mpeg2 style quantization not supporetd by codec\n");
/* default quantizer rounding bias per codec family */
608 if(s->codec_id==CODEC_ID_MJPEG){
609 s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
610 s->inter_quant_bias= 0;
611 }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO){
612 s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
613 s->inter_quant_bias= 0;
615 s->intra_quant_bias=0;
616 s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
/* user-supplied biases override the defaults above */
619 if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
620 s->intra_quant_bias= avctx->intra_quant_bias;
621 if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
622 s->inter_quant_bias= avctx->inter_quant_bias;
/* per-codec output format and feature setup (arms truncated in this chunk) */
624 switch(avctx->codec->id) {
625 case CODEC_ID_MPEG1VIDEO:
626 s->out_format = FMT_MPEG1;
627 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
628 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
631 s->out_format = FMT_MJPEG;
632 s->intra_only = 1; /* force intra only for jpeg */
633 s->mjpeg_write_tables = 1; /* write all tables */
634 s->mjpeg_data_only_frames = 0; /* write all the needed headers */
635 s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
636 s->mjpeg_vsample[1] = 1; /* the only currently supported values */
637 s->mjpeg_vsample[2] = 1;
638 s->mjpeg_hsample[0] = 2;
639 s->mjpeg_hsample[1] = 1;
640 s->mjpeg_hsample[2] = 1;
641 if (mjpeg_init(s) < 0)
648 if (h263_get_picture_format(s->width, s->height) == 7) {
649 printf("Input picture size isn't suitable for h263 codec! try h263+\n");
652 s->out_format = FMT_H263;
657 s->out_format = FMT_H263;
660 s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
661 s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
663 /* These are just to be sure */
669 s->out_format = FMT_H263;
675 s->out_format = FMT_H263;
677 s->unrestricted_mv = 1;
678 s->low_delay= s->max_b_frames ? 0 : 1;
679 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
681 case CODEC_ID_MSMPEG4V1:
682 s->out_format = FMT_H263;
685 s->unrestricted_mv = 1;
686 s->msmpeg4_version= 1;
690 case CODEC_ID_MSMPEG4V2:
691 s->out_format = FMT_H263;
694 s->unrestricted_mv = 1;
695 s->msmpeg4_version= 2;
699 case CODEC_ID_MSMPEG4V3:
700 s->out_format = FMT_H263;
703 s->unrestricted_mv = 1;
704 s->msmpeg4_version= 3;
705 s->flipflop_rounding=1;
710 s->out_format = FMT_H263;
713 s->unrestricted_mv = 1;
714 s->msmpeg4_version= 4;
715 s->flipflop_rounding=1;
720 s->out_format = FMT_H263;
723 s->unrestricted_mv = 1;
724 s->msmpeg4_version= 5;
725 s->flipflop_rounding=1;
734 { /* set up some safe defaults, some codecs might override them later */
/* lazily build the shared default MV-penalty / f_code tables */
740 default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
741 memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
742 memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
744 for(i=-16; i<16; i++){
745 default_fcode_tab[i + MAX_MV]= 1;
749 s->me.mv_penalty= default_mv_penalty;
750 s->fcode_tab= default_fcode_tab;
752 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
754 /* don't use mv_penalty table for crap MV as it would be confused */
755 //FIXME remove after fixing / removing old ME
756 if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
761 if (MPV_common_init(s) < 0)
766 #ifdef CONFIG_ENCODERS
768 if (s->out_format == FMT_H263)
770 if(s->msmpeg4_version)
771 ff_msmpeg4_encode_init(s);
773 if (s->out_format == FMT_MPEG1)
774 ff_mpeg1_encode_init(s);
777 /* init default q matrix */
779 int j= s->dsp.idct_permutation[i];
781 if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
782 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
783 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
784 }else if(s->out_format == FMT_H263){
786 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
790 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
791 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
795 /* precompute matrix */
796 /* for mjpeg, we do include qscale in the matrix */
797 if (s->out_format != FMT_MJPEG) {
798 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias,
799 s->intra_matrix, s->intra_quant_bias, 1, 31);
800 convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias,
801 s->inter_matrix, s->inter_quant_bias, 1, 31);
804 if(ff_rate_control_init(s) < 0)
807 s->picture_number = 0;
808 s->picture_in_gop_number = 0;
809 s->fake_picture_number = 0;
810 /* motion detector init */
/* Encoder teardown: shuts down rate control and the MJPEG tables if used.
 * Body partially missing in this chunk. */
817 int MPV_encode_end(AVCodecContext *avctx)
819 MpegEncContext *s = avctx->priv_data;
825 ff_rate_control_uninit(s);
828 if (s->out_format == FMT_MJPEG)
834 #endif //CONFIG_ENCODERS
/* Precomputes max_level[], max_run[] and index_run[] for an RLE table,
 * once for not-last (last=0) and once for last (last=1) coefficients,
 * storing heap copies in the RLTable. index_run entries left at rl->n
 * mean "run not present". Interior lines missing from this chunk. */
836 void init_rl(RLTable *rl)
838 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
839 uint8_t index_run[MAX_RUN+1];
840 int last, run, level, start, end, i;
842 /* compute max_level[], max_run[] and index_run[] */
843 for(last=0;last<2;last++) {
852 memset(max_level, 0, MAX_RUN + 1);
853 memset(max_run, 0, MAX_LEVEL + 1);
854 memset(index_run, rl->n, MAX_RUN + 1);
855 for(i=start;i<end;i++) {
856 run = rl->table_run[i];
857 level = rl->table_level[i];
858 if (index_run[run] == rl->n)
860 if (level > max_level[run])
861 max_level[run] = level;
862 if (run > max_run[level])
863 max_run[level] = run;
/* NOTE(review): these av_malloc results are not checked — OOM here would
 * be dereferenced by memcpy; confirm against project allocation policy. */
865 rl->max_level[last] = av_malloc(MAX_RUN + 1);
866 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
867 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
868 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
869 rl->index_run[last] = av_malloc(MAX_RUN + 1);
870 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
874 /* draw the edges of width 'w' of an image of size width, height */
875 //FIXME check that this is ok for mpeg4 interlaced
/* Pads the picture border by replicating the outermost pixels: top/bottom
 * rows are memcpy'd, left/right columns memset from the edge pixel, then
 * the four corners. Loop headers partially missing from this chunk. */
876 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
878 uint8_t *ptr, *last_line;
881 last_line = buf + (height - 1) * wrap;
/* replicate top and bottom rows into the w-pixel border */
884 memcpy(buf - (i + 1) * wrap, buf, width);
885 memcpy(last_line + (i + 1) * wrap, last_line, width);
/* replicate left and right columns */
889 for(i=0;i<height;i++) {
890 memset(ptr - w, ptr[0], w);
891 memset(ptr + width, ptr[width-1], w);
/* fill the four corner blocks from the corner pixels */
896 memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
897 memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
898 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* top left */
899 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* top right */
/* Finds a free slot in s->picture[]: shared callers get any slot with no
 * pixel data; others prefer (in order) untyped empty slots, typed empty
 * slots, then any empty slot. Asserts a slot exists. Branching lines
 * partially missing from this chunk. */
903 static int find_unused_picture(MpegEncContext *s, int shared){
907 for(i=0; i<MAX_PICTURE_COUNT; i++){
908 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
911 for(i=0; i<MAX_PICTURE_COUNT; i++){
912 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
914 for(i=0; i<MAX_PICTURE_COUNT; i++){
915 if(s->picture[i].data[0]==NULL) break;
919 assert(i<MAX_PICTURE_COUNT);
923 /* generic function for encode/decode called before a frame is coded/decoded */
/* Rotates the reference-frame pointers, releases stale buffers, allocates
 * the current Picture, applies field-picture stride adjustments, and picks
 * the matching dct_unquantize for the stream type. Returns 0 on success
 * (error paths partially missing from this chunk). */
924 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
931 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
933 /* mark&release old frames */
934 if (s->pict_type != B_TYPE && s->last_picture_ptr) {
935 avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
937 /* release forgotten pictures */
938 /* if(mpeg124/h263) */
940 for(i=0; i<MAX_PICTURE_COUNT; i++){
941 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
942 fprintf(stderr, "releasing zombie picture\n");
943 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
950 /* release non reference frames */
951 for(i=0; i<MAX_PICTURE_COUNT; i++){
952 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
953 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
957 i= find_unused_picture(s, 0);
959 pic= (AVFrame*)&s->picture[i];
/* only non-B frames become references (3 = both fields) */
960 pic->reference= s->pict_type != B_TYPE ? 3 : 0;
962 if(s->current_picture_ptr)
963 pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
965 alloc_picture(s, (Picture*)pic, 0);
967 s->current_picture_ptr= &s->picture[i];
970 s->current_picture_ptr->pict_type= s->pict_type;
971 s->current_picture_ptr->quality= s->qscale;
972 s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
974 s->current_picture= *s->current_picture_ptr;
976 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
977 if (s->pict_type != B_TYPE) {
/* non-B frame: shift the reference window forward */
978 s->last_picture_ptr= s->next_picture_ptr;
979 s->next_picture_ptr= s->current_picture_ptr;
982 if(s->last_picture_ptr) s->last_picture= *s->last_picture_ptr;
983 if(s->next_picture_ptr) s->next_picture= *s->next_picture_ptr;
984 if(s->new_picture_ptr ) s->new_picture = *s->new_picture_ptr;
/* field pictures: double the strides, bottom field starts one line down */
986 if(s->picture_structure!=PICT_FRAME){
989 if(s->picture_structure == PICT_BOTTOM_FIELD){
990 s->current_picture.data[i] += s->current_picture.linesize[i];
992 s->current_picture.linesize[i] *= 2;
993 s->last_picture.linesize[i] *=2;
994 s->next_picture.linesize[i] *=2;
998 if(s->pict_type != I_TYPE && s->last_picture_ptr==NULL){
999 fprintf(stderr, "warning: first frame is no keyframe\n");
1000 assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
1005 s->hurry_up= s->avctx->hurry_up;
1006 s->error_resilience= avctx->error_resilience;
1008 /* set dequantizer, we can't do it during init as it might change for mpeg4
1009 and we can't do it in the header decode as init isn't called for mpeg4 there yet */
1010 if(s->out_format == FMT_H263){
1012 s->dct_unquantize = s->dct_unquantize_mpeg2;
1014 s->dct_unquantize = s->dct_unquantize_h263;
1016 s->dct_unquantize = s->dct_unquantize_mpeg1;
1021 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Post-frame bookkeeping: pads edges for MC, records the picture type,
 * writes current_picture back into its s->picture[] slot, releases
 * non-reference buffers and clears the local Picture copies. */
1022 void MPV_frame_end(MpegEncContext *s)
1025 /* draw edge for correct motion prediction if outside */
1026 if(s->codec_id!=CODEC_ID_SVQ1){
1027 if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1028 draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
1029 draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1030 draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1035 s->last_pict_type = s->pict_type;
1036 if(s->pict_type!=B_TYPE){
1037 s->last_non_b_pict_type= s->pict_type;
1040 /* copy back current_picture variables */
1041 for(i=0; i<MAX_PICTURE_COUNT; i++){
1042 if(s->picture[i].data[0] == s->current_picture.data[0]){
1043 s->picture[i]= s->current_picture;
1047 assert(i<MAX_PICTURE_COUNT);
1051 /* release non reference frames */
1052 for(i=0; i<MAX_PICTURE_COUNT; i++){
1053 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1054 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1058 // clear copies, to avoid confusion
1060 memset(&s->last_picture, 0, sizeof(Picture));
1061 memset(&s->next_picture, 0, sizeof(Picture));
1062 memset(&s->current_picture, 0, sizeof(Picture));
1067 * draws a line from (ex, ey) -> (sx, sy).
1068 * @param w width of the image
1069 * @param h height of the image
1070 * @param stride stride/linesize of the image
1071 * @param color color of the arrow
/* Fixed-point (16.16) line rasterizer; endpoints are clipped to the image
 * and color is ADDED to the buffer, not stored. Branch structure partially
 * missing from this chunk. */
1073 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1076 sx= clip(sx, 0, w-1);
1077 sy= clip(sy, 0, h-1);
1078 ex= clip(ex, 0, w-1);
1079 ey= clip(ey, 0, h-1);
1081 buf[sy*stride + sx]+= color;
/* step along the major axis; f is the 16.16 slope along the minor axis */
1083 if(ABS(ex - sx) > ABS(ey - sy)){
1088 buf+= sx + sy*stride;
1090 f= ((ey-sy)<<16)/ex;
1091 for(x= 0; x <= ex; x++){
1092 y= ((x*f) + (1<<15))>>16;
1093 buf[y*stride + x]+= color;
1100 buf+= sx + sy*stride;
1102 if(ey) f= ((ex-sx)<<16)/ey;
1104 for(y= 0; y <= ey; y++){
1105 x= ((y*f) + (1<<15))>>16;
1106 buf[y*stride + x]+= color;
1112 * draws an arrow from (ex, ey) -> (sx, sy).
1113 * @param w width of the image
1114 * @param h height of the image
1115 * @param stride stride/linesize of the image
1116 * @param color color of the arrow
/* Draws the shaft via draw_line() and, for vectors longer than 3 pixels,
 * two short head strokes at the start point. Some declarations missing
 * from this chunk. */
1118 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* loose clip: draw_line does the exact clipping to the image */
1121 sx= clip(sx, -100, w+100);
1122 sy= clip(sy, -100, h+100);
1123 ex= clip(ex, -100, w+100);
1124 ey= clip(ey, -100, h+100);
1129 if(dx*dx + dy*dy > 3*3){
1132 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1134 //FIXME subpixel accuracy
/* normalize the head strokes to ~3 pixels */
1135 rx= ROUNDED_DIV(rx*3<<4, length);
1136 ry= ROUNDED_DIV(ry*3<<4, length);
1138 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1139 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1141 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1145 * prints debugging info for the given picture.
/* Depending on avctx->debug flags: prints a per-MB text map (skip count,
 * qp, macroblock type) and/or overlays motion vectors as arrows onto the
 * picture's luma plane. Many printf branches are truncated in this chunk. */
1147 void ff_print_debug_info(MpegEncContext *s, Picture *pict){
1149 if(!pict || !pict->mb_type) return;
1151 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1154 for(y=0; y<s->mb_height; y++){
1155 for(x=0; x<s->mb_width; x++){
1156 if(s->avctx->debug&FF_DEBUG_SKIP){
1157 int count= s->mbskip_table[x + y*s->mb_stride];
1158 if(count>9) count=9;
1159 printf("%1d", count);
1161 if(s->avctx->debug&FF_DEBUG_QP){
1162 printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
1164 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1165 int mb_type= pict->mb_type[x + y*s->mb_stride];
1167 //Type & MV direction
/* one character per classification; printed characters are on lines
 * not visible in this chunk */
1170 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1172 else if(IS_INTRA4x4(mb_type))
1174 else if(IS_INTRA16x16(mb_type))
1176 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1178 else if(IS_DIRECT(mb_type))
1180 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1182 else if(IS_GMC(mb_type))
1184 else if(IS_SKIP(mb_type))
1186 else if(!USES_LIST(mb_type, 1))
1188 else if(!USES_LIST(mb_type, 0))
1191 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1198 else if(IS_16X8(mb_type))
1200 else if(IS_8X16(mb_type))
1202 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1208 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1219 if((s->avctx->debug&FF_DEBUG_VIS_MV) && s->motion_val){
1220 const int shift= 1 + s->quarter_sample;
1222 uint8_t *ptr= pict->data[0];
1223 s->low_delay=0; //needed to see the vectors without trashing the buffers
1225 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1227 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1228 const int mb_index= mb_x + mb_y*s->mb_stride;
1229 if(IS_8X8(s->current_picture.mb_type[mb_index])){
/* 4MV macroblock: one arrow per 8x8 sub-block */
1232 int sx= mb_x*16 + 4 + 8*(i&1);
1233 int sy= mb_y*16 + 4 + 8*(i>>1);
1234 int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1235 int mx= (s->motion_val[xy][0]>>shift) + sx;
1236 int my= (s->motion_val[xy][1]>>shift) + sy;
1237 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
/* single MV: one arrow from the macroblock center */
1240 int sx= mb_x*16 + 8;
1241 int sy= mb_y*16 + 8;
1242 int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1243 int mx= (s->motion_val[xy][0]>>shift) + sx;
1244 int my= (s->motion_val[xy][1]>>shift) + sy;
1245 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1247 s->mbskip_table[mb_index]=0;
1253 #ifdef CONFIG_ENCODERS
// Sum of absolute errors of a 16x16 block against a constant reference value
// (typically the block mean) — measures how "flat" the block is.
// NOTE(review): elided listing (declarations/return on missing rows); code verbatim.
1255 static int get_sae(uint8_t *src, int ref, int stride){
1259 for(y=0; y<16; y++){
1260 for(x=0; x<16; x++){
1261 acc+= ABS(src[x+y*stride] - ref);
// Counts 16x16 blocks for which intra coding looks cheaper than inter:
// a block is counted when its SAE around its own mean (+500 bias) is still
// below the inter SAD against the reference. Used by the B-frame strategy.
// NOTE(review): elided listing; code kept byte-identical.
1268 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1275 for(y=0; y<h; y+=16){
1276 for(x=0; x<w; x+=16){
1277 int offset= x + y*stride;
1278 int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
// mean of the 16x16 block, rounded ((sum + 128) >> 8).
1279 int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1280 int sae = get_sae(src + offset, mean, stride);
1282 acc+= sae + 500 < sad;
// Imports a user frame (pic_arg) into the encoder's internal picture queue.
// Uses the caller's buffers directly ("direct" path) when strides match and
// the caller preserves the data across the B-frame delay; otherwise copies
// each plane into an internally allocated picture.
// NOTE(review): elided numbered listing; code kept byte-identical.
1289 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1292 const int encoding_delay= s->max_b_frames;
// Direct reuse is unsafe unless input is preserved and all strides match.
1295 if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1296 if(pic_arg->linesize[0] != s->linesize) direct=0;
1297 if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1298 if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1300 // printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1303 i= find_unused_picture(s, 1);
1305 pic= (AVFrame*)&s->picture[i];
1309 pic->data[i]= pic_arg->data[i];
1310 pic->linesize[i]= pic_arg->linesize[i];
1312 alloc_picture(s, (Picture*)pic, 1);
1314 i= find_unused_picture(s, 0);
1316 pic= (AVFrame*)&s->picture[i];
1319 alloc_picture(s, (Picture*)pic, 0);
1321 /* the input will be 16 pixels to the right relative to the actual buffer start
1322 * and the current_pic, so the buffer can be reused; yes, it's not beautiful
// If the user handed us the exact same buffers, skip the plane copy.
1327 if( pic->data[0] == pic_arg->data[0]
1328 && pic->data[1] == pic_arg->data[1]
1329 && pic->data[2] == pic_arg->data[2]){
1332 int h_chroma_shift, v_chroma_shift;
1334 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1337 int src_stride= pic_arg->linesize[i];
1338 int dst_stride= i ? s->uvlinesize : s->linesize;
1339 int h_shift= i ? h_chroma_shift : 0;
1340 int v_shift= i ? v_chroma_shift : 0;
1341 int w= s->width >>h_shift;
1342 int h= s->height>>v_shift;
1343 uint8_t *src= pic_arg->data[i];
1344 uint8_t *dst= pic->data[i];
// Fast path: identical strides allow one memcpy for the whole plane.
1346 if(src_stride==dst_stride)
1347 memcpy(dst, src, src_stride*h);
1350 memcpy(dst, src, w);
1358 pic->quality= pic_arg->quality;
1359 pic->pict_type= pic_arg->pict_type;
1360 pic->pts = pic_arg->pts;
1362 if(s->input_picture[encoding_delay])
1363 pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1365 /* shift buffer entries */
1366 for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1367 s->input_picture[i-1]= s->input_picture[i];
1369 s->input_picture[encoding_delay]= (Picture*)pic;
// Chooses the next picture to encode and its type (I/P/B), reordering the
// input queue for B-frame encoding. B-frame count comes from the user's
// explicit pict_type, from 2-pass stats (PASS2), or from b_frame_strategy
// (0 = always max_b_frames, 1 = intra-count heuristic via get_intra_count).
// The selected picture lands in s->new_picture / s->current_picture_ptr.
// NOTE(review): elided numbered listing; code kept byte-identical.
1374 static void select_input_picture(MpegEncContext *s){
1376 const int encoding_delay= s->max_b_frames;
1377 int coded_pic_num=0;
1379 if(s->reordered_input_picture[0])
1380 coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
// Shift the reorder queue down by one slot.
1382 for(i=1; i<MAX_PICTURE_COUNT; i++)
1383 s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1384 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1386 /* set next picture types & ordering */
1387 if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
// No reference yet (or intra-only codec) -> force an I-frame.
1388 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1389 s->reordered_input_picture[0]= s->input_picture[0];
1390 s->reordered_input_picture[0]->pict_type= I_TYPE;
1391 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
// 2nd pass: take picture types from the first-pass rate-control log.
1395 if(s->flags&CODEC_FLAG_PASS2){
1396 for(i=0; i<s->max_b_frames+1; i++){
1397 int pict_num= s->input_picture[0]->display_picture_number + i;
1398 int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1399 s->input_picture[i]->pict_type= pict_type;
1401 if(i + 1 >= s->rc_context.num_entries) break;
1405 if(s->input_picture[0]->pict_type){
1406 /* user selected pict_type */
1407 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1408 if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1411 if(b_frames > s->max_b_frames){
1412 fprintf(stderr, "warning, too many bframes in a row\n");
1413 b_frames = s->max_b_frames;
1415 }else if(s->b_frame_strategy==0){
1416 b_frames= s->max_b_frames;
1417 }else if(s->b_frame_strategy==1){
// Heuristic: score each candidate by its intra-block count vs. the
// previous frame; stop B-run at the first "too intra" frame.
1418 for(i=1; i<s->max_b_frames+1; i++){
1419 if(s->input_picture[i]->b_frame_score==0){
1420 s->input_picture[i]->b_frame_score=
1421 get_intra_count(s, s->input_picture[i ]->data[0],
1422 s->input_picture[i-1]->data[0], s->linesize) + 1;
1425 for(i=0; i<s->max_b_frames; i++){
1426 if(s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1429 b_frames= FFMAX(0, i-1);
// Invalidate cached scores for the pictures we are about to consume.
1432 for(i=0; i<b_frames+1; i++){
1433 s->input_picture[i]->b_frame_score=0;
1436 fprintf(stderr, "illegal b frame strategy\n");
1441 //static int b_count=0;
1442 //b_count+= b_frames;
1443 //printf("b_frames: %d\n", b_count);
// The picture after the B-run is coded first (as I or P reference).
1445 s->reordered_input_picture[0]= s->input_picture[b_frames];
1446 if( s->picture_in_gop_number + b_frames >= s->gop_size
1447 || s->reordered_input_picture[0]->pict_type== I_TYPE)
1448 s->reordered_input_picture[0]->pict_type= I_TYPE;
1450 s->reordered_input_picture[0]->pict_type= P_TYPE;
1451 s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1452 for(i=0; i<b_frames; i++){
1454 s->reordered_input_picture[i+1]= s->input_picture[i];
1455 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1456 s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1461 if(s->reordered_input_picture[0]){
// reference=3 for I/P (used as reference), 0 for B.
1462 s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1464 s->new_picture= *s->reordered_input_picture[0];
1466 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1467 // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
1469 int i= find_unused_picture(s, 0);
1470 Picture *pic= &s->picture[i];
1472 /* mark us unused / free shared pic */
1474 s->reordered_input_picture[0]->data[i]= NULL;
1475 s->reordered_input_picture[0]->type= 0;
1477 //FIXME bad, copy * except
1478 pic->pict_type = s->reordered_input_picture[0]->pict_type;
1479 pic->quality = s->reordered_input_picture[0]->quality;
1480 pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1481 pic->reference = s->reordered_input_picture[0]->reference;
1483 alloc_picture(s, pic, 0);
1485 s->current_picture_ptr= pic;
1487 // input is not a shared pix -> reuse buffer for current_pix
1489 assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1490 || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1492 s->current_picture_ptr= s->reordered_input_picture[0];
1494 //reverse the +16 we did before storing the input
1495 s->current_picture_ptr->data[i]-=16;
1498 s->current_picture= *s->current_picture_ptr;
1500 s->picture_number= s->new_picture.display_picture_number;
1501 //printf("dpn:%d\n", s->picture_number);
// Nothing to encode this call (queue still filling during B-delay).
1503 memset(&s->new_picture, 0, sizeof(Picture));
// Top-level encoder entry point: loads/reorders the input frame, encodes the
// selected picture, fills the per-frame statistics into avctx, and returns
// the number of bytes written to buf.
// NOTE(review): elided numbered listing; code kept byte-identical.
1507 int MPV_encode_picture(AVCodecContext *avctx,
1508 unsigned char *buf, int buf_size, void *data)
1510 MpegEncContext *s = avctx->priv_data;
1511 AVFrame *pic_arg = data;
1514 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
1516 s->picture_in_gop_number++;
1518 load_input_picture(s, pic_arg);
1520 select_input_picture(s);
// May be empty while the B-frame queue is still filling.
1523 if(s->new_picture.data[0]){
1525 s->pict_type= s->new_picture.pict_type;
1526 if (s->fixed_qscale){ /* the ratecontrol needs the last qscale so we dont touch it for CBR */
1527 s->qscale= (int)(s->new_picture.quality+0.5);
1531 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1532 MPV_frame_start(s, avctx);
1534 encode_picture(s, s->picture_number);
// Export per-frame bit statistics for the caller / rate control.
1536 avctx->real_pict_num = s->picture_number;
1537 avctx->header_bits = s->header_bits;
1538 avctx->mv_bits = s->mv_bits;
1539 avctx->misc_bits = s->misc_bits;
1540 avctx->i_tex_bits = s->i_tex_bits;
1541 avctx->p_tex_bits = s->p_tex_bits;
1542 avctx->i_count = s->i_count;
1543 avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1544 avctx->skip_count = s->skip_count;
1548 if (s->out_format == FMT_MJPEG)
1549 mjpeg_picture_trailer(s);
1551 if(s->flags&CODEC_FLAG_PASS1)
1552 ff_write_pass1_stats(s);
1555 avctx->error[i] += s->current_picture_ptr->error[i];
1559 s->input_picture_number++;
1561 flush_put_bits(&s->pb);
1562 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1564 s->total_bits += s->frame_bits;
1565 avctx->frame_bits = s->frame_bits;
1567 return pbBufPtr(&s->pb) - s->pb.buf;
1570 #endif //CONFIG_ENCODERS
/* Global motion compensation with a single warp point (MPEG-4 sprite):
 * a pure translation at sub-pel accuracy, applied to luma then both chroma
 * planes (chroma at half resolution).
 * NOTE(review): elided numbered listing; code kept byte-identical. */
1572 static inline void gmc1_motion(MpegEncContext *s,
1573 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1575 uint8_t **ref_picture, int src_offset)
1578 int offset, src_x, src_y, linesize, uvlinesize;
1579 int motion_x, motion_y;
1582 motion_x= s->sprite_offset[0][0];
1583 motion_y= s->sprite_offset[0][1];
1584 src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1585 src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
// Normalize the fractional part to 1/16 pel regardless of warping accuracy.
1586 motion_x<<=(3-s->sprite_warping_accuracy);
1587 motion_y<<=(3-s->sprite_warping_accuracy);
1588 src_x = clip(src_x, -16, s->width);
1589 if (src_x == s->width)
1591 src_y = clip(src_y, -16, s->height);
1592 if (src_y == s->height)
1595 linesize = s->linesize;
1596 uvlinesize = s->uvlinesize;
1598 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1600 dest_y+=dest_offset;
// Replicate edges into edge_emu_buffer when the 17x17 source area
// (16 + 1 for interpolation) crosses the picture border.
1601 if(s->flags&CODEC_FLAG_EMU_EDGE){
1602 if(src_x<0 || src_y<0 || src_x + 17 >= s->h_edge_pos
1603 || src_y + 17 >= s->v_edge_pos){
1604 ff_emulated_edge_mc(s, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1605 ptr= s->edge_emu_buffer;
// Sub-pel fraction present -> dedicated gmc1 DSP routine; otherwise fall
// back to the ordinary (no-)rounding half-pel copy tables.
1609 if((motion_x|motion_y)&7){
1610 s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1611 s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1615 dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1616 if (s->no_rounding){
1617 s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1619 s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
// Grayscale-only mode: skip chroma entirely.
1623 if(s->flags&CODEC_FLAG_GRAY) return;
1625 motion_x= s->sprite_offset[1][0];
1626 motion_y= s->sprite_offset[1][1];
1627 src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1628 src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1629 motion_x<<=(3-s->sprite_warping_accuracy);
1630 motion_y<<=(3-s->sprite_warping_accuracy);
1631 src_x = clip(src_x, -8, s->width>>1);
1632 if (src_x == s->width>>1)
1634 src_y = clip(src_y, -8, s->height>>1);
1635 if (src_y == s->height>>1)
1638 offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1639 ptr = ref_picture[1] + offset;
1640 if(s->flags&CODEC_FLAG_EMU_EDGE){
1641 if(src_x<0 || src_y<0 || src_x + 9 >= s->h_edge_pos>>1
1642 || src_y + 9 >= s->v_edge_pos>>1){
1643 ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1644 ptr= s->edge_emu_buffer;
1648 s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
// Cr plane shares the offset/edge handling computed for Cb.
1650 ptr = ref_picture[2] + offset;
1652 ff_emulated_edge_mc(s, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1653 ptr= s->edge_emu_buffer;
1655 s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* General global motion compensation (affine warp via sprite_delta matrix):
 * each plane is warped by the dsp.gmc routine, luma done as two 8-wide halves.
 * NOTE(review): elided numbered listing; code kept byte-identical. */
1660 static inline void gmc_motion(MpegEncContext *s,
1661 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1663 uint8_t **ref_picture, int src_offset)
1666 int linesize, uvlinesize;
1667 const int a= s->sprite_warping_accuracy;
1670 linesize = s->linesize;
1671 uvlinesize = s->uvlinesize;
1673 ptr = ref_picture[0] + src_offset;
1675 dest_y+=dest_offset;
// Warp origin for this macroblock: offset + delta-matrix * MB position.
1677 ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1678 oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1680 s->dsp.gmc(dest_y, ptr, linesize, 16,
1683 s->sprite_delta[0][0], s->sprite_delta[0][1],
1684 s->sprite_delta[1][0], s->sprite_delta[1][1],
// rounding term: (1<<(2a+1)) - no_rounding, shift a+1 (see dsp.gmc).
1685 a+1, (1<<(2*a+1)) - s->no_rounding,
1686 s->h_edge_pos, s->v_edge_pos);
1687 s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1688 ox + s->sprite_delta[0][0]*8,
1689 oy + s->sprite_delta[1][0]*8,
1690 s->sprite_delta[0][0], s->sprite_delta[0][1],
1691 s->sprite_delta[1][0], s->sprite_delta[1][1],
1692 a+1, (1<<(2*a+1)) - s->no_rounding,
1693 s->h_edge_pos, s->v_edge_pos);
1695 if(s->flags&CODEC_FLAG_GRAY) return;
1698 dest_cb+=dest_offset>>1;
1699 dest_cr+=dest_offset>>1;
// Chroma warp at half resolution, with the chroma warp point.
1701 ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1702 oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1704 ptr = ref_picture[1] + (src_offset>>1);
1705 s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1708 s->sprite_delta[0][0], s->sprite_delta[0][1],
1709 s->sprite_delta[1][0], s->sprite_delta[1][1],
1710 a+1, (1<<(2*a+1)) - s->no_rounding,
1711 s->h_edge_pos>>1, s->v_edge_pos>>1);
1713 ptr = ref_picture[2] + (src_offset>>1);
1714 s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1717 s->sprite_delta[0][0], s->sprite_delta[0][1],
1718 s->sprite_delta[1][0], s->sprite_delta[1][1],
1719 a+1, (1<<(2*a+1)) - s->no_rounding,
1720 s->h_edge_pos>>1, s->v_edge_pos>>1);
// Copies a block_w x block_h area at (src_x,src_y) into s->edge_emu_buffer,
// replicating border pixels for the parts that fall outside the valid
// w x h region — used when motion vectors point past the picture edges.
// NOTE(review): elided numbered listing; code kept byte-identical.
1724 void ff_emulated_edge_mc(MpegEncContext *s, uint8_t *src, int linesize, int block_w, int block_h,
1725 int src_x, int src_y, int w, int h){
1727 int start_y, start_x, end_y, end_x;
1728 uint8_t *buf= s->edge_emu_buffer;
// Pre-shift src so the fully-outside cases read from the nearest valid row/col.
1731 src+= (h-1-src_y)*linesize;
1733 }else if(src_y<=-block_h){
1734 src+= (1-block_h-src_y)*linesize;
1740 }else if(src_x<=-block_w){
1741 src+= (1-block_w-src_x);
// In-bounds sub-rectangle of the requested block.
1745 start_y= FFMAX(0, -src_y);
1746 start_x= FFMAX(0, -src_x);
1747 end_y= FFMIN(block_h, h-src_y);
1748 end_x= FFMIN(block_w, w-src_x);
1750 // copy existing part
1751 for(y=start_y; y<end_y; y++){
1752 for(x=start_x; x<end_x; x++){
1753 buf[x + y*linesize]= src[x + y*linesize];
// Replicate the first valid row upward...
1758 for(y=0; y<start_y; y++){
1759 for(x=start_x; x<end_x; x++){
1760 buf[x + y*linesize]= buf[x + start_y*linesize];
// ...and the last valid row downward.
1765 for(y=end_y; y<block_h; y++){
1766 for(x=start_x; x<end_x; x++){
1767 buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
// Finally replicate left/right columns across every row.
1771 for(y=0; y<block_h; y++){
1773 for(x=0; x<start_x; x++){
1774 buf[x + y*linesize]= buf[start_x + y*linesize];
1778 for(x=end_x; x<block_w; x++){
1779 buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1785 /* apply one mpeg motion vector to the three components */
// Half-pel motion compensation for one MB: luma 16-wide (height h, so it also
// serves 16x8 field halves), then chroma at half resolution. Chroma MV
// derivation differs for H.263 (table rounding, on elided rows) vs. MPEG.
// NOTE(review): elided numbered listing; code kept byte-identical.
1786 static inline void mpeg_motion(MpegEncContext *s,
1787 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1789 uint8_t **ref_picture, int src_offset,
1790 int field_based, op_pixels_func (*pix_op)[4],
1791 int motion_x, int motion_y, int h)
1794 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1797 if(s->quarter_sample)
// dxy selects among the 4 half-pel interpolation variants.
1803 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1804 src_x = s->mb_x * 16 + (motion_x >> 1);
1805 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1807 /* WARNING: do not forget half pels */
1808 height = s->height >> field_based;
1809 v_edge_pos = s->v_edge_pos >> field_based;
1810 src_x = clip(src_x, -16, s->width);
1811 if (src_x == s->width)
1813 src_y = clip(src_y, -16, height);
1814 if (src_y == height)
// Field pictures use doubled strides to address one field.
1816 linesize = s->current_picture.linesize[0] << field_based;
1817 uvlinesize = s->current_picture.linesize[1] << field_based;
1818 ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1819 dest_y += dest_offset;
1821 if(s->flags&CODEC_FLAG_EMU_EDGE){
1822 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1823 || src_y + (motion_y&1) + h > v_edge_pos){
1824 ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based, //FIXME linesize? and uv below
1825 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1826 ptr= s->edge_emu_buffer + src_offset;
1830 pix_op[0][dxy](dest_y, ptr, linesize, h);
1832 if(s->flags&CODEC_FLAG_GRAY) return;
// H.263: chroma vector = luma vector with special rounding of odd values.
1834 if (s->out_format == FMT_H263) {
1836 if ((motion_x & 3) != 0)
1838 if ((motion_y & 3) != 0)
1845 dxy = ((my & 1) << 1) | (mx & 1);
1850 src_x = s->mb_x * 8 + mx;
1851 src_y = s->mb_y * (8 >> field_based) + my;
1852 src_x = clip(src_x, -8, s->width >> 1);
1853 if (src_x == (s->width >> 1))
1855 src_y = clip(src_y, -8, height >> 1);
1856 if (src_y == (height >> 1))
1858 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1859 ptr = ref_picture[1] + offset;
1861 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
1862 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1863 ptr= s->edge_emu_buffer + (src_offset >> 1);
1865 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1867 ptr = ref_picture[2] + offset;
1869 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
1870 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1871 ptr= s->edge_emu_buffer + (src_offset >> 1);
1873 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
// Quarter-pel motion compensation for one MB: luma via the 16-variant qpel
// interpolators, chroma at half-pel after downscaling the luma MV (with
// optional bug-workaround rounding for broken encoders).
// NOTE(review): elided numbered listing; code kept byte-identical.
1876 static inline void qpel_motion(MpegEncContext *s,
1877 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1879 uint8_t **ref_picture, int src_offset,
1880 int field_based, op_pixels_func (*pix_op)[4],
1881 qpel_mc_func (*qpix_op)[16],
1882 int motion_x, int motion_y, int h)
1885 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
// dxy indexes the 16 quarter-pel interpolation variants.
1888 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
1889 src_x = s->mb_x * 16 + (motion_x >> 2);
1890 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
1892 height = s->height >> field_based;
1893 v_edge_pos = s->v_edge_pos >> field_based;
1894 src_x = clip(src_x, -16, s->width);
1895 if (src_x == s->width)
1897 src_y = clip(src_y, -16, height);
1898 if (src_y == height)
1900 linesize = s->linesize << field_based;
1901 uvlinesize = s->uvlinesize << field_based;
1902 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1903 dest_y += dest_offset;
1904 //printf("%d %d %d\n", src_x, src_y, dxy);
1906 if(s->flags&CODEC_FLAG_EMU_EDGE){
1907 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
1908 || src_y + (motion_y&3) + h > v_edge_pos){
1909 ff_emulated_edge_mc(s, ptr - src_offset, s->linesize, 17, 17+field_based,
1910 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1911 ptr= s->edge_emu_buffer + src_offset;
1916 qpix_op[0][dxy](dest_y, ptr, linesize);
1918 //damn interlaced mode
1919 //FIXME boundary mirroring is not exactly correct here
// Field case: apply the 8-wide qpel op to each half separately.
1920 qpix_op[1][dxy](dest_y , ptr , linesize);
1921 qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
1924 if(s->flags&CODEC_FLAG_GRAY) return;
// Chroma MV derivation; rtab/OR-rounding paths reproduce known encoder bugs.
1929 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
1930 static const int rtab[8]= {0,0,1,1,0,0,0,1};
1931 mx= (motion_x>>1) + rtab[motion_x&7];
1932 my= (motion_y>>1) + rtab[motion_y&7];
1933 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
1934 mx= (motion_x>>1)|(motion_x&1);
1935 my= (motion_y>>1)|(motion_y&1);
1943 dxy= (mx&1) | ((my&1)<<1);
1947 src_x = s->mb_x * 8 + mx;
1948 src_y = s->mb_y * (8 >> field_based) + my;
1949 src_x = clip(src_x, -8, s->width >> 1);
1950 if (src_x == (s->width >> 1))
1952 src_y = clip(src_y, -8, height >> 1);
1953 if (src_y == (height >> 1))
1956 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1957 ptr = ref_picture[1] + offset;
1959 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
1960 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1961 ptr= s->edge_emu_buffer + (src_offset >> 1);
1963 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1965 ptr = ref_picture[2] + offset;
1967 ff_emulated_edge_mc(s, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
1968 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1969 ptr= s->edge_emu_buffer + (src_offset >> 1);
1971 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
// H.263-style chroma MV rounding: maps a summed luma MV component to a
// half-pel chroma component via h263_chroma_roundtab; the negative branch
// mirrors the positive one (sign handling on an elided row).
// NOTE(review): elided numbered listing; code kept byte-identical.
1974 inline int ff_h263_round_chroma(int x){
1976 return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1979 return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
1984 * motion compensation of a single macroblock
1986 * @param dest_y luma destination pointer
1987 * @param dest_cb chroma cb/u destination pointer
1988 * @param dest_cr chroma cr/v destination pointer
1989 * @param dir direction (0->forward, 1->backward)
1990 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1991 * @param pix_op halfpel motion compensation function (average or put normally)
1992 * @param qpix_op qpel motion compensation function (average or put normally)
1993 * the motion vectors are taken from s->mv and the MV type from s->mv_type
// NOTE(review): elided numbered listing throughout; code kept byte-identical.
1995 static inline void MPV_motion(MpegEncContext *s,
1996 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1997 int dir, uint8_t **ref_picture,
1998 op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2000 int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
2002 uint8_t *ptr, *dest;
// Dispatch on MV type: 16x16 (incl. GMC / qpel / mspel), 8x8, field.
2008 switch(s->mv_type) {
2012 if(s->real_sprite_warping_points==1){
2013 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2016 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2019 }else if(s->quarter_sample){
2020 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2023 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2025 ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2026 ref_picture, pix_op,
2027 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2031 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2034 s->mv[dir][0][0], s->mv[dir][0][1], 16);
// 8x8 case: four independent luma blocks; quarter-pel and half-pel paths.
2040 if(s->quarter_sample){
2042 motion_x = s->mv[dir][i][0];
2043 motion_y = s->mv[dir][i][1];
2045 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2046 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2047 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2049 /* WARNING: do not forget half pels */
2050 src_x = clip(src_x, -16, s->width);
2051 if (src_x == s->width)
2053 src_y = clip(src_y, -16, s->height);
2054 if (src_y == s->height)
2057 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2058 if(s->flags&CODEC_FLAG_EMU_EDGE){
2059 if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
2060 || src_y + (motion_y&3) + 8 > s->v_edge_pos){
2061 ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2062 ptr= s->edge_emu_buffer;
2065 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2066 qpix_op[1][dxy](dest, ptr, s->linesize);
// Accumulate half-pel-scaled MVs for the chroma vector below.
2068 mx += s->mv[dir][i][0]/2;
2069 my += s->mv[dir][i][1]/2;
2073 motion_x = s->mv[dir][i][0];
2074 motion_y = s->mv[dir][i][1];
2076 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2077 src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
2078 src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
2080 /* WARNING: do not forget half pels */
2081 src_x = clip(src_x, -16, s->width);
2082 if (src_x == s->width)
2084 src_y = clip(src_y, -16, s->height);
2085 if (src_y == s->height)
2088 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2089 if(s->flags&CODEC_FLAG_EMU_EDGE){
2090 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
2091 || src_y + (motion_y&1) + 8 > s->v_edge_pos){
2092 ff_emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2093 ptr= s->edge_emu_buffer;
2096 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2097 pix_op[1][dxy](dest, ptr, s->linesize, 8);
2099 mx += s->mv[dir][i][0];
2100 my += s->mv[dir][i][1];
2104 if(s->flags&CODEC_FLAG_GRAY) break;
2105 /* In case of 8X8, we construct a single chroma motion vector
2106 with a special rounding */
2107 mx= ff_h263_round_chroma(mx);
2108 my= ff_h263_round_chroma(my);
2109 dxy = ((my & 1) << 1) | (mx & 1);
2113 src_x = mb_x * 8 + mx;
2114 src_y = mb_y * 8 + my;
2115 src_x = clip(src_x, -8, s->width/2);
2116 if (src_x == s->width/2)
2118 src_y = clip(src_y, -8, s->height/2);
2119 if (src_y == s->height/2)
2122 offset = (src_y * (s->uvlinesize)) + src_x;
2123 ptr = ref_picture[1] + offset;
2124 if(s->flags&CODEC_FLAG_EMU_EDGE){
2125 if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
2126 || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
2127 ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2128 ptr= s->edge_emu_buffer;
2132 pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
2134 ptr = ref_picture[2] + offset;
2136 ff_emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2137 ptr= s->edge_emu_buffer;
2139 pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
// Field MV type: two per-field predictions (frame or field picture).
2142 if (s->picture_structure == PICT_FRAME) {
2143 if(s->quarter_sample){
2145 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2146 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2148 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2150 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2151 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2153 s->mv[dir][1][0], s->mv[dir][1][1], 8);
2156 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2157 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2159 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2161 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2162 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2164 s->mv[dir][1][0], s->mv[dir][1][1], 8);
// Field picture: may need to reference the current picture's other field.
2168 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2169 offset= s->field_select[dir][0] ? s->linesize : 0;
2171 ref_picture= s->current_picture.data;
2172 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2175 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2176 ref_picture, offset,
2178 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2185 /* put block[] to dest[] */
// Dequantizes the coefficient block and writes the IDCT result into dest
// (overwriting — used for intra blocks).
2186 static inline void put_dct(MpegEncContext *s,
2187 DCTELEM *block, int i, uint8_t *dest, int line_size)
2189 s->dct_unquantize(s, block, i, s->qscale);
2190 s->dsp.idct_put (dest, line_size, block);
2193 /* add block[] to dest[] */
// Adds the IDCT of an already-dequantized block to dest; skipped entirely
// when the block has no coded coefficients (block_last_index < 0).
2194 static inline void add_dct(MpegEncContext *s,
2195 DCTELEM *block, int i, uint8_t *dest, int line_size)
2197 if (s->block_last_index[i] >= 0) {
2198 s->dsp.idct_add (dest, line_size, block);
// Like add_dct, but dequantizes the block first (for codecs whose residue
// is stored quantized at this point). Skipped when the block is empty.
2202 static inline void add_dequant_dct(MpegEncContext *s,
2203 DCTELEM *block, int i, uint8_t *dest, int line_size)
2205 if (s->block_last_index[i] >= 0) {
2206 s->dct_unquantize(s, block, i, s->qscale);
2208 s->dsp.idct_add (dest, line_size, block);
2213 * cleans dc, ac, coded_block for the current non intra MB
// Resets intra-prediction state (DC predictors to 1024, AC rows to 0,
// coded_block flags for MSMPEG4 v3+) for the current macroblock's luma and
// chroma positions, and clears its mbintra_table entry.
// NOTE(review): elided numbered listing; code kept byte-identical.
2215 void ff_clean_intra_table_entries(MpegEncContext *s)
2217 int wrap = s->block_wrap[0];
2218 int xy = s->block_index[0];
// Luma DC predictors: the 2x2 block positions of this MB.
2221 s->dc_val[0][xy + 1 ] =
2222 s->dc_val[0][xy + wrap] =
2223 s->dc_val[0][xy + 1 + wrap] = 1024;
2225 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2226 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2227 if (s->msmpeg4_version>=3) {
2228 s->coded_block[xy ] =
2229 s->coded_block[xy + 1 ] =
2230 s->coded_block[xy + wrap] =
2231 s->coded_block[xy + 1 + wrap] = 0;
// Chroma planes: one block position per MB.
2234 wrap = s->block_wrap[4];
2235 xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2237 s->dc_val[2][xy] = 1024;
2239 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2240 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2242 s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2245 /* generic function called after a macroblock has been parsed by the
2246 decoder or after it has been encoded by the encoder.
2248 Important variables used:
2249 s->mb_intra : true if intra macroblock
2250 s->mv_dir : motion vector direction
2251 s->mv_type : motion vector type
2252 s->mv : motion vector
2253 s->interlaced_dct : true if interlaced dct used (mpeg2)
2255 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2258 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2263 s->current_picture.qscale_table[mb_xy]= s->qscale;
2265 /* update DC predictors for P macroblocks */
2267 if (s->h263_pred || s->h263_aic) {
2268 if(s->mbintra_table[mb_xy])
2269 ff_clean_intra_table_entries(s);
2273 s->last_dc[2] = 128 << s->intra_dc_precision;
2276 else if (s->h263_pred || s->h263_aic)
2277 s->mbintra_table[mb_xy]=1;
2279 /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
2280 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here
2281 //FIXME a lot of thet is only needed for !low_delay
2282 const int wrap = s->block_wrap[0];
2283 const int xy = s->block_index[0];
2284 if(s->mv_type != MV_TYPE_8X8){
2285 int motion_x, motion_y;
2289 } else if (s->mv_type == MV_TYPE_16X16) {
2290 motion_x = s->mv[0][0][0];
2291 motion_y = s->mv[0][0][1];
2292 } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
2294 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
2295 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
2296 motion_x = (motion_x>>1) | (motion_x&1);
2298 s->field_mv_table[mb_xy][i][0]= s->mv[0][i][0];
2299 s->field_mv_table[mb_xy][i][1]= s->mv[0][i][1];
2300 s->field_select_table[mb_xy][i]= s->field_select[0][i];
2304 /* no update if 8X8 because it has been done during parsing */
2305 s->motion_val[xy][0] = motion_x;
2306 s->motion_val[xy][1] = motion_y;
2307 s->motion_val[xy + 1][0] = motion_x;
2308 s->motion_val[xy + 1][1] = motion_y;
2309 s->motion_val[xy + wrap][0] = motion_x;
2310 s->motion_val[xy + wrap][1] = motion_y;
2311 s->motion_val[xy + 1 + wrap][0] = motion_x;
2312 s->motion_val[xy + 1 + wrap][1] = motion_y;
2315 if(s->encoding){ //FIXME encoding MUST be cleaned up
2316 if (s->mv_type == MV_TYPE_8X8)
2317 s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
2319 s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
2323 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2324 uint8_t *dest_y, *dest_cb, *dest_cr;
2325 int dct_linesize, dct_offset;
2326 op_pixels_func (*op_pix)[4];
2327 qpel_mc_func (*op_qpix)[16];
2328 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
2329 const int uvlinesize= s->current_picture.linesize[1];
2331 /* avoid copy if macroblock skipped in last frame too */
2332 if (s->pict_type != B_TYPE) {
2333 s->current_picture.mbskip_table[mb_xy]= s->mb_skiped;
2336 /* skip only during decoding as we might trash the buffers during encoding a bit */
2338 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2339 const int age= s->current_picture.age;
2345 assert(s->pict_type!=I_TYPE);
2347 (*mbskip_ptr) ++; /* indicate that this time we skiped it */
2348 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2350 /* if previous was skipped too, then nothing to do ! */
2351 if (*mbskip_ptr >= age && s->current_picture.reference){
2354 } else if(!s->current_picture.reference){
2355 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2356 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2358 *mbskip_ptr = 0; /* not skipped */
2363 if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){ //FIXME precalc
2364 dest_y = s->current_picture.data[0] + mb_x * 16;
2365 dest_cb = s->current_picture.data[1] + mb_x * 8;
2366 dest_cr = s->current_picture.data[2] + mb_x * 8;
2368 dest_y = s->current_picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
2369 dest_cb = s->current_picture.data[1] + (mb_y * 8 * uvlinesize) + mb_x * 8;
2370 dest_cr = s->current_picture.data[2] + (mb_y * 8 * uvlinesize) + mb_x * 8;
2373 if (s->interlaced_dct) {
2374 dct_linesize = linesize * 2;
2375 dct_offset = linesize;
2377 dct_linesize = linesize;
2378 dct_offset = linesize * 8;
2382 /* motion handling */
2383 /* decoding or more than one mb_type (MC was allready done otherwise) */
2384 if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
2385 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2386 op_pix = s->dsp.put_pixels_tab;
2387 op_qpix= s->dsp.put_qpel_pixels_tab;
2389 op_pix = s->dsp.put_no_rnd_pixels_tab;
2390 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2393 if (s->mv_dir & MV_DIR_FORWARD) {
2394 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2395 op_pix = s->dsp.avg_pixels_tab;
2396 op_qpix= s->dsp.avg_qpel_pixels_tab;
2398 if (s->mv_dir & MV_DIR_BACKWARD) {
2399 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2403 /* skip dequant / idct if we are really late ;) */
2404 if(s->hurry_up>1) return;
2406 /* add dct residue */
2407 if(s->encoding || !( s->mpeg2 || s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO
2408 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2409 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2410 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2411 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2412 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2414 if(!(s->flags&CODEC_FLAG_GRAY)){
2415 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
2416 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
2418 } else if(s->codec_id != CODEC_ID_WMV2){
2419 add_dct(s, block[0], 0, dest_y, dct_linesize);
2420 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2421 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2422 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2424 if(!(s->flags&CODEC_FLAG_GRAY)){
2425 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2426 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2431 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2435 /* dct only in intra block */
2436 if(s->encoding || !(s->mpeg2 || s->codec_id==CODEC_ID_MPEG1VIDEO)){
2437 put_dct(s, block[0], 0, dest_y, dct_linesize);
2438 put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2439 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2440 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2442 if(!(s->flags&CODEC_FLAG_GRAY)){
2443 put_dct(s, block[4], 4, dest_cb, uvlinesize);
2444 put_dct(s, block[5], 5, dest_cr, uvlinesize);
2447 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2448 s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]);
2449 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2450 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2452 if(!(s->flags&CODEC_FLAG_GRAY)){
2453 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2454 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2461 #ifdef CONFIG_ENCODERS
/**
 * Zeroes a whole 8x8 block when its remaining (small) coefficients score
 * below a threshold — cheap "coefficient elimination" to save bits.
 * NOTE(review): several interior lines of this function (score weighting
 * table body, skip_dc setup) are elided in this view; comments below
 * annotate only what is visible.
 *
 * @param n         block index (0-3 luma, 4-5 chroma — TODO confirm)
 * @param threshold elimination threshold; negated below, presumably to
 *                  encode a DC-skip flag in its sign — verify against callers
 */
2463 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2465 static const char tab[64]=
2477 DCTELEM *block= s->block[n];
2478 const int last_index= s->block_last_index[n];
2483 threshold= -threshold;
2487 /* is everything we could set to zero already zero? */
2488 if(last_index<=skip_dc - 1) return;
/* accumulate a score over all coded coefficients in scan order */
2490 for(i=0; i<=last_index; i++){
2491 const int j = s->intra_scantable.permutated[i];
2492 const int level = ABS(block[j]);
/* optionally exempt the DC coefficient from elimination */
2494 if(skip_dc && i==0) continue;
/* block is significant enough: keep it as-is */
2503 if(score >= threshold) return;
/* below threshold: clear the (non-DC) coefficients */
2504 for(i=skip_dc; i<=last_index; i++){
2505 const int j = s->intra_scantable.permutated[i];
/* update last_index: 0 if only DC survives, -1 if the block is now empty */
2508 if(block[0]) s->block_last_index[n]= 0;
2509 else s->block_last_index[n]= -1;
/**
 * Clamps quantized coefficients into [s->min_qcoeff, s->max_qcoeff] so they
 * remain representable by the codec's entropy-coding tables; the intra DC
 * coefficient (scan position 0) is deliberately left unclipped.
 * NOTE(review): the opening brace and a few declarations are elided in
 * this view of the function.
 */
2512 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2515 const int maxlevel= s->max_qcoeff;
2516 const int minlevel= s->min_qcoeff;
2519 i=1; //skip clipping of intra dc
/* walk the coded coefficients in scan order and saturate each one */
2523 for(;i<=last_index; i++){
2524 const int j= s->intra_scantable.permutated[i];
2525 int level = block[j];
2527 if (level>maxlevel) level=maxlevel;
2528 else if(level<minlevel) level=minlevel;
/*
 * Sum of absolute differences between vertically adjacent lines of a 16x8
 * pixel area (SAD flavor). Used below by the interlaced-vs-progressive DCT
 * decision: called with stride vs 2*stride to compare frame and field order.
 */
2535 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2540 for(x=0; x<16; x+=4){
2541 score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride])
2542 +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
/*
 * Like pix_vcmp16x8, but operates on the residual s1 - s2 (source minus
 * prediction): SAD of vertical differences of the difference signal.
 * Used for the interlaced-DCT decision on inter macroblocks.
 */
2550 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2555 for(x=0; x<16; x++){
2556 score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
/* squared-difference helper for the SSE-based comparison variants below */
2565 #define SQ(a) ((a)*(a))
/*
 * Alternate build of pix_vcmp16x8 using squared differences (SSE flavor)
 * instead of absolute differences; same role in the interlaced-DCT decision.
 * Only one of the two variants is compiled — presumably selected by a
 * preprocessor conditional elided from this view.
 */
2567 static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2572 for(x=0; x<16; x+=4){
2573 score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride])
2574 +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
/*
 * Squared-difference (SSE) build of pix_diff_vcmp16x8: vertical comparison
 * of the residual s1 - s2 over a 16x8 area.
 */
2582 static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2587 for(x=0; x<16; x++){
2588 score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
2599 #endif //CONFIG_ENCODERS
2603 * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Hands a finished horizontal band of the picture to the user's
 * draw_horiz_band() callback, for incremental display during decoding.
 * @param y first luma row of the band
 * @param h nominal band height; clipped to the picture bottom below
 */
2605 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2606 if ( s->avctx->draw_horiz_band
2607 && (s->last_picture_ptr || s->low_delay) ) {
2608 uint8_t *src_ptr[3];
/* never report rows past the bottom of the picture */
2610 h= FFMIN(h, s->height - y);
2612 if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME)
2615 offset = y * s->linesize;
/* B-frames (and low_delay) are displayed immediately; otherwise show the
 * previously decoded reference. Chroma uses offset>>2, i.e. half the luma
 * stride and half the rows — assumes 4:2:0 with uvlinesize == linesize/2
 * (TODO confirm; not visible here). */
2617 if(s->pict_type==B_TYPE || s->low_delay){
2618 src_ptr[0] = s->current_picture.data[0] + offset;
2619 src_ptr[1] = s->current_picture.data[1] + (offset >> 2);
2620 src_ptr[2] = s->current_picture.data[2] + (offset >> 2);
2622 src_ptr[0] = s->last_picture.data[0] + offset;
2623 src_ptr[1] = s->last_picture.data[1] + (offset >> 2);
2624 src_ptr[2] = s->last_picture.data[2] + (offset >> 2);
2628 s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize,
2633 #ifdef CONFIG_ENCODERS
/**
 * Encodes one macroblock: grabs source pixels (intra) or computes the
 * motion-compensated residual (inter), forward-DCTs and quantizes the six
 * 8x8 blocks, optionally eliminates near-empty blocks, and finally
 * entropy-codes the macroblock with the codec-specific encoder.
 *
 * NOTE(review): many interior lines (the intra/inter branch structure,
 * skip_dct consumption, several closing braces) are elided in this view;
 * comments annotate only the visible statements.
 *
 * @param motion_x/motion_y motion vector passed through to the
 *        codec-level MB encoders (halfpel units — TODO confirm)
 */
2635 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2637 const int mb_x= s->mb_x;
2638 const int mb_y= s->mb_y;
2641 int dct_offset = s->linesize*8; //default for progressive frames
2643 for(i=0; i<6; i++) skip_dct[i]=0;
/* --- adaptive quantization: apply the rate control's per-MB qscale --- */
2645 if(s->adaptive_quant){
2646 s->dquant= s->current_picture.qscale_table[mb_x + mb_y*s->mb_stride] - s->qscale;
/* H.263-family syntax only allows dquant in [-2, +2] */
2648 if(s->out_format==FMT_H263){
2649 if (s->dquant> 2) s->dquant= 2;
2650 else if(s->dquant<-2) s->dquant=-2;
/* MPEG-4: dquant cannot be coded for direct-mode / 4MV macroblocks */
2653 if(s->codec_id==CODEC_ID_MPEG4){
2655 if(s->mv_dir&MV_DIRECT)
2658 assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);
2661 s->qscale+= s->dquant;
2662 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2663 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
/* --- intra path: copy source pixels straight into the DCT blocks --- */
2671 wrap_y = s->linesize;
2672 ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
/* MB sticks out past the picture edge: use the edge-replicated copy */
2674 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2675 ff_emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2676 ptr= s->edge_emu_buffer;
/* decide frame vs field DCT by comparing vertical correlation at
 * stride (progressive) vs 2*stride (interlaced) */
2680 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2681 int progressive_score, interlaced_score;
2683 progressive_score= pix_vcmp16x8(ptr, wrap_y ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2684 interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y , wrap_y*2);
/* +100 bias: only switch to field DCT on a clear win */
2686 if(progressive_score > interlaced_score + 100){
2687 s->interlaced_dct=1;
2692 s->interlaced_dct=0;
/* four luma 8x8 blocks; dct_offset/wrap_y were adjusted above for field DCT */
2695 s->dsp.get_pixels(s->block[0], ptr , wrap_y);
2696 s->dsp.get_pixels(s->block[1], ptr + 8, wrap_y);
2697 s->dsp.get_pixels(s->block[2], ptr + dct_offset , wrap_y);
2698 s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
/* grayscale-only encoding: chroma blocks are skipped (branch body elided) */
2700 if(s->flags&CODEC_FLAG_GRAY){
2704 int wrap_c = s->uvlinesize;
2705 ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2707 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2708 ptr= s->edge_emu_buffer;
2710 s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2712 ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2714 ff_emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2715 ptr= s->edge_emu_buffer;
2717 s->dsp.get_pixels(s->block[5], ptr, wrap_c);
/* --- inter path: motion compensate, then DCT the residual --- */
2720 op_pixels_func (*op_pix)[4];
2721 qpel_mc_func (*op_qpix)[16];
2722 uint8_t *dest_y, *dest_cb, *dest_cr;
2723 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2727 dest_y = s->current_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
2728 dest_cb = s->current_picture.data[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2729 dest_cr = s->current_picture.data[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
2730 wrap_y = s->linesize;
2731 wrap_c = s->uvlinesize;
2732 ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2733 ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2734 ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
/* rounding control: no_rounding picks the no-round MC variants (B-frames
 * always use the rounded ones) */
2736 if ((!s->no_rounding) || s->pict_type==B_TYPE){
2737 op_pix = s->dsp.put_pixels_tab;
2738 op_qpix= s->dsp.put_qpel_pixels_tab;
2740 op_pix = s->dsp.put_no_rnd_pixels_tab;
2741 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
/* forward MC writes the prediction; a following backward MC averages into
 * it (bidirectional prediction) */
2744 if (s->mv_dir & MV_DIR_FORWARD) {
2745 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2746 op_pix = s->dsp.avg_pixels_tab;
2747 op_qpix= s->dsp.avg_qpel_pixels_tab;
2749 if (s->mv_dir & MV_DIR_BACKWARD) {
2750 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2753 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2754 ff_emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2755 ptr_y= s->edge_emu_buffer;
/* interlaced-DCT decision on the residual; larger bias (+600) than intra */
2759 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2760 int progressive_score, interlaced_score;
2762 progressive_score= pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y )
2763 + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y );
2764 interlaced_score = pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y*2)
2765 + pix_diff_vcmp16x8(ptr_y + wrap_y , dest_y + wrap_y , wrap_y*2);
2767 if(progressive_score > interlaced_score + 600){
2768 s->interlaced_dct=1;
2773 s->interlaced_dct=0;
/* residual = source - prediction, per 8x8 block */
2776 s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
2777 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2778 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
2779 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2781 if(s->flags&CODEC_FLAG_GRAY){
2786 ff_emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2787 ptr_cb= s->edge_emu_buffer;
2789 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2791 ff_emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2792 ptr_cr= s->edge_emu_buffer;
2794 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
/* mark blocks whose residual SAD is tiny relative to qscale so their
 * DCT+quantize can be skipped entirely (thresholds are heuristic) */
2796 /* pre quantization */
2797 if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
2799 if(s->dsp.pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
2800 if(s->dsp.pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
2801 if(s->dsp.pix_abs8x8(ptr_y +dct_offset , dest_y +dct_offset , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
2802 if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
2803 if(s->dsp.pix_abs8x8(ptr_cb , dest_cb , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
2804 if(s->dsp.pix_abs8x8(ptr_cr , dest_cr , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
/* debug statistics / printouts (normally compiled out — guards elided) */
2810 if(skip_dct[i]) num++;
2813 if(s->mb_x==0 && s->mb_y==0){
2815 printf("%6d %1d\n", stat[i], i);
2828 adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_stride*mb_y+mb_x] + 1.0) /
2829 ((s->mb_var[s->mb_stride*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);
2831 printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
2832 (s->mb_type[s->mb_stride*mb_y+mb_x] > 0) ? 'I' : 'P',
2833 s->qscale, adap_parm, s->qscale*adap_parm,
2834 s->mb_var[s->mb_stride*mb_y+mb_x], s->avg_mb_var);
2837 /* DCT & quantize */
/* MJPEG includes qscale in the quant matrix, so quantize with fixed 8 */
2838 if(s->out_format==FMT_MJPEG){
2841 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
2842 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
2848 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2849 // FIXME we could decide to change to quantizer instead of clipping
2850 // JS: I don't think that would be a good idea it could lower quality instead
2851 // of improve it. Just INTRADC clipping deserves changes in quantizer
2852 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
/* skipped blocks are flagged empty */
2854 s->block_last_index[i]= -1;
/* inter only: drop near-empty luma/chroma blocks below the elim thresholds */
2856 if(s->luma_elim_threshold && !s->mb_intra)
2858 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2859 if(s->chroma_elim_threshold && !s->mb_intra)
2861 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
/* grayscale intra: synthesize neutral chroma (DC only, mid-gray 1024) */
2864 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
2865 s->block_last_index[4]=
2866 s->block_last_index[5]= 0;
2868 s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
2871 /* huffman encode */
/* dispatch to the codec's MB-level entropy coder */
2872 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2873 case CODEC_ID_MPEG1VIDEO:
2874 mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
2876 case CODEC_ID_MPEG4:
2877 mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2878 case CODEC_ID_MSMPEG4V2:
2879 case CODEC_ID_MSMPEG4V3:
2881 msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
2883 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
2885 case CODEC_ID_H263P:
2887 h263_encode_mb(s, s->block, motion_x, motion_y); break;
2889 case CODEC_ID_MJPEG:
2890 mjpeg_encode_mb(s, s->block); break;
2896 #endif //CONFIG_ENCODERS
2899 * combines the (truncated) bitstream to a complete frame
2900 * @returns -1 if no complete frame could be created
/**
 * Accumulates input chunks in the parse-context buffer until a complete
 * frame is available. `next` is the offset of the next frame start within
 * *buf (END_NOT_FOUND if none); negative `next` means the start-code
 * straddles the chunk boundary, so the overhanging bytes are tracked as
 * "overread". NOTE(review): some lines (return paths, debug guards) are
 * elided in this view.
 */
2902 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
2903 ParseContext *pc= &s->parse_context;
/* debug trace (normally compiled out — guard elided) */
2907 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
2908 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
2912 /* copy overread bytes from the last frame into the buffer */
2913 for(; pc->overread>0; pc->overread--){
2914 pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
2917 pc->last_index= pc->index;
2919 /* no frame boundary found yet: stash the whole chunk and return */
2920 if(next == END_NOT_FOUND){
2921 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2923 memcpy(&pc->buffer[pc->index], *buf, *buf_size);
2924 pc->index += *buf_size;
2929 pc->overread_index= pc->index + next;
2931 /* append to buffer */
2933 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
2935 memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
/* negative next: fold the overhanging bytes into the start-code state */
2940 /* store overread bytes */
2941 for(;next < 0; next++){
2942 pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
2948 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
2949 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
2956 #ifdef CONFIG_ENCODERS
/**
 * Appends `length` bits from src to the put-bit context, 16 bits at a time.
 * src is read as big-endian 16-bit words (be2me_16 byteswap on LE hosts).
 * NOTE(review): the uint16_t* cast assumes src is 2-byte aligned — the
 * callers pass internal bit buffers, presumably aligned; verify on strict-
 * alignment targets.
 */
2957 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
/* `bytes` is actually the count of whole 16-bit words; `bits` the remainder */
2959 int bytes= length>>4;
2960 int bits= length&15;
2963 if(length==0) return;
2965 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
/* emit the trailing partial word, MSB-first */
2966 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
/**
 * Saves the encoder state that a trial macroblock encode will modify, so a
 * candidate MB type can be tried and later discarded (see encode_mb_hq).
 * Copies prediction state (last MVs, DC predictors), bit-count statistics,
 * and the skip/qscale state from s into d.
 * NOTE(review): some lines (loop headers, PutBitContext copies) are elided
 * in this view; keep the field list in sync with copy_context_after_encode.
 */
2969 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MV prediction state */
2972 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
2975 d->mb_skip_run= s->mb_skip_run;
2977 d->last_dc[i]= s->last_dc[i];
/* bit-usage statistics (for rate control / 2-pass logging) */
2980 d->mv_bits= s->mv_bits;
2981 d->i_tex_bits= s->i_tex_bits;
2982 d->p_tex_bits= s->p_tex_bits;
2983 d->i_count= s->i_count;
2984 d->f_count= s->f_count;
2985 d->b_count= s->b_count;
2986 d->skip_count= s->skip_count;
2987 d->misc_bits= s->misc_bits;
2990 d->mb_skiped= s->mb_skiped;
2991 d->qscale= s->qscale;
/**
 * Counterpart of copy_context_before_encode: copies the state produced by a
 * trial macroblock encode from s into d (the "best so far" context). In
 * addition to the saved fields it records the encode's outcome: MVs, intra
 * flag, MV type/direction, partitioned bitstream contexts, block last
 * indices, and interlaced-DCT choice.
 * NOTE(review): some lines are elided in this view; keep the field list in
 * sync with copy_context_before_encode.
 */
2994 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* chosen motion vectors + MV prediction state */
2997 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2998 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
3001 d->mb_skip_run= s->mb_skip_run;
3003 d->last_dc[i]= s->last_dc[i];
/* bit-usage statistics */
3006 d->mv_bits= s->mv_bits;
3007 d->i_tex_bits= s->i_tex_bits;
3008 d->p_tex_bits= s->p_tex_bits;
3009 d->i_count= s->i_count;
3010 d->f_count= s->f_count;
3011 d->b_count= s->b_count;
3012 d->skip_count= s->skip_count;
3013 d->misc_bits= s->misc_bits;
/* macroblock decision results */
3015 d->mb_intra= s->mb_intra;
3016 d->mb_skiped= s->mb_skiped;
3017 d->mv_type= s->mv_type;
3018 d->mv_dir= s->mv_dir;
/* data partitioning uses separate bit contexts for texture data */
3020 if(s->data_partitioning){
3022 d->tex_pb= s->tex_pb;
3026 d->block_last_index[i]= s->block_last_index[i];
3027 d->interlaced_dct= s->interlaced_dct;
3028 d->qscale= s->qscale;
/**
 * Trial-encodes one macroblock as candidate `type` into one of two
 * alternating bit buffers, and keeps it if it costs fewer bits than the
 * best candidate so far (*dmin). Used by the HQ mode-decision loop in
 * encode_picture, which tries every allowed MB type.
 * NOTE(review): the lines that update *dmin/*next_block on a win are
 * elided in this view.
 */
3031 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
3032 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3033 int *dmin, int *next_block, int motion_x, int motion_y)
/* restore the pre-trial state before each attempt */
3037 copy_context_before_encode(s, backup, type);
/* write into the scratch block set / bit buffer for this trial */
3039 s->block= s->blocks[*next_block];
3040 s->pb= pb[*next_block];
3041 if(s->data_partitioning){
3042 s->pb2 = pb2 [*next_block];
3043 s->tex_pb= tex_pb[*next_block];
3046 encode_mb(s, motion_x, motion_y);
/* cost = total bits written across all partitions */
3048 bits_count= get_bit_count(&s->pb);
3049 if(s->data_partitioning){
3050 bits_count+= get_bit_count(&s->pb2);
3051 bits_count+= get_bit_count(&s->tex_pb);
/* new best candidate: capture the resulting encoder state */
3054 if(bits_count<*dmin){
3058 copy_context_after_encode(best, s, type);
/**
 * Sum of squared errors between two w x h pixel areas. Uses the optimized
 * dsputil routines for the common 16x16 and 8x8 cases and falls back to a
 * table-driven scalar loop (squareTbl is biased by 256 so negative
 * differences index correctly) for other sizes.
 * NOTE(review): the 16x16 `if`, loop headers, and return of `acc` are
 * elided in this view.
 */
3062 static inline int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3063 uint32_t *sq = squareTbl + 256;
3068 return s->dsp.sse[0](NULL, src1, src2, stride);
3069 else if(w==8 && h==8)
3070 return s->dsp.sse[1](NULL, src1, src2, stride);
3074 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
3083 static void encode_picture(MpegEncContext *s, int picture_number)
3085 int mb_x, mb_y, pdif = 0;
3088 MpegEncContext best_s, backup_s;
3089 uint8_t bit_buf[2][3000];
3090 uint8_t bit_buf2[2][3000];
3091 uint8_t bit_buf_tex[2][3000];
3092 PutBitContext pb[2], pb2[2], tex_pb[2];
3095 init_put_bits(&pb [i], bit_buf [i], 3000, NULL, NULL);
3096 init_put_bits(&pb2 [i], bit_buf2 [i], 3000, NULL, NULL);
3097 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
3100 s->picture_number = picture_number;
3102 /* Reset the average MB variance */
3103 s->current_picture.mb_var_sum = 0;
3104 s->current_picture.mc_mb_var_sum = 0;
3107 /* we need to initialize some time vars before we can encode b-frames */
3108 // RAL: Condition added for MPEG1VIDEO
3109 if (s->codec_id == CODEC_ID_MPEG1VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3110 ff_set_mpeg4_time(s, s->picture_number);
3113 s->scene_change_score=0;
3115 s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration
3117 if(s->pict_type==I_TYPE){
3118 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3119 else s->no_rounding=0;
3120 }else if(s->pict_type!=B_TYPE){
3121 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3122 s->no_rounding ^= 1;
3125 /* Estimate motion for every MB */
3126 s->mb_intra=0; //for the rate distoration & bit compare functions
3127 if(s->pict_type != I_TYPE){
3128 if(s->pict_type != B_TYPE){
3129 if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3131 s->me.dia_size= s->avctx->pre_dia_size;
3133 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3134 for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3137 ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
3144 s->me.dia_size= s->avctx->dia_size;
3145 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3146 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3147 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3148 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3149 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3150 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3153 s->block_index[0]+=2;
3154 s->block_index[1]+=2;
3155 s->block_index[2]+=2;
3156 s->block_index[3]+=2;
3158 /* compute motion vector & mb_type and store in context */
3159 if(s->pict_type==B_TYPE)
3160 ff_estimate_b_frame_motion(s, mb_x, mb_y);
3162 ff_estimate_p_frame_motion(s, mb_x, mb_y);
3165 }else /* if(s->pict_type == I_TYPE) */{
3167 //FIXME do we need to zero them?
3168 memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3169 memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3170 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3172 if(!s->fixed_qscale){
3173 /* finding spatial complexity for I-frame rate control */
3174 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3175 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3178 uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3180 int sum = s->dsp.pix_sum(pix, s->linesize);
3182 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3184 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3185 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3186 s->current_picture.mb_var_sum += varc;
3193 if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
3194 s->pict_type= I_TYPE;
3195 memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3196 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3200 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3201 s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
3203 ff_fix_long_p_mvs(s);
3206 if(s->pict_type==B_TYPE){
3209 a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
3210 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
3211 s->f_code = FFMAX(a, b);
3213 a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
3214 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
3215 s->b_code = FFMAX(a, b);
3217 ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
3218 ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
3219 ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
3220 ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
3224 if (s->fixed_qscale)
3225 s->frame_qscale = s->current_picture.quality;
3227 s->frame_qscale = ff_rate_estimate_qscale(s);
3229 if(s->adaptive_quant){
3231 switch(s->codec_id){
3232 case CODEC_ID_MPEG4:
3233 ff_clean_mpeg4_qscales(s);
3236 case CODEC_ID_H263P:
3237 ff_clean_h263_qscales(s);
3242 s->qscale= s->current_picture.qscale_table[0];
3244 s->qscale= (int)(s->frame_qscale + 0.5);
3246 if (s->out_format == FMT_MJPEG) {
3247 /* for mjpeg, we do include qscale in the matrix */
3248 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3250 int j= s->dsp.idct_permutation[i];
3252 s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3254 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3255 s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8);
3258 //FIXME var duplication
3259 s->current_picture.key_frame= s->pict_type == I_TYPE;
3260 s->current_picture.pict_type= s->pict_type;
3262 if(s->current_picture.key_frame)
3263 s->picture_in_gop_number=0;
3265 s->last_bits= get_bit_count(&s->pb);
3266 switch(s->out_format) {
3268 mjpeg_picture_header(s);
3272 if (s->codec_id == CODEC_ID_WMV2)
3273 ff_wmv2_encode_picture_header(s, picture_number);
3274 else if (s->h263_msmpeg4)
3275 msmpeg4_encode_picture_header(s, picture_number);
3276 else if (s->h263_pred)
3277 mpeg4_encode_picture_header(s, picture_number);
3278 else if (s->h263_rv10)
3279 rv10_encode_picture_header(s, picture_number);
3281 h263_encode_picture_header(s, picture_number);
3285 mpeg1_encode_picture_header(s, picture_number);
3288 bits= get_bit_count(&s->pb);
3289 s->header_bits= bits - s->last_bits;
3301 /* init last dc values */
3302 /* note: quant matrix value (8) is implied here */
3303 s->last_dc[i] = 128;
3305 s->current_picture_ptr->error[i] = 0;
3308 s->last_mv[0][0][0] = 0;
3309 s->last_mv[0][0][1] = 0;
3310 s->last_mv[1][0][0] = 0;
3311 s->last_mv[1][0][1] = 0;
3316 if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)
3317 s->gob_index = ff_h263_get_gob_height(s);
3319 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3320 ff_mpeg4_init_partitions(s);
3325 s->first_slice_line = 1;
3326 s->ptr_lastgob = s->pb.buf;
3327 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3328 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3329 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3331 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3332 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3333 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3334 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3335 s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
3336 s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
3337 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3338 const int xy= mb_y*s->mb_stride + mb_x;
3339 int mb_type= s->mb_type[xy];
3345 s->block_index[0]+=2;
3346 s->block_index[1]+=2;
3347 s->block_index[2]+=2;
3348 s->block_index[3]+=2;
3349 s->block_index[4]++;
3350 s->block_index[5]++;
3352 /* write gob / video packet header */
3355 int current_packet_size, is_gob_start;
3357 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3360 if(s->codec_id==CODEC_ID_MPEG4){
3361 if(current_packet_size >= s->rtp_payload_size
3362 && s->mb_y + s->mb_x>0){
3364 if(s->partitioned_frame){
3365 ff_mpeg4_merge_partitions(s);
3366 ff_mpeg4_init_partitions(s);
3368 ff_mpeg4_encode_video_packet_header(s);
3370 if(s->flags&CODEC_FLAG_PASS1){
3371 int bits= get_bit_count(&s->pb);
3372 s->misc_bits+= bits - s->last_bits;
3375 ff_mpeg4_clean_buffers(s);
3378 }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
3379 if( current_packet_size >= s->rtp_payload_size
3380 && s->mb_y + s->mb_x>0 && s->mb_skip_run==0){
3381 ff_mpeg1_encode_slice_header(s);
3382 ff_mpeg1_clean_buffers(s);
3386 if(current_packet_size >= s->rtp_payload_size
3387 && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){
3389 h263_encode_gob_header(s, mb_y);
3395 s->ptr_lastgob = pbBufPtr(&s->pb);
3396 s->first_slice_line=1;
3397 s->resync_mb_x=mb_x;
3398 s->resync_mb_y=mb_y;
3403 if( (s->resync_mb_x == s->mb_x)
3404 && s->resync_mb_y+1 == s->mb_y){
3405 s->first_slice_line=0;
3408 if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3410 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3412 copy_context_before_encode(&backup_s, s, -1);
3414 best_s.data_partitioning= s->data_partitioning;
3415 best_s.partitioned_frame= s->partitioned_frame;
3416 if(s->data_partitioning){
3417 backup_s.pb2= s->pb2;
3418 backup_s.tex_pb= s->tex_pb;
3421 if(mb_type&MB_TYPE_INTER){
3422 s->mv_dir = MV_DIR_FORWARD;
3423 s->mv_type = MV_TYPE_16X16;
3425 s->mv[0][0][0] = s->p_mv_table[xy][0];
3426 s->mv[0][0][1] = s->p_mv_table[xy][1];
3427 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb,
3428 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3430 if(mb_type&MB_TYPE_INTER4V){
3431 s->mv_dir = MV_DIR_FORWARD;
3432 s->mv_type = MV_TYPE_8X8;
3435 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3436 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3438 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb,
3439 &dmin, &next_block, 0, 0);
3441 if(mb_type&MB_TYPE_FORWARD){
3442 s->mv_dir = MV_DIR_FORWARD;
3443 s->mv_type = MV_TYPE_16X16;
3445 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3446 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3447 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb,
3448 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3450 if(mb_type&MB_TYPE_BACKWARD){
3451 s->mv_dir = MV_DIR_BACKWARD;
3452 s->mv_type = MV_TYPE_16X16;
3454 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3455 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3456 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3457 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3459 if(mb_type&MB_TYPE_BIDIR){
3460 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3461 s->mv_type = MV_TYPE_16X16;
3463 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3464 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3465 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3466 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3467 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb,
3468 &dmin, &next_block, 0, 0);
3470 if(mb_type&MB_TYPE_DIRECT){
3471 int mx= s->b_direct_mv_table[xy][0];
3472 int my= s->b_direct_mv_table[xy][1];
3474 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3477 ff_mpeg4_set_direct_mv(s, mx, my);
3479 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb,
3480 &dmin, &next_block, mx, my);
3482 if(mb_type&MB_TYPE_INTRA){
3484 s->mv_type = MV_TYPE_16X16;
3488 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
3489 &dmin, &next_block, 0, 0);
3490 /* force cleaning of ac/dc pred stuff if needed ... */
3491 if(s->h263_pred || s->h263_aic)
3492 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3494 copy_context_after_encode(s, &best_s, -1);
3496 pb_bits_count= get_bit_count(&s->pb);
3497 flush_put_bits(&s->pb);
3498 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3501 if(s->data_partitioning){
3502 pb2_bits_count= get_bit_count(&s->pb2);
3503 flush_put_bits(&s->pb2);
3504 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3505 s->pb2= backup_s.pb2;
3507 tex_pb_bits_count= get_bit_count(&s->tex_pb);
3508 flush_put_bits(&s->tex_pb);
3509 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3510 s->tex_pb= backup_s.tex_pb;
3512 s->last_bits= get_bit_count(&s->pb);
3514 int motion_x, motion_y;
3516 int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
3518 if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){
3519 /* get luma score */
3520 if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3521 intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME dont scale it down so we dont have to fix it
3525 int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
3528 dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
3530 for(i=0; i<16; i++){
3531 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3532 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3533 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3534 *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3538 intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3540 /* printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8,
3541 s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
3542 s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
3545 /* get chroma score */
3546 if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3554 if(s->out_format == FMT_H263){
3555 mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3557 mean= (s->last_dc[i] + 4)>>3;
3559 dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
3563 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean;
3564 *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean;
3567 intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3572 switch(s->avctx->mb_cmp&0xFF){
3575 intra_score+= 32*s->qscale;
3578 intra_score+= 24*s->qscale*s->qscale;
3581 intra_score+= 96*s->qscale;
3584 intra_score+= 48*s->qscale;
3591 intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3595 if(intra_score < inter_score)
3596 mb_type= MB_TYPE_INTRA;
3599 s->mv_type=MV_TYPE_16X16;
3600 // only one MB-Type possible
3606 motion_x= s->mv[0][0][0] = 0;
3607 motion_y= s->mv[0][0][1] = 0;
3610 s->mv_dir = MV_DIR_FORWARD;
3612 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3613 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3615 case MB_TYPE_INTER4V:
3616 s->mv_dir = MV_DIR_FORWARD;
3617 s->mv_type = MV_TYPE_8X8;
3620 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3621 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3623 motion_x= motion_y= 0;
3625 case MB_TYPE_DIRECT:
3626 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3628 motion_x=s->b_direct_mv_table[xy][0];
3629 motion_y=s->b_direct_mv_table[xy][1];
3631 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3635 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3639 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3640 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3641 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3642 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3644 case MB_TYPE_BACKWARD:
3645 s->mv_dir = MV_DIR_BACKWARD;
3647 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3648 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3650 case MB_TYPE_FORWARD:
3651 s->mv_dir = MV_DIR_FORWARD;
3653 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3654 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3655 // printf(" %d %d ", motion_x, motion_y);
3658 motion_x=motion_y=0; //gcc warning fix
3659 printf("illegal MB type\n");
3662 encode_mb(s, motion_x, motion_y);
3664 // RAL: Update last macrobloc type
3665 s->last_mv_dir = s->mv_dir;
3668 /* clean the MV table in IPS frames for direct mode in B frames */
3669 if(s->mb_intra /* && I,P,S_TYPE */){
3670 s->p_mv_table[xy][0]=0;
3671 s->p_mv_table[xy][1]=0;
3674 MPV_decode_mb(s, s->block);
3676 if(s->flags&CODEC_FLAG_PSNR){
3680 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3681 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3683 s->current_picture_ptr->error[0] += sse(
3685 s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3686 s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3688 s->current_picture_ptr->error[1] += sse(
3690 s->new_picture .data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3691 s->current_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3692 w>>1, h>>1, s->uvlinesize);
3693 s->current_picture_ptr->error[2] += sse(
3695 s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3696 s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
3697 w>>1, h>>1, s->uvlinesize);
3699 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
3705 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
3706 ff_mpeg4_merge_partitions(s);
3708 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
3709 msmpeg4_encode_ext_header(s);
3711 if(s->codec_id==CODEC_ID_MPEG4)
3712 ff_mpeg4_stuffing(&s->pb);
3715 //if (s->gob_number)
3716 // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
3718 /* Send the last GOB if RTP */
3720 flush_put_bits(&s->pb);
3721 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
3722 /* Call the RTP callback to send the last GOB */
3723 if (s->rtp_callback)
3724 s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
3725 s->ptr_lastgob = pbBufPtr(&s->pb);
3726 //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
/**
 * Rate-distortion ("trellis") quantization of one 8x8 DCT block.
 *
 * Performs the forward DCT, quantizes each coefficient to (up to two)
 * candidate levels, then dynamic-programs over (run, level) choices to
 * minimize distortion + lambda * bits, using the per-codec VLC length
 * tables. Distortion is measured against the dequantized coefficient,
 * reproducing the decoder-side inverse quant (H.263-style qmul/qadd vs
 * MPEG matrix style, with mismatch-control odd-ification).
 *
 * @param s        encoder context (quant matrices, VLC length tables, DSP fns)
 * @param block    DCT coefficients; quantized in place (IDCT-permuted order)
 * @param n        block index — selects intra DC handling path
 * @param qscale   quantizer scale for this block
 * @param overflow set nonzero if a quantized level exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient, or less than the first
 *         scan index if the block quantized to all zeros
 *
 * NOTE(review): several interior statements are elided in this excerpt;
 * comments below describe only the visible code.
 */
3730 static int dct_quantize_trellis_c(MpegEncContext *s,
3731 DCTELEM *block, int n,
3732 int qscale, int *overflow){
3734 const uint8_t *scantable= s->intra_scantable.scantable;
3736 unsigned int threshold1, threshold2;
3746 int coeff_count[64];
3747 int lambda, qmul, qadd, start_i, last_non_zero, i;
3748 const int esc_length= s->ac_esc_length;
3750 uint8_t * last_length;
3754 s->dsp.fdct (block);
/* H.263-style inverse-quant constants: qadd forced odd then scaled */
3757 qadd= ((qscale-1)|1)*8;
3768 /* For AIC we skip quant/dequant of INTRADC */
3773 /* note: block[0] is assumed to be positive */
3774 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: intra quant matrix + intra AC VLC length tables */
3777 qmat = s->q_intra_matrix[qscale];
3778 if(s->mpeg_quant || s->codec_id== CODEC_ID_MPEG1VIDEO)
3779 bias= 1<<(QMAT_SHIFT-1);
3780 length = s->intra_ac_vlc_length;
3781 last_length= s->intra_ac_vlc_last_length;
/* inter path: inter quant matrix + inter AC VLC length tables */
3785 qmat = s->q_inter_matrix[qscale];
3786 length = s->inter_ac_vlc_length;
3787 last_length= s->inter_ac_vlc_last_length;
3790 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3791 threshold2= (threshold1<<1);
/* Pass 1: per scan position, compute candidate quantized levels
 * (level and level-1, i.e. up to 2 candidates) */
3793 for(i=start_i; i<64; i++) {
3794 const int j = scantable[i];
3795 const int k= i-start_i;
3796 int level = block[j];
3797 level = level * qmat[j];
3799 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3800 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
/* unsigned-range trick: true iff |level| is above the dead zone */
3801 if(((unsigned)(level+threshold1))>threshold2){
3803 level= (bias + level)>>QMAT_SHIFT;
3805 coeff[1][k]= level-1;
3806 // coeff[2][k]= level-2;
3808 level= (bias - level)>>QMAT_SHIFT;
3809 coeff[0][k]= -level;
3810 coeff[1][k]= -level+1;
3811 // coeff[2][k]= -level+2;
3813 coeff_count[k]= FFMIN(level, 2);
/* below dead zone: only candidate is +/-1 with matching sign */
3817 coeff[0][k]= (level>>31)|1;
3822 *overflow= s->max_qcoeff < max; //overflow might have happend
/* all-zero block: clear AC and bail out early */
3824 if(last_non_zero < start_i){
3825 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3826 return last_non_zero;
3829 lambda= (qscale*qscale*64*105 + 64)>>7; //FIXME finetune
/* Pass 2: DP over scan positions; score_tab[i] = best cost ending at i */
3832 for(i=0; i<=last_non_zero - start_i; i++){
3833 int level_index, run, j;
3834 const int dct_coeff= block[ scantable[i + start_i] ];
3835 const int zero_distoration= dct_coeff*dct_coeff;
3836 int best_score=256*256*256*120;
3838 last_score += zero_distoration;
3839 for(level_index=0; level_index < coeff_count[i]; level_index++){
3841 int level= coeff[level_index][i];
/* reconstruct what the decoder would see for this candidate level */
3846 if(s->out_format == FMT_H263){
3848 unquant_coeff= level*qmul + qadd;
3850 unquant_coeff= level*qmul - qadd;
3853 j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
/* MPEG intra dequant (with mismatch control: force result odd) */
3856 unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3;
3857 unquant_coeff = -((unquant_coeff - 1) | 1);
3859 unquant_coeff = (int)( level * qscale * s->intra_matrix[j]) >> 3;
3860 unquant_coeff = (unquant_coeff - 1) | 1;
/* MPEG inter dequant */
3864 unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3865 unquant_coeff = -((unquant_coeff - 1) | 1);
3867 unquant_coeff = ((( level << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3868 unquant_coeff = (unquant_coeff - 1) | 1;
3874 distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff);
/* small levels: exact VLC bit cost from the length tables */
3876 if((level&(~127)) == 0){
3877 for(run=0; run<=i - left_limit; run++){
3878 int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3879 score += score_tab[i-run];
3881 if(score < best_score){
3883 score_tab[i+1]= score;
3885 level_tab[i+1]= level-64;
/* H.263 family: also track best "last coefficient" code */
3889 if(s->out_format == FMT_H263){
3890 for(run=0; run<=i - left_limit; run++){
3891 int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3892 score += score_tab[i-run];
3893 if(score < last_score){
3896 last_level= level-64;
/* large levels: costed with the escape-code length */
3902 distoration += esc_length*lambda;
3903 for(run=0; run<=i - left_limit; run++){
3904 int score= distoration + score_tab[i-run];
3906 if(score < best_score){
3908 score_tab[i+1]= score;
3910 level_tab[i+1]= level-64;
3914 if(s->out_format == FMT_H263){
3915 for(run=0; run<=i - left_limit; run++){
3916 int score= distoration + score_tab[i-run];
3917 if(score < last_score){
3920 last_level= level-64;
/* account for the cost of coding this coefficient as zero */
3928 for(j=left_limit; j<=i; j++){
3929 score_tab[j] += zero_distoration;
3931 score_limit+= zero_distoration;
3932 if(score_tab[i+1] < score_limit)
3933 score_limit= score_tab[i+1];
3935 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
/* prune start positions that can no longer beat the best path */
3936 while(score_tab[ left_limit ] > score_limit + lambda) left_limit++;
3939 //FIXME add some cbp penalty
/* non-H.263: pick the cheapest truncation point after the fact */
3941 if(s->out_format != FMT_H263){
3942 last_score= 256*256*256*120;
3943 for(i= left_limit; i<=last_non_zero - start_i + 1; i++){
3944 int score= score_tab[i];
3945 if(i) score += lambda*2; //FIXME exacter?
3947 if(score < last_score){
3950 last_level= level_tab[i];
3951 last_run= run_tab[i];
/* Pass 3: write the chosen (run, level) path back into block[] */
3956 last_non_zero= last_i - 1 + start_i;
3957 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3959 if(last_non_zero < start_i)
3960 return last_non_zero;
3964 //FIXME use permutated scantable
3965 block[ s->dsp.idct_permutation[ scantable[last_non_zero] ] ]= last_level;
/* walk the DP back-pointers (run_tab) from the last coefficient */
3968 for(;i>0 ; i -= run_tab[i] + 1){
3969 const int j= s->dsp.idct_permutation[ scantable[i - 1 + start_i] ];
3971 block[j]= level_tab[i];
3975 return last_non_zero;
/**
 * Plain (non-trellis) quantization of one 8x8 DCT block.
 *
 * Forward DCT, special-cased intra-DC division, then per-coefficient
 * matrix quantization with a configurable dead-zone bias. Finally the
 * non-zero coefficients are permuted into the layout expected by the
 * selected IDCT.
 *
 * @param s        encoder context (quant matrices, bias, DSP fns)
 * @param block    DCT coefficients; quantized in place
 * @param n        block index — selects intra DC handling path
 * @param qscale   quantizer scale for this block
 * @param overflow set nonzero if a level exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient
 *
 * NOTE(review): the main coefficient loop header is elided in this
 * excerpt; comments describe only the visible code.
 */
3978 static int dct_quantize_c(MpegEncContext *s,
3979 DCTELEM *block, int n,
3980 int qscale, int *overflow)
3982 int i, j, level, last_non_zero, q;
3984 const uint8_t *scantable= s->intra_scantable.scantable;
3987 unsigned int threshold1, threshold2;
3989 s->dsp.fdct (block);
3999 /* For AIC we skip quant/dequant of INTRADC */
4002 /* note: block[0] is assumed to be positive */
4003 block[0] = (block[0] + (q >> 1)) / q;
/* intra vs inter select matrix and dead-zone bias */
4006 qmat = s->q_intra_matrix[qscale];
4007 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4011 qmat = s->q_inter_matrix[qscale];
4012 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4014 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4015 threshold2= (threshold1<<1);
4020 level = level * qmat[j];
4022 // if( bias+level >= (1<<QMAT_SHIFT)
4023 // || bias-level >= (1<<QMAT_SHIFT)){
/* unsigned-range trick: true iff |level| is above the dead zone */
4024 if(((unsigned)(level+threshold1))>threshold2){
4026 level= (bias + level)>>QMAT_SHIFT;
4029 level= (bias - level)>>QMAT_SHIFT;
4038 *overflow= s->max_qcoeff < max; //overflow might have happend
4040 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4041 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4042 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4044 return last_non_zero;
4047 #endif //CONFIG_ENCODERS
/**
 * MPEG-1 inverse quantization of one 8x8 block, in place.
 *
 * Intra blocks: DC is scaled by y/c_dc_scale, AC by qscale and the
 * intra matrix. Inter blocks: (2*level+1) form with the inter matrix.
 * Both paths apply MPEG-1 mismatch control by forcing results odd
 * ((level - 1) | 1). The range check is a debug aid (PARANOID-style).
 *
 * @param s      context providing matrices, dc scales and last index
 * @param block  coefficients, dequantized in place
 * @param n      block index (luma/chroma selects the DC scale)
 * @param qscale quantizer scale
 */
4049 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
4050 DCTELEM *block, int n, int qscale)
4052 int i, level, nCoeffs;
4053 const uint16_t *quant_matrix;
4055 nCoeffs= s->block_last_index[n];
/* intra DC: plain scale, no matrix */
4059 block[0] = block[0] * s->y_dc_scale;
4061 block[0] = block[0] * s->c_dc_scale;
4062 /* XXX: only mpeg1 */
4063 quant_matrix = s->intra_matrix;
4064 for(i=1;i<=nCoeffs;i++) {
4065 int j= s->intra_scantable.permutated[i];
/* negative branch, then odd-ify for mismatch control */
4070 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4071 level = (level - 1) | 1;
4074 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4075 level = (level - 1) | 1;
4078 if (level < -2048 || level > 2047)
4079 fprintf(stderr, "unquant error %d %d\n", i, level);
/* inter path: (2*level+1) * qscale * matrix form */
4086 quant_matrix = s->inter_matrix;
4087 for(;i<=nCoeffs;i++) {
4088 int j= s->intra_scantable.permutated[i];
4093 level = (((level << 1) + 1) * qscale *
4094 ((int) (quant_matrix[j]))) >> 4;
4095 level = (level - 1) | 1;
4098 level = (((level << 1) + 1) * qscale *
4099 ((int) (quant_matrix[j]))) >> 4;
4100 level = (level - 1) | 1;
4103 if (level < -2048 || level > 2047)
4104 fprintf(stderr, "unquant error %d %d\n", i, level);
/**
 * MPEG-2 inverse quantization of one 8x8 block, in place.
 *
 * Like the MPEG-1 variant, but without the per-coefficient odd-ification
 * (MPEG-2 uses a different mismatch-control scheme), and with alternate
 * scan forcing all 64 coefficients to be processed.
 *
 * @param s      context providing matrices, dc scales and last index
 * @param block  coefficients, dequantized in place
 * @param n      block index (luma/chroma selects the DC scale)
 * @param qscale quantizer scale
 */
4112 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
4113 DCTELEM *block, int n, int qscale)
4115 int i, level, nCoeffs;
4116 const uint16_t *quant_matrix;
/* alternate scan: last index is unreliable, process whole block */
4118 if(s->alternate_scan) nCoeffs= 63;
4119 else nCoeffs= s->block_last_index[n];
/* intra DC: plain scale, no matrix */
4123 block[0] = block[0] * s->y_dc_scale;
4125 block[0] = block[0] * s->c_dc_scale;
4126 quant_matrix = s->intra_matrix;
4127 for(i=1;i<=nCoeffs;i++) {
4128 int j= s->intra_scantable.permutated[i];
4133 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4136 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4139 if (level < -2048 || level > 2047)
4140 fprintf(stderr, "unquant error %d %d\n", i, level);
/* inter path: (2*level+1) * qscale * matrix form */
4148 quant_matrix = s->inter_matrix;
4149 for(;i<=nCoeffs;i++) {
4150 int j= s->intra_scantable.permutated[i];
4155 level = (((level << 1) + 1) * qscale *
4156 ((int) (quant_matrix[j]))) >> 4;
4159 level = (((level << 1) + 1) * qscale *
4160 ((int) (quant_matrix[j]))) >> 4;
4163 if (level < -2048 || level > 2047)
4164 fprintf(stderr, "unquant error %d %d\n", i, level);
/**
 * H.263-family inverse quantization of one 8x8 block, in place.
 *
 * Uses the simple qmul/qadd scheme: level' = level*qmul +/- qadd
 * (sign of the correction follows the sign of the level). Intra DC
 * is scaled by the y/c DC scale instead.
 *
 * @param s      context providing dc scales and scan tables
 * @param block  coefficients, dequantized in place
 * @param n      block index (luma/chroma selects the DC scale)
 * @param qscale quantizer scale; qadd = (qscale-1)|1 keeps it odd
 */
4175 static void dct_unquantize_h263_c(MpegEncContext *s,
4176 DCTELEM *block, int n, int qscale)
4178 int i, level, qmul, qadd;
4181 assert(s->block_last_index[n]>=0);
4183 qadd = (qscale - 1) | 1;
4189 block[0] = block[0] * s->y_dc_scale;
4191 block[0] = block[0] * s->c_dc_scale;
4195 nCoeffs= 63; //does not allways use zigzag table
4198 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
4201 for(;i<=nCoeffs;i++) {
/* negative levels get -qadd, positive get +qadd */
4205 level = level * qmul - qadd;
4207 level = level * qmul + qadd;
4210 if (level < -2048 || level > 2047)
4211 fprintf(stderr, "unquant error %d %d\n", i, level);
/* Encoder option table shared by the mpeg4/msmpeg4/wmv1 encoders below.
 * Each entry maps a user-visible option name to an AVCodecContext field
 * plus its allowed range and default.
 *
 * Fixes in this revision:
 *  - "i_quant_offset" was wired to the i_quant_factor field (copy-paste
 *    bug): setting the offset silently overwrote the factor. It now maps
 *    to i_quant_offset.
 *  - a duplicate "me_range" entry was removed; the second registration
 *    shadowed/duplicated the first. */
4219 static const AVOption mpeg4_options[] =
4221 AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
4222 AVOPTION_CODEC_FLAG("vhq", "very high quality", flags, CODEC_FLAG_HQ, 0),
4223 AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
4224 "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
4225 bit_rate_tolerance, 4, 240000000, 8000),
4226 AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
4227 AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
4228 AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
4229 rc_eq, "tex^qComp,option1,options2", 0),
4230 AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
4231 rc_min_rate, 4, 24000000, 0),
4232 AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
4233 rc_max_rate, 4, 24000000, 0),
4234 AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
4235 rc_buffer_aggressivity, 4, 24000000, 0),
4236 AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
4237 rc_initial_cplx, 0., 9999999., 0),
/* NOTE(review): range 0..0 below looks suspicious — confirm intended */
4238 AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
4239 i_quant_factor, 0., 0., 0),
4240 AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
4241 i_quant_offset, -999999., 999999., 0),
4242 AVOPTION_CODEC_INT("dct_algo", "dct alghorithm",
4243 dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
4244 AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
4245 lumi_masking, 0., 999999., 0),
4246 AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
4247 temporal_cplx_masking, 0., 999999., 0),
4248 AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
4249 spatial_cplx_masking, 0., 999999., 0),
4250 AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
4251 p_masking, 0., 999999., 0),
4252 AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
4253 dark_masking, 0., 999999., 0),
4254 AVOPTION_CODEC_INT("idct_algo", "idct alghorithm",
4255 idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
4257 AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
4259 AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
4262 AVOPTION_CODEC_INT("me_cmp", "ME compare function",
4263 me_cmp, 0, 24000000, 0),
4264 AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
4265 me_sub_cmp, 0, 24000000, 0),
4268 AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
4269 dia_size, 0, 24000000, 0),
4270 AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
4271 last_predictor_count, 0, 24000000, 0),
4273 AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
4274 pre_me, 0, 24000000, 0),
4275 AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
4276 me_pre_cmp, 0, 24000000, 0),
4278 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
4279 me_range, 0, 24000000, 0),
4280 AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape",
4281 pre_dia_size, 0, 24000000, 0),
4282 AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
4283 me_subpel_quality, 0, 24000000, 0),
4286 AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
4287 flags, CODEC_FLAG_PSNR, 0),
4288 AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
4290 AVOPTION_SUB(avoptions_common),
4294 #ifdef CONFIG_ENCODERS
/* AVCodec registration structs for the encoders implemented in this file.
 * All share MpegEncContext as their private context; the MPEG-4 / MSMPEG-4
 * / WMV1 encoders additionally expose the mpeg4_options table above. */
4296 AVCodec mpeg1video_encoder = {
4299 CODEC_ID_MPEG1VIDEO,
4300 sizeof(MpegEncContext),
4308 AVCodec h263_encoder = {
4312 sizeof(MpegEncContext),
4318 AVCodec h263p_encoder = {
4322 sizeof(MpegEncContext),
4328 AVCodec rv10_encoder = {
4332 sizeof(MpegEncContext),
4338 AVCodec mpeg4_encoder = {
4342 sizeof(MpegEncContext),
4346 .options = mpeg4_options,
4349 AVCodec msmpeg4v1_encoder = {
4353 sizeof(MpegEncContext),
4357 .options = mpeg4_options,
4360 AVCodec msmpeg4v2_encoder = {
4364 sizeof(MpegEncContext),
4368 .options = mpeg4_options,
4371 AVCodec msmpeg4v3_encoder = {
4375 sizeof(MpegEncContext),
4379 .options = mpeg4_options,
4382 AVCodec wmv1_encoder = {
4386 sizeof(MpegEncContext),
4390 .options = mpeg4_options,
4395 AVCodec mjpeg_encoder = {
4399 sizeof(MpegEncContext),
4405 #endif //CONFIG_ENCODERS