2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
24 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "mpegvideo.h"
34 #include "fastmemcpy.h"
/* Forward declarations for the static helpers defined later in this file.
 * The dct_unquantize_*_c pairs are the portable C reference implementations
 * of inverse quantization for each supported bitstream flavor (MPEG-1,
 * MPEG-2, H.263); platform init code may replace them with SIMD versions. */
40 #ifdef CONFIG_ENCODERS
41 static void encode_picture(MpegEncContext *s, int picture_number);
42 #endif //CONFIG_ENCODERS
43 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
44 DCTELEM *block, int n, int qscale);
45 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
/* Encoder-only helpers: forward/trellis quantization, SSE distortion of a
 * macroblock, and DCT-domain denoising. */
56 #ifdef CONFIG_ENCODERS
57 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
58 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
59 static int sse_mb(MpegEncContext *s);
60 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
61 #endif //CONFIG_ENCODERS
/* XvMC hardware-acceleration hooks implemented in a separate translation
 * unit; presumably only linked when XvMC support is enabled — confirm
 * against the build system. */
64 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
65 extern void XVMC_field_end(MpegEncContext *s);
66 extern void XVMC_decode_mb(MpegEncContext *s);
/* Global function pointer for edge drawing; defaults to the C reference
 * implementation below and may be overridden by platform-specific init. */
69 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
72 /* enable all paranoid tests for rounding, overflows, etc... */
78 /* for jpeg fast DCT */
/* Post-scaling factors for the AAN (Arai/Agui/Nakajima) fast DCT, as used
 * by libjpeg-style fdct_ifast: the outer product of the per-row scale
 * factors, scaled up by 14 bits (see comment below).
 * NOTE(review): the closing "};" of this table is not visible in this
 * truncated listing — verify against the upstream file. */
81 static const uint16_t aanscales[64] = {
82 /* precomputed values scaled up by 14 bits */
83 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
84 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
85 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
86 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
87 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
88 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
89 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446,
90 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247
/* H.263 chroma motion-vector rounding table: maps the sum of two luma MV
 * components (0..15 before shifting) to the chroma half-pel rounding value. */
93 static const uint8_t h263_chroma_roundtab[16] = {
94 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
95 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
/* Default chroma qscale table: identity mapping (chroma qscale == luma
 * qscale). Codecs with a nonlinear mapping (e.g. H.263 modified quant)
 * install their own table in place of this one. */
98 static const uint8_t ff_default_chroma_qscale_table[32]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* Encoder-global default tables, shared by all encoder instances:
 * default_mv_penalty is lazily allocated in MPV_encode_init;
 * default_fcode_tab maps MV components to f_code values. */
103 #ifdef CONFIG_ENCODERS
104 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
105 static uint8_t default_fcode_tab[MAX_MV*2+1];
/* Pixel-format list advertised by encoders that only accept YUV420P;
 * terminated by -1. */
107 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/* Precomputes quantization multiplier tables from a quantization matrix,
 * for every qscale in [qmin, qmax]:
 *   qmat[qscale][i]   - 32-bit multipliers (fixed point, QMAT_SHIFT) used by
 *                       the generic C quantizer;
 *   qmat16[qscale][0] - 16-bit multipliers (QMAT_SHIFT_MMX) for SIMD code,
 *   qmat16[qscale][1] - the matching rounding bias.
 * The three branches differ only in how the DCT output is scaled: islow
 * (and faandct when FAAN_POSTSCALE) produce unscaled coefficients, while
 * fdct_ifast folds the AAN post-scale in, so aanscales must be divided out.
 * NOTE(review): this listing is truncated — loop headers, braces and
 * several #endif lines are missing; code kept byte-identical, verify
 * against the upstream file before editing. */
109 static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
110 const uint16_t *quant_matrix, int bias, int qmin, int qmax)
114 for(qscale=qmin; qscale<=qmax; qscale++){
116 if (dsp->fdct == ff_jpeg_fdct_islow
117 #ifdef FAAN_POSTSCALE
118 || dsp->fdct == ff_faandct
/* j undoes the IDCT permutation so qmat is indexed in coded order. */
122 const int j= dsp->idct_permutation[i];
123 /* 16 <= qscale * quant_matrix[i] <= 7905 */
124 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
125 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
126 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
128 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
129 (qscale * quant_matrix[j]));
131 } else if (dsp->fdct == fdct_ifast
132 #ifndef FAAN_POSTSCALE
133 || dsp->fdct == ff_faandct
137 const int j= dsp->idct_permutation[i];
138 /* 16 <= qscale * quant_matrix[i] <= 7905 */
139 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
140 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
141 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
/* Extra 14 bits compensate for the 14-bit scale baked into aanscales. */
143 qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
144 (aanscales[i] * qscale * quant_matrix[j]));
148 const int j= dsp->idct_permutation[i];
149 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
150 So 16 <= qscale * quant_matrix[i] <= 7905
151 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
152 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
154 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
155 // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
156 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
/* Clamp: a multiplier of 0 or exactly 128*256 would break the 16-bit
 * SIMD quantizer, so pin it just below the limit. */
158 if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
159 qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
/* Derives qscale from the current rate-control lambda (fixed-point mapping,
 * the 139/2^7 factor approximates the empirical lambda->qscale relation),
 * clips it to the user's [qmin, qmax], and refreshes lambda2 = lambda^2
 * (rounded) for the RD routines that work in squared-lambda units. */
165 static inline void update_qscale(MpegEncContext *s){
166 s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
167 s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
169 s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
171 #endif //CONFIG_ENCODERS
/* Initializes a ScanTable: stores the source scan order, builds the
 * IDCT-permuted variant (permutated[i] = permutation[src_scantable[i]]),
 * and fills raster_end[] (last significant position per scan index, used
 * for early termination). NOTE(review): loop headers/braces are missing
 * from this truncated listing; verify against upstream before editing. */
173 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
177 st->scantable= src_scantable;
181 j = src_scantable[i];
182 st->permutated[i] = permutation[j];
191 j = st->permutated[i];
193 st->raster_end[i]= end;
/* Writes a 64-entry quantization matrix to the bitstream, 8 bits per
 * coefficient, in zigzag scan order (as required by MPEG headers). */
197 #ifdef CONFIG_ENCODERS
198 void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
204 put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
209 #endif //CONFIG_ENCODERS
211 /* init common dct for both encoder and decoder */
/* Installs the C reference (un)quantizers, lets each platform backend
 * (MMX, Alpha, mlib, MMI, ARMv4l, PPC) override them with optimized
 * versions, then builds the permuted scan tables. The trellis quantizer
 * is installed *after* platform init so it cannot be overridden.
 * NOTE(review): the platform init calls are normally wrapped in #ifdef
 * ARCH_* guards; those lines are missing from this truncated listing. */
212 int DCT_common_init(MpegEncContext *s)
214 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
215 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
216 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
217 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
218 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
219 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
221 #ifdef CONFIG_ENCODERS
222 s->dct_quantize= dct_quantize_c;
223 s->denoise_dct= denoise_dct_c;
227 MPV_common_init_mmx(s);
230 MPV_common_init_axp(s);
233 MPV_common_init_mlib(s);
236 MPV_common_init_mmi(s);
239 MPV_common_init_armv4l(s);
242 MPV_common_init_ppc(s);
245 #ifdef CONFIG_ENCODERS
/* Keep the (possibly SIMD) quantizer reachable even when trellis is on. */
246 s->fast_dct_quantize= s->dct_quantize;
248 if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
249 s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
252 #endif //CONFIG_ENCODERS
254 /* load & permutate scantables
255 note: only wmv uses differnt ones
257 if(s->alternate_scan){
258 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
259 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
261 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
262 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
264 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
265 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
267 s->picture_structure= PICT_FRAME;
/* Shallow-copies src into dst (presumably via struct assignment on the
 * elided line) and marks the copy so it is never released via
 * release_buffer — the original Picture still owns the buffers. */
272 static void copy_picture(Picture *dst, Picture *src){
274 dst->type= FF_BUFFER_TYPE_COPY;
/* Copies per-frame metadata (type, quality, numbering, interlacing flags)
 * from src to dst without touching any pixel data or buffer ownership. */
277 static void copy_picture_attributes(AVFrame *dst, AVFrame *src){
278 dst->pict_type = src->pict_type;
279 dst->quality = src->quality;
280 dst->coded_picture_number = src->coded_picture_number;
281 dst->display_picture_number = src->display_picture_number;
282 // dst->reference = src->reference;
284 dst->interlaced_frame = src->interlaced_frame;
285 dst->top_field_first = src->top_field_first;
289 * allocates a Picture
290 * The pixels are allocated/set by calling get_buffer() if shared=0
/* Returns 0 on success, negative on failure (via the elided error paths
 * and the CHECKED_ALLOCZ fail label). Also allocates all per-picture side
 * tables (qscale, mb_type, motion vectors, etc.) on first use and keeps
 * s->linesize/uvlinesize consistent across get_buffer() calls.
 * NOTE(review): truncated listing — several braces/returns are missing. */
292 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
293 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
294 const int mb_array_size= s->mb_stride*s->mb_height;
295 const int b8_array_size= s->b8_stride*s->mb_height*2;
296 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared path: caller supplied the pixel buffers, just tag the type. */
300 assert(pic->data[0]);
301 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
302 pic->type= FF_BUFFER_TYPE_SHARED;
/* non-shared path: get pixel buffers from the application callback. */
306 assert(!pic->data[0]);
308 r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
310 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
311 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
/* The stride must not change between frames; decoding state depends on it. */
315 if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
316 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
320 if(pic->linesize[1] != pic->linesize[2]){
321 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n");
325 s->linesize = pic->linesize[0];
326 s->uvlinesize= pic->linesize[1];
/* Side tables are allocated once per Picture (qscale_table doubles as the
 * "already initialized" marker). */
329 if(pic->qscale_table==NULL){
331 CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
332 CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
333 CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
336 CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
337 CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
338 CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(int))
339 pic->mb_type= pic->mb_type_base + s->mb_stride+1;
/* H.264 stores motion at 4x4 granularity, other codecs at 8x8. */
340 if(s->out_format == FMT_H264){
342 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+1) * sizeof(uint16_t))
343 pic->motion_val[i]= pic->motion_val_base[i]+1;
344 CHECKED_ALLOCZ(pic->ref_index[i] , b8_array_size * sizeof(uint8_t))
346 pic->motion_subsample_log2= 2;
347 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
349 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+1) * sizeof(uint16_t)*2) //FIXME
350 pic->motion_val[i]= pic->motion_val_base[i]+1;
352 pic->motion_subsample_log2= 3;
354 pic->qstride= s->mb_stride;
355 CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
358 //it might be nicer if the application would keep track of these but it would require a API change
359 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
360 s->prev_pict_types[0]= s->pict_type;
/* If the buffer last held a B frame, invalidate its age so skipped-MB
 * copying is never attempted from it. */
361 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
362 pic->age= INT_MAX; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway
365 fail: //for the CHECKED_ALLOCZ macro
370 * deallocates a picture
/* Releases the pixel buffers (unless the Picture is a shared/app-owned
 * buffer) and frees every side table allocated by alloc_picture().
 * av_freep() NULLs the pointers, so double-free is safe. */
372 static void free_picture(MpegEncContext *s, Picture *pic){
375 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
376 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
379 av_freep(&pic->mb_var);
380 av_freep(&pic->mc_mb_var);
381 av_freep(&pic->mb_mean);
382 av_freep(&pic->mbskip_table);
383 av_freep(&pic->qscale_table);
384 av_freep(&pic->mb_type_base);
385 av_freep(&pic->pan_scan);
388 av_freep(&pic->motion_val_base[i]);
389 av_freep(&pic->ref_index[i]);
/* Shared pictures: data[] pointers belong to the application; presumably
 * only the pointers are cleared on the elided lines — confirm upstream. */
392 if(pic->type == FF_BUFFER_TYPE_SHARED){
401 /* init common structure for both encoder and decoder */
/* Computes all macroblock-grid geometry from width/height, then allocates
 * every context-lifetime table (MV tables, index maps, scratch buffers,
 * per-codec side tables). Returns 0 on success, negative on allocation
 * failure (via the CHECKED_ALLOCZ fail label on an elided line).
 * NOTE(review): truncated listing — many braces and #ifdef guards are
 * missing; code kept byte-identical, verify against upstream. */
402 int MPV_common_init(MpegEncContext *s)
404 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
406 dsputil_init(&s->dsp, s->avctx);
409 s->flags= s->avctx->flags;
/* MB grid geometry; strides include 1 extra column for edge handling. */
411 s->mb_width = (s->width + 15) / 16;
412 s->mb_height = (s->height + 15) / 16;
413 s->mb_stride = s->mb_width + 1;
414 s->b8_stride = s->mb_width*2 + 1;
415 s->b4_stride = s->mb_width*4 + 1;
416 mb_array_size= s->mb_height * s->mb_stride;
417 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
419 /* set default edge pos, will be overriden in decode_header if needed */
420 s->h_edge_pos= s->mb_width*16;
421 s->v_edge_pos= s->mb_height*16;
423 s->mb_num = s->mb_width * s->mb_height;
428 s->block_wrap[3]= s->mb_width*2 + 2;
430 s->block_wrap[5]= s->mb_width + 2;
433 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
434 s->chroma_qscale_table= ff_default_chroma_qscale_table;
436 s->progressive_sequence= 1;
437 s->progressive_frame= 1;
438 s->coded_picture_number = 0;
/* Sizes in 8x8-block units, with a 1-block border on each side. */
440 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
441 c_size = (s->mb_width + 2) * (s->mb_height + 2);
442 yc_size = y_size + 2 * c_size;
444 /* convert fourcc to upper case */
445 s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
446 + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
447 + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
448 + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
450 s->avctx->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
451 + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
452 + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
453 + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
455 CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
456 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
458 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* mb_index2xy maps linear MB index -> position in the strided MB array. */
460 CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
461 for(y=0; y<s->mb_height; y++){
462 for(x=0; x<s->mb_width; x++){
463 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
466 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
469 /* Allocate MV tables */
470 CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
471 CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
472 CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
473 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
474 CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
475 CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
/* Working pointers skip the first row+column (edge MBs). */
476 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
477 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
478 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
479 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
480 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
481 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
483 //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
484 CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
486 CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t))
487 CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
489 if(s->codec_id==CODEC_ID_MPEG4){
490 CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
491 CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE);
494 if(s->msmpeg4_version){
495 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
497 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
499 /* Allocate MB type table */
500 CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint16_t)) //needed for encoding
502 CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
504 CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
505 CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
506 CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
507 CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
508 CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
509 CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
511 if(s->avctx->noise_reduction){
512 CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
513 CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
516 CHECKED_ALLOCZ(s->blocks, 64*6*2 * sizeof(DCTELEM))
518 CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))
520 CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
522 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
523 /* interlaced direct mode decoding tables */
528 CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k] , mv_table_size * 2 * sizeof(int16_t))
529 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
531 CHECKED_ALLOCZ(s->b_field_select_table[i][j] , mb_array_size * 2 * sizeof(uint8_t))
532 CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j] , mv_table_size * 2 * sizeof(int16_t))
533 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
535 CHECKED_ALLOCZ(s->p_field_select_table[i] , mb_array_size * 2 * sizeof(uint8_t))
538 if (s->out_format == FMT_H263) {
540 CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
541 s->ac_val[1] = s->ac_val[0] + y_size;
542 s->ac_val[2] = s->ac_val[1] + c_size;
545 CHECKED_ALLOCZ(s->coded_block, y_size);
547 /* divx501 bitstream reorder buffer */
548 CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
550 /* cbp, ac_pred, pred_dir */
551 CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
552 CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
555 if (s->h263_pred || s->h263_plus || !s->encoding) {
557 //MN: we need these for error resilience of intra-frames
558 CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
559 s->dc_val[1] = s->dc_val[0] + y_size;
560 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3). */
561 for(i=0;i<yc_size;i++)
562 s->dc_val[0][i] = 1024;
565 /* which mb is a intra block */
566 CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
567 memset(s->mbintra_table, 1, mb_array_size);
569 /* default structure is frame */
570 s->picture_structure = PICT_FRAME;
572 /* init macroblock skip table */
573 CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
574 //Note the +1 is for a quicker mpeg4 slice_end detection
575 CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
577 s->block= s->blocks[0];
580 s->pblocks[i] = (short *)(&s->block[i]);
/* state == -1 means "no parser frame in progress". */
583 s->parse_context.state= -1;
584 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
585 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
586 s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
587 s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
590 s->context_initialized = 1;
600 /* init common structure for both encoder and decoder */
/* Frees everything MPV_common_init (and alloc_picture) allocated and
 * clears context_initialized. av_freep() NULLs each pointer so a second
 * call is harmless. Mirrors the allocation order of MPV_common_init.
 * NOTE(review): loop headers around the field-MV frees are missing from
 * this truncated listing. */
601 void MPV_common_end(MpegEncContext *s)
605 av_freep(&s->parse_context.buffer);
606 s->parse_context.buffer_size=0;
608 av_freep(&s->mb_type);
609 av_freep(&s->p_mv_table_base);
610 av_freep(&s->b_forw_mv_table_base);
611 av_freep(&s->b_back_mv_table_base);
612 av_freep(&s->b_bidir_forw_mv_table_base);
613 av_freep(&s->b_bidir_back_mv_table_base);
614 av_freep(&s->b_direct_mv_table_base);
/* The offset working pointers must be cleared by hand — they do not own
 * the memory, so av_freep was only applied to the *_base pointers. */
616 s->b_forw_mv_table= NULL;
617 s->b_back_mv_table= NULL;
618 s->b_bidir_forw_mv_table= NULL;
619 s->b_bidir_back_mv_table= NULL;
620 s->b_direct_mv_table= NULL;
624 av_freep(&s->b_field_mv_table_base[i][j][k]);
625 s->b_field_mv_table[i][j][k]=NULL;
627 av_freep(&s->b_field_select_table[i][j]);
628 av_freep(&s->p_field_mv_table_base[i][j]);
629 s->p_field_mv_table[i][j]=NULL;
631 av_freep(&s->p_field_select_table[i]);
634 av_freep(&s->dc_val[0]);
635 av_freep(&s->ac_val[0]);
636 av_freep(&s->coded_block);
637 av_freep(&s->mbintra_table);
638 av_freep(&s->cbp_table);
639 av_freep(&s->pred_dir_table);
640 av_freep(&s->me.scratchpad);
641 av_freep(&s->me.map);
642 av_freep(&s->me.score_map);
644 av_freep(&s->mbskip_table);
645 av_freep(&s->prev_pict_types);
646 av_freep(&s->bitstream_buffer);
647 av_freep(&s->tex_pb_buffer);
648 av_freep(&s->pb2_buffer);
649 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
650 av_freep(&s->avctx->stats_out);
651 av_freep(&s->ac_stats);
652 av_freep(&s->error_status_table);
653 av_freep(&s->mb_index2xy);
654 av_freep(&s->lambda_table);
655 av_freep(&s->q_intra_matrix);
656 av_freep(&s->q_inter_matrix);
657 av_freep(&s->q_intra_matrix16);
658 av_freep(&s->q_inter_matrix16);
659 av_freep(&s->blocks);
660 av_freep(&s->input_picture);
661 av_freep(&s->reordered_input_picture);
662 av_freep(&s->dct_error_sum);
663 av_freep(&s->dct_offset);
/* Release every picture's buffers before freeing the array itself. */
666 for(i=0; i<MAX_PICTURE_COUNT; i++){
667 free_picture(s, &s->picture[i]);
670 av_freep(&s->picture);
671 avcodec_default_free_buffers(s->avctx);
672 s->context_initialized = 0;
675 s->current_picture_ptr= NULL;
677 if (s->visualization_buffer[i])
678 av_free(s->visualization_buffer[i]);
681 #ifdef CONFIG_ENCODERS
683 /* init video encoder */
/* Validates user options against the selected codec's capabilities, sets
 * per-codec encoder parameters (out_format, msmpeg4_version, quant bias,
 * delay, ...), allocates the common context via MPV_common_init, installs
 * default quant matrices and precomputes the quantizer tables, then
 * initializes rate control. Returns 0 on success, negative on error
 * (the "return -1" lines after each av_log are elided in this listing).
 * NOTE(review): truncated listing — many case labels, braces and returns
 * are missing; code kept byte-identical, verify against upstream. */
684 int MPV_encode_init(AVCodecContext *avctx)
686 MpegEncContext *s = avctx->priv_data;
688 int chroma_h_shift, chroma_v_shift;
690 avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
/* Mirror the user-facing AVCodecContext options into the context. */
692 s->bit_rate = avctx->bit_rate;
693 s->width = avctx->width;
694 s->height = avctx->height;
695 if(avctx->gop_size > 600){
696 av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
699 s->gop_size = avctx->gop_size;
701 s->flags= avctx->flags;
702 s->max_b_frames= avctx->max_b_frames;
703 s->codec_id= avctx->codec->id;
704 s->luma_elim_threshold = avctx->luma_elim_threshold;
705 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
706 s->strict_std_compliance= avctx->strict_std_compliance;
707 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
708 s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
709 s->mpeg_quant= avctx->mpeg_quant;
710 s->rtp_mode= !!avctx->rtp_payload_size;
712 if (s->gop_size <= 1) {
719 s->me_method = avctx->me_method;
722 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is needed whenever any masking model or QP-RD
 * is enabled. */
724 s->adaptive_quant= ( s->avctx->lumi_masking
725 || s->avctx->dark_masking
726 || s->avctx->temporal_cplx_masking
727 || s->avctx->spatial_cplx_masking
728 || s->avctx->p_masking
729 || (s->flags&CODEC_FLAG_QP_RD))
732 s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
733 s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
734 s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
/* Option-combination sanity checks; each failing check logs and (on an
 * elided line) returns an error. */
736 if(avctx->rc_max_rate && !avctx->rc_buffer_size){
737 av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
741 if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
742 av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
745 if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
746 && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
747 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
751 if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
752 av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n");
756 if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
757 av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
761 if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
762 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
766 if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
767 av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
771 if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
772 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
776 if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
777 av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n");
781 if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
782 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
786 if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
787 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
/* Default quantizer rounding bias per codec family; the inline comments
 * show the effective rounding formula. */
791 if(s->codec_id==CODEC_ID_MJPEG){
792 s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
793 s->inter_quant_bias= 0;
794 }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
795 s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
796 s->inter_quant_bias= 0;
798 s->intra_quant_bias=0;
799 s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
/* User-supplied biases override the per-codec defaults. */
802 if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
803 s->intra_quant_bias= avctx->intra_quant_bias;
804 if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
805 s->inter_quant_bias= avctx->inter_quant_bias;
807 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
/* MPEG-4 time base: reduce the frame rate to a resolution < 2^16. */
809 av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
810 s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
/* Per-codec setup; each case selects out_format and codec quirks. */
812 switch(avctx->codec->id) {
813 case CODEC_ID_MPEG1VIDEO:
814 s->out_format = FMT_MPEG1;
815 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
816 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
818 case CODEC_ID_MPEG2VIDEO:
819 s->out_format = FMT_MPEG1;
820 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
821 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
826 s->out_format = FMT_MJPEG;
827 s->intra_only = 1; /* force intra only for jpeg */
828 s->mjpeg_write_tables = 1; /* write all tables */
829 s->mjpeg_data_only_frames = 0; /* write all the needed headers */
830 s->mjpeg_vsample[0] = 1<<chroma_v_shift;
831 s->mjpeg_vsample[1] = 1;
832 s->mjpeg_vsample[2] = 1;
833 s->mjpeg_hsample[0] = 1<<chroma_h_shift;
834 s->mjpeg_hsample[1] = 1;
835 s->mjpeg_hsample[2] = 1;
836 if (mjpeg_init(s) < 0)
/* H.263 only supports a fixed set of picture sizes (format code 7 means
 * "not one of them"). */
843 if (h263_get_picture_format(s->width, s->height) == 7) {
844 av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n");
847 s->out_format = FMT_H263;
848 s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
853 s->out_format = FMT_H263;
856 s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
857 s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
858 s->modified_quant= s->h263_aic;
859 s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
860 s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
861 s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
862 s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
863 s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
866 /* These are just to be sure */
871 s->out_format = FMT_H263;
872 s->h263_flv = 2; /* format = 1; 11-bit codes */
873 s->unrestricted_mv = 1;
874 s->rtp_mode=0; /* don't allow GOB */
879 s->out_format = FMT_H263;
884 s->out_format = FMT_H263;
886 s->unrestricted_mv = 1;
887 s->low_delay= s->max_b_frames ? 0 : 1;
888 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
890 case CODEC_ID_MSMPEG4V1:
891 s->out_format = FMT_H263;
894 s->unrestricted_mv = 1;
895 s->msmpeg4_version= 1;
899 case CODEC_ID_MSMPEG4V2:
900 s->out_format = FMT_H263;
903 s->unrestricted_mv = 1;
904 s->msmpeg4_version= 2;
908 case CODEC_ID_MSMPEG4V3:
909 s->out_format = FMT_H263;
912 s->unrestricted_mv = 1;
913 s->msmpeg4_version= 3;
914 s->flipflop_rounding=1;
919 s->out_format = FMT_H263;
922 s->unrestricted_mv = 1;
923 s->msmpeg4_version= 4;
924 s->flipflop_rounding=1;
929 s->out_format = FMT_H263;
932 s->unrestricted_mv = 1;
933 s->msmpeg4_version= 5;
934 s->flipflop_rounding=1;
943 { /* set up some save defaults, some codecs might override them later */
949 default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
950 memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
951 memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
953 for(i=-16; i<16; i++){
954 default_fcode_tab[i + MAX_MV]= 1;
958 s->me.mv_penalty= default_mv_penalty;
959 s->fcode_tab= default_fcode_tab;
961 /* dont use mv_penalty table for crap MV as it would be confused */
962 //FIXME remove after fixing / removing old ME
963 if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
968 if (MPV_common_init(s) < 0)
971 if(s->modified_quant)
972 s->chroma_qscale_table= ff_h263_chroma_qscale_table;
973 s->progressive_frame=
974 s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME));
976 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
980 #ifdef CONFIG_ENCODERS
982 if (s->out_format == FMT_H263)
984 if(s->msmpeg4_version)
985 ff_msmpeg4_encode_init(s);
987 if (s->out_format == FMT_MPEG1)
988 ff_mpeg1_encode_init(s);
991 /* init default q matrix */
993 int j= s->dsp.idct_permutation[i];
995 if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
996 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
997 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
998 }else if(s->out_format == FMT_H263){
1000 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1004 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1005 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1007 if(s->avctx->intra_matrix)
1008 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1009 if(s->avctx->inter_matrix)
1010 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1013 /* precompute matrix */
1014 /* for mjpeg, we do include qscale in the matrix */
1015 if (s->out_format != FMT_MJPEG) {
1016 convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
1017 s->intra_matrix, s->intra_quant_bias, 1, 31);
1018 convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
1019 s->inter_matrix, s->inter_quant_bias, 1, 31);
1022 if(ff_rate_control_init(s) < 0)
1025 s->picture_number = 0;
1026 s->input_picture_number = 0;
1027 s->picture_in_gop_number = 0;
1028 s->fake_picture_number = 0;
1029 /* motion detector init */
/* Tears down the encoder: shuts down rate control, the MJPEG writer when
 * applicable, and frees the extradata buffer. The MPV_common_end() call
 * presumably sits on one of the elided lines — confirm upstream. */
1036 int MPV_encode_end(AVCodecContext *avctx)
1038 MpegEncContext *s = avctx->priv_data;
1044 ff_rate_control_uninit(s);
1047 if (s->out_format == FMT_MJPEG)
1050 av_freep(&avctx->extradata);
1055 #endif //CONFIG_ENCODERS
/* Builds the derived run/level lookup tables for an RLTable:
 * max_level[last][run], max_run[last][level] and index_run[last][run],
 * computed separately for the "not last" and "last" coefficient halves of
 * the VLC table. rl->n is used as the "unset" sentinel in index_run.
 * NOTE(review): the start/end selection per `last` half and some braces
 * are on elided lines; allocated tables are never freed (interned for the
 * process lifetime, matching the original design). */
1057 void init_rl(RLTable *rl)
1059 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
1060 uint8_t index_run[MAX_RUN+1];
1061 int last, run, level, start, end, i;
1063 /* compute max_level[], max_run[] and index_run[] */
1064 for(last=0;last<2;last++) {
1073 memset(max_level, 0, MAX_RUN + 1);
1074 memset(max_run, 0, MAX_LEVEL + 1);
1075 memset(index_run, rl->n, MAX_RUN + 1);
1076 for(i=start;i<end;i++) {
1077 run = rl->table_run[i];
1078 level = rl->table_level[i];
/* First entry for this run wins: table is ordered by code. */
1079 if (index_run[run] == rl->n)
1081 if (level > max_level[run])
1082 max_level[run] = level;
1083 if (run > max_run[level])
1084 max_run[level] = run;
/* Persist the per-half tables into heap copies owned by the RLTable. */
1086 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1087 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1088 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1089 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1090 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1091 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1095 /* draw the edges of width 'w' of an image of size width, height */
1096 //FIXME check that this is ok for mpeg4 interlaced
/* Pads a plane by replicating its border pixels outward by w pixels on
 * every side (top/bottom rows, left/right columns, then the four corner
 * squares), so that motion compensation may read outside the picture. */
1097 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
1099 uint8_t *ptr, *last_line;
1102 last_line = buf + (height - 1) * wrap;
1104 /* top and bottom */
1105 memcpy(buf - (i + 1) * wrap, buf, width);
1106 memcpy(last_line + (i + 1) * wrap, last_line, width);
1108 /* left and right */
1110 for(i=0;i<height;i++) {
1111 memset(ptr - w, ptr[0], w);
1112 memset(ptr + width, ptr[width-1], w);
/* Corners: replicate the four corner pixels into the w*w corner blocks.
 * (The trailing "top left/right" comments also cover the bottom rows.) */
1117 memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
1118 memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
1119 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* top left */
1120 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* top right */
/*
 * Returns the index of a free slot in s->picture[].  For shared pictures
 * (presumably the 'shared' path, elided here) any free slot is OK; the
 * non-shared path prefers slots that are both unallocated (data[0]==NULL)
 * and untyped (type==0) before falling back to typed-but-unallocated ones.
 * NOTE(review): the shared branch and the final failure handling are
 * elided in this excerpt.
 */
1124 int ff_find_unused_picture(MpegEncContext *s, int shared){
/* first choice: completely unused slot */
1128 for(i=0; i<MAX_PICTURE_COUNT; i++){
1129 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
/* second choice: unallocated but with a stale type */
1132 for(i=0; i<MAX_PICTURE_COUNT; i++){
1133 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
1135 for(i=0; i<MAX_PICTURE_COUNT; i++){
1136 if(s->picture[i].data[0]==NULL) return i;
/*
 * Refreshes the per-coefficient noise-reduction offsets from the running
 * DCT error statistics.  Counters are halved once dct_count exceeds 2^16
 * so the statistics form a decaying average.
 */
1144 static void update_noise_reduction(MpegEncContext *s){
1147 for(intra=0; intra<2; intra++){
/* decay: halve both error sums and the sample count */
1148 if(s->dct_count[intra] > (1<<16)){
1149 for(i=0; i<64; i++){
1150 s->dct_error_sum[intra][i] >>=1;
1152 s->dct_count[intra] >>= 1;
/* offset ~ noise_reduction * count / error_sum, rounded */
1155 for(i=0; i<64; i++){
1156 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1162 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
/*
 * Per-frame setup: releases stale reference frames, picks/allocates the
 * current picture, rotates last/next reference pointers, adjusts
 * line sizes for field pictures, selects the dequantizer and updates
 * noise-reduction state.
 * NOTE(review): many statements (returns, closing braces, loop headers)
 * are elided in this excerpt; comments below describe only what is
 * visible.
 */
1164 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1170 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1172 /* mark&release old frames */
1173 if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
1174 avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
1176 /* release forgotten pictures */
1177 /* if(mpeg124/h263) */
1179 for(i=0; i<MAX_PICTURE_COUNT; i++){
1180 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1181 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1182 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
1189 /* release non reference frames */
1190 for(i=0; i<MAX_PICTURE_COUNT; i++){
1191 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1192 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
/* reuse an image already set before header parsing, else grab a free slot */
1196 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1197 pic= (AVFrame*)s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1199 i= ff_find_unused_picture(s, 0);
1200 pic= (AVFrame*)&s->picture[i];
/* B frames are never referenced; others are reference=3 (both fields) */
1203 pic->reference= s->pict_type != B_TYPE ? 3 : 0;
1205 pic->coded_picture_number= s->coded_picture_number++;
1207 if( alloc_picture(s, (Picture*)pic, 0) < 0)
1210 s->current_picture_ptr= (Picture*)pic;
1211 s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
1212 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
1215 s->current_picture_ptr->pict_type= s->pict_type;
1216 // if(s->flags && CODEC_FLAG_QSCALE)
1217 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1218 s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
1220 copy_picture(&s->current_picture, s->current_picture_ptr);
/* rotate reference pointers (H.264 manages its own references) */
1222 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1223 if (s->pict_type != B_TYPE) {
1224 s->last_picture_ptr= s->next_picture_ptr;
1225 s->next_picture_ptr= s->current_picture_ptr;
1228 if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
1229 if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
1231 if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
1232 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1233 assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
1237 assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* field pictures: double the line sizes so rows address one field only */
1239 if(s->picture_structure!=PICT_FRAME){
1242 if(s->picture_structure == PICT_BOTTOM_FIELD){
1243 s->current_picture.data[i] += s->current_picture.linesize[i];
1245 s->current_picture.linesize[i] *= 2;
1246 s->last_picture.linesize[i] *=2;
1247 s->next_picture.linesize[i] *=2;
1252 s->hurry_up= s->avctx->hurry_up;
1253 s->error_resilience= avctx->error_resilience;
1255 /* set dequantizer, we cant do it during init as it might change for mpeg4
1256 and we cant do it in the header decode as init isnt called for mpeg4 there yet */
1257 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1258 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1259 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1260 }else if(s->out_format == FMT_H263){
1261 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1262 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1264 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1265 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* dct_error_sum is only allocated when NR is active while encoding */
1268 if(s->dct_error_sum){
1269 assert(s->avctx->noise_reduction && s->encoding);
1271 update_noise_reduction(s);
1275 if(s->avctx->xvmc_acceleration)
1276 return XVMC_field_start(s, avctx);
1281 /* generic function for encode/decode called after a frame has been coded/decoded */
/*
 * Per-frame teardown: pads the picture edges for motion prediction,
 * records the last picture type, copies current_picture back into the
 * picture array, releases non-reference frames, and (in the elided
 * branch) clears the local picture copies.
 * NOTE(review): some lines are elided in this excerpt.
 */
1282 void MPV_frame_end(MpegEncContext *s)
1285 /* draw edge for correct motion prediction if outside */
1287 //just to make sure that all data is rendered.
1288 if(s->avctx->xvmc_acceleration){
/* edge padding is skipped for B frames (never referenced) and EMU_EDGE */
1292 if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1293 draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
1294 draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1295 draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1299 s->last_pict_type = s->pict_type;
1300 if(s->pict_type!=B_TYPE){
1301 s->last_non_b_pict_type= s->pict_type;
1304 /* copy back current_picture variables */
1305 for(i=0; i<MAX_PICTURE_COUNT; i++){
1306 if(s->picture[i].data[0] == s->current_picture.data[0]){
1307 s->picture[i]= s->current_picture;
1311 assert(i<MAX_PICTURE_COUNT);
1315 /* release non reference frames */
1316 for(i=0; i<MAX_PICTURE_COUNT; i++){
1317 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1318 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1322 // clear copies, to avoid confusion
1324 memset(&s->last_picture, 0, sizeof(Picture));
1325 memset(&s->next_picture, 0, sizeof(Picture));
1326 memset(&s->current_picture, 0, sizeof(Picture));
1331 * draws a line from (ex, ey) -> (sx, sy).
1332 * @param w width of the image
1333 * @param h height of the image
1334 * @param stride stride/linesize of the image
1335 * @param color color of the arrow
/*
 * Fixed-point (16.16) DDA line drawer used by the motion-vector
 * visualization; 'color' is ADDED to the existing samples rather than
 * overwriting them.
 * NOTE(review): the swap of endpoints and the ex/ey re-derivation between
 * the clip and the draw loops are elided in this excerpt.
 */
1337 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1340 sx= clip(sx, 0, w-1);
1341 sy= clip(sy, 0, h-1);
1342 ex= clip(ex, 0, w-1);
1343 ey= clip(ey, 0, h-1);
1345 buf[sy*stride + sx]+= color;
/* step along the major axis; f is the minor-axis slope in 16.16 */
1347 if(ABS(ex - sx) > ABS(ey - sy)){
1352 buf+= sx + sy*stride;
1354 f= ((ey-sy)<<16)/ex;
1355 for(x= 0; x <= ex; x++){
1356 y= ((x*f) + (1<<15))>>16;
1357 buf[y*stride + x]+= color;
1364 buf+= sx + sy*stride;
1366 if(ey) f= ((ex-sx)<<16)/ey;
1368 for(y= 0; y <= ey; y++){
1369 x= ((y*f) + (1<<15))>>16;
1370 buf[y*stride + x]+= color;
1376 * draws an arrow from (ex, ey) -> (sx, sy).
1377 * @param w width of the image
1378 * @param h height of the image
1379 * @param stride stride/linesize of the image
1380 * @param color color of the arrow
/*
 * Draws a motion-vector arrow: the shaft plus, for vectors longer than 3
 * pixels, two short head strokes at the start point.  Endpoints are
 * clipped with 100 px slack so off-screen vectors still render partly.
 */
1382 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1385 sx= clip(sx, -100, w+100);
1386 sy= clip(sy, -100, h+100);
1387 ex= clip(ex, -100, w+100);
1388 ey= clip(ey, -100, h+100);
/* only draw a head when the vector is visibly long (> 3 px) */
1393 if(dx*dx + dy*dy > 3*3){
1396 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1398 //FIXME subpixel accuracy
1399 rx= ROUNDED_DIV(rx*3<<4, length);
1400 ry= ROUNDED_DIV(ry*3<<4, length);
/* the two head strokes are the (rx,ry) vector and its 90° rotation */
1402 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1403 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1405 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1409 * prints debugging info for the given picture.
/*
 * Two debug paths: (1) textual per-macroblock dump (skip count, QP, MB
 * type glyphs) via av_log; (2) visual overlays — QP shading, MB-type
 * coloring, partition grid and motion-vector arrows — drawn into a copy
 * of the picture (s->visualization_buffer) so the real buffers are not
 * trashed.  NOTE(review): many closing braces and some statements are
 * elided in this excerpt.
 */
1411 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1413 if(!pict || !pict->mb_type) return;
1415 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1418 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1419 switch (pict->pict_type) {
1420 case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1421 case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1422 case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1423 case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1424 case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1425 case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1427 for(y=0; y<s->mb_height; y++){
1428 for(x=0; x<s->mb_width; x++){
1429 if(s->avctx->debug&FF_DEBUG_SKIP){
/* skip counts are clamped to a single digit for the dump */
1430 int count= s->mbskip_table[x + y*s->mb_stride];
1431 if(count>9) count=9;
1432 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1434 if(s->avctx->debug&FF_DEBUG_QP){
1435 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1437 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1438 int mb_type= pict->mb_type[x + y*s->mb_stride];
1439 //Type & MV direction
1441 av_log(s->avctx, AV_LOG_DEBUG, "P");
1442 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1443 av_log(s->avctx, AV_LOG_DEBUG, "A");
1444 else if(IS_INTRA4x4(mb_type))
1445 av_log(s->avctx, AV_LOG_DEBUG, "i");
1446 else if(IS_INTRA16x16(mb_type))
1447 av_log(s->avctx, AV_LOG_DEBUG, "I");
1448 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1449 av_log(s->avctx, AV_LOG_DEBUG, "d");
1450 else if(IS_DIRECT(mb_type))
1451 av_log(s->avctx, AV_LOG_DEBUG, "D");
1452 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1453 av_log(s->avctx, AV_LOG_DEBUG, "g");
1454 else if(IS_GMC(mb_type))
1455 av_log(s->avctx, AV_LOG_DEBUG, "G");
1456 else if(IS_SKIP(mb_type))
1457 av_log(s->avctx, AV_LOG_DEBUG, "S");
1458 else if(!USES_LIST(mb_type, 1))
1459 av_log(s->avctx, AV_LOG_DEBUG, ">");
1460 else if(!USES_LIST(mb_type, 0))
1461 av_log(s->avctx, AV_LOG_DEBUG, "<");
1463 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1464 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second glyph: partitioning (+ 16x8, - 8x16, ¦ 8x8, blank 16x16) */
1469 av_log(s->avctx, AV_LOG_DEBUG, "+");
1470 else if(IS_16X8(mb_type))
1471 av_log(s->avctx, AV_LOG_DEBUG, "-");
1472 else if(IS_8X16(mb_type))
1473 av_log(s->avctx, AV_LOG_DEBUG, "¦");
1474 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1475 av_log(s->avctx, AV_LOG_DEBUG, " ");
1477 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third glyph: '=' marks interlaced MBs (H.264 only) */
1480 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1481 av_log(s->avctx, AV_LOG_DEBUG, "=");
1483 av_log(s->avctx, AV_LOG_DEBUG, " ");
1485 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1487 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlay path ---- */
1491 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1492 const int shift= 1 + s->quarter_sample;
1496 int h_chroma_shift, v_chroma_shift;
1497 s->low_delay=0; //needed to see the vectors without trashing the buffers
1499 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* draw into a private copy so the decoded picture stays intact */
1501 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*s->height:pict->linesize[i]*s->height >> v_chroma_shift);
1502 pict->data[i]= s->visualization_buffer[i];
1504 pict->type= FF_BUFFER_TYPE_COPY;
1507 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1509 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1510 const int mb_index= mb_x + mb_y*s->mb_stride;
1511 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0: P forward, 1: B forward, 2: B backward */
1513 for(type=0; type<3; type++){
1516 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1520 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1524 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1529 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per 8x8, 16x8 or 16x16 partition depending on MB type */
1532 if(IS_8X8(pict->mb_type[mb_index])){
1535 int sx= mb_x*16 + 4 + 8*(i&1);
1536 int sy= mb_y*16 + 4 + 8*(i>>1);
1537 int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1538 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1539 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1540 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1542 }else if(IS_16X8(pict->mb_type[mb_index])){
1546 int sy=mb_y*16 + 4 + 8*i;
1547 int xy=1 + mb_x*2 + (mb_y*2 + 1 + i)*(s->mb_width*2 + 2);
1548 int mx=(pict->motion_val[direction][xy][0]>>shift) + sx;
1549 int my=(pict->motion_val[direction][xy][1]>>shift) + sy;
1550 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1553 int sx= mb_x*16 + 8;
1554 int sy= mb_y*16 + 8;
1555 int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1556 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1557 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1558 draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
/* QP visualization: fill the chroma 8x8 with a gray level ~ qscale */
1562 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1563 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1566 *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
1567 *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualization: color the chroma planes by type */
1570 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1571 int mb_type= pict->mb_type[mb_index];
1574 #define COLOR(theta, r)\
1575 u= (int)(128 + r*cos(theta*3.141592/180));\
1576 v= (int)(128 + r*sin(theta*3.141592/180));
1580 if(IS_PCM(mb_type)){
1582 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1584 }else if(IS_INTRA4x4(mb_type)){
1586 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1588 }else if(IS_DIRECT(mb_type)){
1590 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1592 }else if(IS_GMC(mb_type)){
1594 }else if(IS_SKIP(mb_type)){
1596 }else if(!USES_LIST(mb_type, 1)){
1598 }else if(!USES_LIST(mb_type, 0)){
1601 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the u/v byte across a 64-bit word for fast row fills */
1605 u*= 0x0101010101010101ULL;
1606 v*= 0x0101010101010101ULL;
1608 *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
1609 *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
/* XOR-toggle luma samples to draw the partition grid */
1613 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1614 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1615 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1617 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1619 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1622 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1626 s->mbskip_table[mb_index]=0;
1632 #ifdef CONFIG_ENCODERS
/*
 * Sum of absolute errors of a 16x16 luma block against a constant
 * reference value 'ref' (presumably the block mean — see caller).
 * NOTE(review): the accumulator init and return are elided here.
 */
1634 static int get_sae(uint8_t *src, int ref, int stride){
1638 for(y=0; y<16; y++){
1639 for(x=0; x<16; x++){
1640 acc+= ABS(src[x+y*stride] - ref);
/*
 * Counts 16x16 blocks that look cheaper to code intra than inter: for
 * each macroblock, compares the SAD against the previous frame with the
 * SAE against the block's own mean (a proxy for intra cost) and counts
 * the block when sae + 500 < sad.  Used by the B-frame strategy.
 * NOTE(review): w/h setup and the return are elided in this excerpt.
 */
1647 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1654 for(y=0; y<h; y+=16){
1655 for(x=0; x<w; x+=16){
1656 int offset= x + y*stride;
1657 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
/* mean of the 16x16 block: pix_sum / 256, rounded */
1658 int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1659 int sae = get_sae(src + offset, mean, stride);
1661 acc+= sae + 500 < sad;
/*
 * Queues a user-supplied frame for encoding.  When line sizes match and
 * the user preserves the buffer, the input data pointers are referenced
 * directly ("direct" path); otherwise the frame is copied row by row
 * into an internally allocated picture.  The picture is then appended to
 * s->input_picture[] (a shift register of depth max_b_frames+1).
 * NOTE(review): several lines (loop headers, returns, the direct/copy
 * branch structure) are elided in this excerpt.
 */
1668 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1671 const int encoding_delay= s->max_b_frames;
/* direct referencing needs matching strides and a preserved input buffer */
1675 if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1676 if(pic_arg->linesize[0] != s->linesize) direct=0;
1677 if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1678 if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1680 // av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1683 i= ff_find_unused_picture(s, 1);
1685 pic= (AVFrame*)&s->picture[i];
1689 pic->data[i]= pic_arg->data[i];
1690 pic->linesize[i]= pic_arg->linesize[i];
1692 alloc_picture(s, (Picture*)pic, 1);
1695 i= ff_find_unused_picture(s, 0);
1697 pic= (AVFrame*)&s->picture[i];
1700 alloc_picture(s, (Picture*)pic, 0);
/* if the user's buffer is our own (offset by edge), skip the copy */
1702 if( pic->data[0] + offset == pic_arg->data[0]
1703 && pic->data[1] + offset == pic_arg->data[1]
1704 && pic->data[2] + offset == pic_arg->data[2]){
1707 int h_chroma_shift, v_chroma_shift;
1708 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1711 int src_stride= pic_arg->linesize[i];
1712 int dst_stride= i ? s->uvlinesize : s->linesize;
1713 int h_shift= i ? h_chroma_shift : 0;
1714 int v_shift= i ? v_chroma_shift : 0;
1715 int w= s->width >>h_shift;
1716 int h= s->height>>v_shift;
1717 uint8_t *src= pic_arg->data[i];
1718 uint8_t *dst= pic->data[i] + offset;
/* equal strides: one big memcpy; else per-row copy (elided) */
1720 if(src_stride==dst_stride)
1721 memcpy(dst, src, src_stride*h);
1724 memcpy(dst, src, w);
1732 copy_picture_attributes(pic, pic_arg);
1734 pic->display_picture_number= s->input_picture_number++;
1737 /* shift buffer entries */
1738 for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1739 s->input_picture[i-1]= s->input_picture[i];
1741 s->input_picture[encoding_delay]= (Picture*)pic;
/*
 * Decides the coding type and order of queued input pictures: forces an
 * I frame when there is no reference (or intra-only), otherwise picks a
 * run of B frames according to b_frame_strategy (user types / fixed /
 * intra-count heuristic), reorders them after their P/I anchor, and sets
 * up s->new_picture / s->current_picture_ptr for the next encode.
 * NOTE(review): numerous lines (braces, else branches, returns) are
 * elided in this excerpt.
 */
1746 static void select_input_picture(MpegEncContext *s){
1749 for(i=1; i<MAX_PICTURE_COUNT; i++)
1750 s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1751 s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1753 /* set next picture types & ordering */
1754 if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
/* no reference available (or intra-only): must code an I frame */
1755 if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1756 s->reordered_input_picture[0]= s->input_picture[0];
1757 s->reordered_input_picture[0]->pict_type= I_TYPE;
1758 s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
/* two-pass: take the picture types recorded in the first pass */
1762 if(s->flags&CODEC_FLAG_PASS2){
1763 for(i=0; i<s->max_b_frames+1; i++){
1764 int pict_num= s->input_picture[0]->display_picture_number + i;
1765 int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1766 s->input_picture[i]->pict_type= pict_type;
1768 if(i + 1 >= s->rc_context.num_entries) break;
1772 if(s->input_picture[0]->pict_type){
1773 /* user selected pict_type */
1774 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1775 if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1778 if(b_frames > s->max_b_frames){
1779 av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
1780 b_frames = s->max_b_frames;
/* strategy 0: always use the maximum number of B frames available */
1782 }else if(s->avctx->b_frame_strategy==0){
1783 b_frames= s->max_b_frames;
1784 while(b_frames && !s->input_picture[b_frames]) b_frames--;
/* strategy 1: stop the B run where intra-count says inter coding fails */
1785 }else if(s->avctx->b_frame_strategy==1){
1786 for(i=1; i<s->max_b_frames+1; i++){
1787 if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
1788 s->input_picture[i]->b_frame_score=
1789 get_intra_count(s, s->input_picture[i ]->data[0],
1790 s->input_picture[i-1]->data[0], s->linesize) + 1;
1793 for(i=0; i<s->max_b_frames; i++){
1794 if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1797 b_frames= FFMAX(0, i-1);
1800 for(i=0; i<b_frames+1; i++){
1801 s->input_picture[i]->b_frame_score=0;
1804 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1809 //static int b_count=0;
1810 //b_count+= b_frames;
1811 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
/* the anchor (after b_frames Bs) becomes I at GOP end, else P */
1813 s->reordered_input_picture[0]= s->input_picture[b_frames];
1814 if( s->picture_in_gop_number + b_frames >= s->gop_size
1815 || s->reordered_input_picture[0]->pict_type== I_TYPE)
1816 s->reordered_input_picture[0]->pict_type= I_TYPE;
1818 s->reordered_input_picture[0]->pict_type= P_TYPE;
1819 s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
1820 for(i=0; i<b_frames; i++){
1821 s->reordered_input_picture[i+1]= s->input_picture[i];
1822 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1823 s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
1828 if(s->reordered_input_picture[0]){
1829 s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1831 copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1833 if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1834 // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
1836 int i= ff_find_unused_picture(s, 0);
1837 Picture *pic= &s->picture[i];
1839 /* mark us unused / free shared pic */
1841 s->reordered_input_picture[0]->data[i]= NULL;
1842 s->reordered_input_picture[0]->type= 0;
1844 copy_picture_attributes((AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
1845 pic->reference = s->reordered_input_picture[0]->reference;
1847 alloc_picture(s, pic, 0);
1849 s->current_picture_ptr= pic;
1851 // input is not a shared pix -> reuse buffer for current_pix
1853 assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1854 || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1856 s->current_picture_ptr= s->reordered_input_picture[0];
/* skip the left/top edge so coded data starts inside the border */
1858 s->new_picture.data[i]+=16;
1861 copy_picture(&s->current_picture, s->current_picture_ptr);
1863 s->picture_number= s->new_picture.display_picture_number;
1864 //printf("dpn:%d\n", s->picture_number);
1866 memset(&s->new_picture, 0, sizeof(Picture));
/*
 * Top-level encode entry point: queues/reorders the input frame, runs
 * the encoder on the selected picture, writes pass-1 stats, handles VBV
 * stuffing bytes and updates the MPEG-1/2 vbv_delay field for CBR.
 * Returns the number of bytes written.
 * NOTE(review): various lines (error returns, loop headers, braces) are
 * elided in this excerpt.
 */
1870 int MPV_encode_picture(AVCodecContext *avctx,
1871 unsigned char *buf, int buf_size, void *data)
1873 MpegEncContext *s = avctx->priv_data;
1874 AVFrame *pic_arg = data;
1875 int i, stuffing_count;
1877 if(avctx->pix_fmt != PIX_FMT_YUV420P){
1878 av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
1882 init_put_bits(&s->pb, buf, buf_size);
1884 s->picture_in_gop_number++;
1886 load_input_picture(s, pic_arg);
1888 select_input_picture(s);
/* output? — only when reordering produced a picture to code */
1891 if(s->new_picture.data[0]){
1892 s->pict_type= s->new_picture.pict_type;
1894 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1895 MPV_frame_start(s, avctx);
1897 encode_picture(s, s->picture_number);
/* export per-frame statistics to the caller */
1899 avctx->real_pict_num = s->picture_number;
1900 avctx->header_bits = s->header_bits;
1901 avctx->mv_bits = s->mv_bits;
1902 avctx->misc_bits = s->misc_bits;
1903 avctx->i_tex_bits = s->i_tex_bits;
1904 avctx->p_tex_bits = s->p_tex_bits;
1905 avctx->i_count = s->i_count;
1906 avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1907 avctx->skip_count = s->skip_count;
1911 if (s->out_format == FMT_MJPEG)
1912 mjpeg_picture_trailer(s);
1914 if(s->flags&CODEC_FLAG_PASS1)
1915 ff_write_pass1_stats(s);
1918 avctx->error[i] += s->current_picture_ptr->error[i];
1922 flush_put_bits(&s->pb);
1923 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
/* VBV: pad the frame with stuffing so the buffer model holds */
1925 stuffing_count= ff_vbv_update(s, s->frame_bits);
1927 switch(s->codec_id){
1928 case CODEC_ID_MPEG1VIDEO:
1929 case CODEC_ID_MPEG2VIDEO:
1930 while(stuffing_count--){
1931 put_bits(&s->pb, 8, 0);
1934 case CODEC_ID_MPEG4:
/* MPEG-4 stuffing: a stuffing start code (0x000001C3) then 0xFF bytes */
1935 put_bits(&s->pb, 16, 0);
1936 put_bits(&s->pb, 16, 0x1C3);
1937 stuffing_count -= 4;
1938 while(stuffing_count--){
1939 put_bits(&s->pb, 8, 0xFF);
1943 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1945 flush_put_bits(&s->pb);
1946 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1949 /* update mpeg1/2 vbv_delay for CBR */
1950 if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate){
1953 assert(s->repeat_first_field==0);
/* vbv_delay in 90 kHz units, patched into the already-written header */
1955 vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
1956 assert(vbv_delay < 0xFFFF);
1958 s->vbv_delay_ptr[0] &= 0xF8;
1959 s->vbv_delay_ptr[0] |= vbv_delay>>13;
1960 s->vbv_delay_ptr[1] = vbv_delay>>5;
1961 s->vbv_delay_ptr[2] &= 0x07;
1962 s->vbv_delay_ptr[2] |= vbv_delay<<3;
1965 s->total_bits += s->frame_bits;
1966 avctx->frame_bits = s->frame_bits;
1968 return s->frame_bits/8;
1971 #endif //CONFIG_ENCODERS
/*
 * One-warp-point global motion compensation (MPEG-4 GMC with a single
 * sprite point): translational offset with 1/16-pel interpolation via
 * dsp.gmc1, falling back to plain (no-rnd) half-pel put when the
 * fractional part is zero.  Handles luma then both chroma planes;
 * chroma is skipped for CODEC_FLAG_GRAY.
 * NOTE(review): a few lines (src_x/src_y adjustment after the clip,
 * some braces) are elided in this excerpt.
 */
1973 static inline void gmc1_motion(MpegEncContext *s,
1974 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1976 uint8_t **ref_picture, int src_offset)
1979 int offset, src_x, src_y, linesize, uvlinesize;
1980 int motion_x, motion_y;
1983 motion_x= s->sprite_offset[0][0];
1984 motion_y= s->sprite_offset[0][1];
/* integer part positions the source block; rest becomes the sub-pel phase */
1985 src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1986 src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1987 motion_x<<=(3-s->sprite_warping_accuracy);
1988 motion_y<<=(3-s->sprite_warping_accuracy);
1989 src_x = clip(src_x, -16, s->width);
1990 if (src_x == s->width)
1992 src_y = clip(src_y, -16, s->height);
1993 if (src_y == s->height)
1996 linesize = s->linesize;
1997 uvlinesize = s->uvlinesize;
1999 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
2001 dest_y+=dest_offset;
/* emulate the edge when the 17x17 read window leaves the padded area */
2002 if(s->flags&CODEC_FLAG_EMU_EDGE){
2003 if( (unsigned)src_x >= s->h_edge_pos - 17
2004 || (unsigned)src_y >= s->v_edge_pos - 17){
2005 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2006 ptr= s->edge_emu_buffer;
/* sub-pel phase present -> gmc1 interpolation, two 8-wide halves */
2010 if((motion_x|motion_y)&7){
2011 s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
2012 s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
2016 dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
2017 if (s->no_rounding){
2018 s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
2020 s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
2024 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma: same scheme at half resolution, 9x9 emu window */
2026 motion_x= s->sprite_offset[1][0];
2027 motion_y= s->sprite_offset[1][1];
2028 src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
2029 src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
2030 motion_x<<=(3-s->sprite_warping_accuracy);
2031 motion_y<<=(3-s->sprite_warping_accuracy);
2032 src_x = clip(src_x, -8, s->width>>1);
2033 if (src_x == s->width>>1)
2035 src_y = clip(src_y, -8, s->height>>1);
2036 if (src_y == s->height>>1)
2039 offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
2040 ptr = ref_picture[1] + offset;
2041 if(s->flags&CODEC_FLAG_EMU_EDGE){
2042 if( (unsigned)src_x >= (s->h_edge_pos>>1) - 9
2043 || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
2044 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2045 ptr= s->edge_emu_buffer;
2049 s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
2051 ptr = ref_picture[2] + offset;
2053 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2054 ptr= s->edge_emu_buffer;
2056 s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
/*
 * General (affine) global motion compensation for MPEG-4 sprites with
 * multiple warp points: computes the per-macroblock warp origin (ox, oy)
 * from sprite_offset and sprite_delta and lets dsp.gmc do the warped
 * fetch with rounding control and edge clamping.  Luma is done as two
 * 8-wide columns; chroma at half resolution; chroma skipped for GRAY.
 */
2061 static inline void gmc_motion(MpegEncContext *s,
2062 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2064 uint8_t **ref_picture, int src_offset)
2067 int linesize, uvlinesize;
2068 const int a= s->sprite_warping_accuracy;
2071 linesize = s->linesize;
2072 uvlinesize = s->uvlinesize;
2074 ptr = ref_picture[0] + src_offset;
2076 dest_y+=dest_offset;
/* warp origin for this MB in sprite accuracy units */
2078 ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
2079 oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
2081 s->dsp.gmc(dest_y, ptr, linesize, 16,
2084 s->sprite_delta[0][0], s->sprite_delta[0][1],
2085 s->sprite_delta[1][0], s->sprite_delta[1][1],
2086 a+1, (1<<(2*a+1)) - s->no_rounding,
2087 s->h_edge_pos, s->v_edge_pos);
/* right 8-pixel column: advance origin by 8 steps of the x delta */
2088 s->dsp.gmc(dest_y+8, ptr, linesize, 16,
2089 ox + s->sprite_delta[0][0]*8,
2090 oy + s->sprite_delta[1][0]*8,
2091 s->sprite_delta[0][0], s->sprite_delta[0][1],
2092 s->sprite_delta[1][0], s->sprite_delta[1][1],
2093 a+1, (1<<(2*a+1)) - s->no_rounding,
2094 s->h_edge_pos, s->v_edge_pos);
2096 if(s->flags&CODEC_FLAG_GRAY) return;
2099 dest_cb+=dest_offset>>1;
2100 dest_cr+=dest_offset>>1;
/* chroma warp origin (half-resolution MB coordinates) */
2102 ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
2103 oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
2105 ptr = ref_picture[1] + (src_offset>>1);
2106 s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
2109 s->sprite_delta[0][0], s->sprite_delta[0][1],
2110 s->sprite_delta[1][0], s->sprite_delta[1][1],
2111 a+1, (1<<(2*a+1)) - s->no_rounding,
2112 s->h_edge_pos>>1, s->v_edge_pos>>1);
2114 ptr = ref_picture[2] + (src_offset>>1);
2115 s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
2118 s->sprite_delta[0][0], s->sprite_delta[0][1],
2119 s->sprite_delta[1][0], s->sprite_delta[1][1],
2120 a+1, (1<<(2*a+1)) - s->no_rounding,
2121 s->h_edge_pos>>1, s->v_edge_pos>>1);
2125 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
2126 * @param buf destination buffer
2127 * @param src source buffer
2128 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
2129 * @param block_w width of block
2130 * @param block_h height of block
2131 * @param src_x x coordinate of the top left sample of the block in the source buffer
2132 * @param src_y y coordinate of the top left sample of the block in the source buffer
2133 * @param w width of the source buffer
2134 * @param h height of the source buffer
/*
 * Edge emulation for motion compensation: the part of the block that
 * lies inside the source picture is copied; the rest is filled by
 * replicating the nearest valid row/column, so callers can read a full
 * block_w x block_h window even when the MV points outside the picture.
 * NOTE(review): the clamping of src_x/src_y after the large-offset
 * adjustments is elided in this excerpt.
 */
2136 void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
2137 int src_x, int src_y, int w, int h){
2139 int start_y, start_x, end_y, end_x;
/* fold blocks entirely below/above the picture onto the last/first row */
2142 src+= (h-1-src_y)*linesize;
2144 }else if(src_y<=-block_h){
2145 src+= (1-block_h-src_y)*linesize;
/* same folding horizontally */
2151 }else if(src_x<=-block_w){
2152 src+= (1-block_w-src_x);
/* portion of the block covered by real picture data */
2156 start_y= FFMAX(0, -src_y);
2157 start_x= FFMAX(0, -src_x);
2158 end_y= FFMIN(block_h, h-src_y);
2159 end_x= FFMIN(block_w, w-src_x);
2161 // copy existing part
2162 for(y=start_y; y<end_y; y++){
2163 for(x=start_x; x<end_x; x++){
2164 buf[x + y*linesize]= src[x + y*linesize];
/* top padding: replicate the first valid row upward */
2169 for(y=0; y<start_y; y++){
2170 for(x=start_x; x<end_x; x++){
2171 buf[x + y*linesize]= buf[x + start_y*linesize];
/* bottom padding: replicate the last valid row downward */
2176 for(y=end_y; y<block_h; y++){
2177 for(x=start_x; x<end_x; x++){
2178 buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
/* left/right padding: replicate the first/last valid column */
2182 for(y=0; y<block_h; y++){
2184 for(x=0; x<start_x; x++){
2185 buf[x + y*linesize]= buf[start_x + y*linesize];
2189 for(x=end_x; x<block_w; x++){
2190 buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
/*
 * Half-pel motion compensation for one plane: derives the half-pel
 * interpolation index dxy from the MV's low bits, clips the source
 * position, emulates the edge when the (w+1)x(h+1) read window leaves
 * the padded picture, and applies the selected pix_op.
 * NOTE(review): a few lines (the src_x==width adjustment, return) are
 * elided in this excerpt.
 */
2195 static inline int hpel_motion(MpegEncContext *s,
2196 uint8_t *dest, uint8_t *src,
2197 int src_x, int src_y,
2198 int width, int height, int stride,
2199 int h_edge_pos, int v_edge_pos,
2200 int w, int h, op_pixels_func *pix_op,
2201 int motion_x, int motion_y)
/* dxy selects among full/half-pel H/V interpolation variants (0..3) */
2206 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2207 src_x += motion_x >> 1;
2208 src_y += motion_y >> 1;
2210 /* WARNING: do not forget half pels */
2211 src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
2214 src_y = clip(src_y, -16, height);
2215 if (src_y == height)
2217 src += src_y * stride + src_x;
2219 if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
/* half-pel filters read one extra row/column, hence w+1 x h+1 */
2220 if( (unsigned)src_x > h_edge_pos - (motion_x&1) - w
2221 || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
2222 ff_emulated_edge_mc(s->edge_emu_buffer, src, stride, w+1, h+1,
2223 src_x, src_y, h_edge_pos, v_edge_pos);
2224 src= s->edge_emu_buffer;
2228 pix_op[dxy](dest, src, stride, h);
2232 /* apply one mpeg motion vector to the three components */
/*
 * Classic MPEG half-pel motion compensation for a full macroblock:
 * luma via hpel_motion, then chroma at half resolution with its own
 * rounding (H.263 uses the special /2 rounding table — elided here).
 * 'field_based' halves the vertical extent and doubles line sizes for
 * field pictures.  Chroma is skipped for CODEC_FLAG_GRAY.
 * NOTE(review): the quarter-sample adjustment, the mx/my derivation and
 * the emu-edge condition lines are partly elided in this excerpt.
 */
2233 static inline void mpeg_motion(MpegEncContext *s,
2234 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2236 uint8_t **ref_picture, int src_offset,
2237 int field_based, op_pixels_func (*pix_op)[4],
2238 int motion_x, int motion_y, int h)
2241 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, uvlinesize;
2244 if(s->quarter_sample)
2251 height = s->height >> field_based;
2252 v_edge_pos = s->v_edge_pos >> field_based;
2253 uvlinesize = s->current_picture.linesize[1] << field_based;
/* luma block via the shared half-pel helper */
2256 dest_y + dest_offset, ref_picture[0] + src_offset,
2257 s->mb_x * 16, s->mb_y * (16 >> field_based),
2258 s->width, height, s->current_picture.linesize[0] << field_based,
2259 s->h_edge_pos, v_edge_pos,
2261 motion_x, motion_y);
2264 if(s->flags&CODEC_FLAG_GRAY) return;
/* H.263 chroma MV rounding differs from MPEG-1/2 (elided table lookup) */
2266 if (s->out_format == FMT_H263) {
2268 if ((motion_x & 3) != 0)
2270 if ((motion_y & 3) != 0)
2277 dxy = ((my & 1) << 1) | (mx & 1);
2282 src_x = s->mb_x * 8 + mx;
2283 src_y = s->mb_y * (8 >> field_based) + my;
2284 src_x = clip(src_x, -8, s->width >> 1);
2285 if (src_x == (s->width >> 1))
2287 src_y = clip(src_y, -8, height >> 1);
2288 if (src_y == (height >> 1))
2290 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2291 ptr = ref_picture[1] + offset;
2293 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
2294 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2295 ptr= s->edge_emu_buffer + (src_offset >> 1);
2297 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2299 ptr = ref_picture[2] + offset;
2301 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
2302 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2303 ptr= s->edge_emu_buffer + (src_offset >> 1);
2305 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2307 //FIXME move to dsputil, avg variant, 16x16 version
/*
 * Overlapped block motion compensation: blend the 5 predictions
 * (mid, top, left, right, bottom neighbours) of one 8x8 block into dst
 * using fixed 1/8-step weights. Weight rows sum to 8; the "+4 >> 3"
 * in OBMC_FILTER rounds to nearest.
 * NOTE(review): the row-loop scaffolding is elided in this dump; only the
 * per-row filter invocations are visible.
 */
2308 static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
2310 uint8_t * const top = src[1];
2311 uint8_t * const left = src[2];
2312 uint8_t * const mid = src[0];
2313 uint8_t * const right = src[3];
2314 uint8_t * const bottom= src[4];
/* weighted 5-tap blend of one pixel: (t,l,m,r,b) are eighths summing to 8 */
2315 #define OBMC_FILTER(x, t, l, m, r, b)\
2316 dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
/* apply the same weights to a 2x2 pixel group */
2317 #define OBMC_FILTER4(x, t, l, m, r, b)\
2318 OBMC_FILTER(x , t, l, m, r, b);\
2319 OBMC_FILTER(x+1 , t, l, m, r, b);\
2320 OBMC_FILTER(x +stride, t, l, m, r, b);\
2321 OBMC_FILTER(x+1+stride, t, l, m, r, b);
/* top rows: top neighbour contributes, bottom does not */
2324 OBMC_FILTER (x , 2, 2, 4, 0, 0);
2325 OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
2326 OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
2327 OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
2328 OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
2329 OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
2331 OBMC_FILTER (x , 1, 2, 5, 0, 0);
2332 OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
2333 OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
2334 OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
2336 OBMC_FILTER4(x , 1, 2, 5, 0, 0);
2337 OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
2338 OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
2339 OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
/* middle/bottom rows: weight shifts from top to bottom neighbour */
2341 OBMC_FILTER4(x , 0, 2, 5, 0, 1);
2342 OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
2343 OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
2344 OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
2346 OBMC_FILTER (x , 0, 2, 5, 0, 1);
2347 OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
2348 OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
2349 OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
2350 OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
2351 OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
2353 OBMC_FILTER (x , 0, 2, 4, 0, 2);
2354 OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
2355 OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
2356 OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
2359 /* obmc for 1 8x8 luma block */
/*
 * Build the 5 half-pel predictions (mid + 4 neighbour MVs) for one 8x8 luma
 * block and blend them with put_obmc(). Neighbour MVs identical to the mid
 * MV reuse its prediction instead of recomputing (visible early-out below).
 * NOTE(review): loop header and some statements are elided in this dump.
 */
2360 static inline void obmc_motion(MpegEncContext *s,
2361 uint8_t *dest, uint8_t *src,
2362 int src_x, int src_y,
2363 op_pixels_func *pix_op,
2364 int16_t mv[5][2]/* mid top left right bottom*/)
/* OBMC is only defined for half-pel resolution here */
2370 assert(s->quarter_sample==0);
/* neighbour MV equals the mid MV -> share the mid prediction */
2373 if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
/* scratch predictions live in edge_emu_buffer, tiled 2x2 per 8x8 block */
2376 ptr[i]= s->edge_emu_buffer + 16 + 8*(i&1) + s->linesize*8*(i>>1);
2377 hpel_motion(s, ptr[i], src,
2379 s->width, s->height, s->linesize,
2380 s->h_edge_pos, s->v_edge_pos,
2382 mv[i][0], mv[i][1]);
2386 put_obmc(dest, ptr, s->linesize);
/*
 * Quarter-pel motion compensation of one macroblock (luma at 1/4-pel via
 * qpix_op, chroma at 1/2-pel via pix_op). field_based halves the vertical
 * resolution and doubles the strides, as in mpeg_motion().
 * NOTE(review): elided dump — declarations and some branches are missing;
 * comments cover only the visible statements.
 */
2389 static inline void qpel_motion(MpegEncContext *s,
2390 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2392 uint8_t **ref_picture, int src_offset,
2393 int field_based, op_pixels_func (*pix_op)[4],
2394 qpel_mc_func (*qpix_op)[16],
2395 int motion_x, int motion_y, int h)
2398 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
/* 4 bits of sub-pel phase select one of 16 quarter-pel functions */
2401 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2402 src_x = s->mb_x * 16 + (motion_x >> 2);
2403 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
2405 height = s->height >> field_based;
2406 v_edge_pos = s->v_edge_pos >> field_based;
2407 src_x = clip(src_x, -16, s->width);
2408 if (src_x == s->width)
2410 src_y = clip(src_y, -16, height);
2411 if (src_y == height)
2413 linesize = s->linesize << field_based;
2414 uvlinesize = s->uvlinesize << field_based;
2415 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
2416 dest_y += dest_offset;
2417 //printf("%d %d %d\n", src_x, src_y, dxy);
/* 17x17 source area needed: quarter-pel filters read one extra row/column */
2419 if(s->flags&CODEC_FLAG_EMU_EDGE){
2420 if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
2421 || (unsigned)src_y > v_edge_pos - (motion_y&3) - h ){
2422 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,
2423 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
2424 ptr= s->edge_emu_buffer + src_offset;
2429 qpix_op[0][dxy](dest_y, ptr, linesize);
2431 //damn interlaced mode
2432 //FIXME boundary mirroring is not exactly correct here
/* field pictures: do the 16-wide luma as two 8-wide qpel operations */
2433 qpix_op[1][dxy](dest_y , ptr , linesize);
2434 qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
2437 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma MV derivation: bug-compat modes reproduce known broken encoders */
2442 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
2443 static const int rtab[8]= {0,0,1,1,0,0,0,1};
2444 mx= (motion_x>>1) + rtab[motion_x&7];
2445 my= (motion_y>>1) + rtab[motion_y&7];
2446 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
2447 mx= (motion_x>>1)|(motion_x&1);
2448 my= (motion_y>>1)|(motion_y&1);
2456 dxy= (mx&1) | ((my&1)<<1);
2460 src_x = s->mb_x * 8 + mx;
2461 src_y = s->mb_y * (8 >> field_based) + my;
2462 src_x = clip(src_x, -8, s->width >> 1);
2463 if (src_x == (s->width >> 1))
2465 src_y = clip(src_y, -8, height >> 1);
2466 if (src_y == (height >> 1))
2469 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
/* Cb plane with edge emulation (guard condition elided in this dump) */
2470 ptr = ref_picture[1] + offset;
2472 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
2473 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2474 ptr= s->edge_emu_buffer + (src_offset >> 1);
2476 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/* Cr plane, same handling */
2478 ptr = ref_picture[2] + offset;
2480 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
2481 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2482 ptr= s->edge_emu_buffer + (src_offset >> 1);
2484 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/*
 * H.263-style chroma rounding: map a luma-resolution MV component x to the
 * half-resolution chroma component using h263_chroma_roundtab on the
 * fractional part. The visible negated variant presumably handles x<0 so the
 * table is indexed by magnitude — TODO confirm (the sign branch is elided
 * in this dump).
 */
2487 inline int ff_h263_round_chroma(int x){
2489 return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2492 return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2497 * h263 chroma 4mv motion compensation.
/*
 * For 4MV macroblocks: derive one chroma MV from the sum of the four luma
 * MVs (mx, my) using the H.263 rounding table, then motion-compensate both
 * 8x8 chroma blocks with half-pel pix_op.
 * NOTE(review): elided dump — parameter tail and some guards are missing.
 */
2499 static inline void chroma_4mv_motion(MpegEncContext *s,
2500 uint8_t *dest_cb, uint8_t *dest_cr,
2501 uint8_t **ref_picture,
2502 op_pixels_func *pix_op,
2504 int dxy, emu=0, src_x, src_y, offset;
2507 /* In case of 8X8, we construct a single chroma motion vector
2508 with a special rounding */
2509 mx= ff_h263_round_chroma(mx);
2510 my= ff_h263_round_chroma(my);
2512 dxy = ((my & 1) << 1) | (mx & 1);
2516 src_x = s->mb_x * 8 + mx;
2517 src_y = s->mb_y * 8 + my;
2518 src_x = clip(src_x, -8, s->width/2);
2519 if (src_x == s->width/2)
2521 src_y = clip(src_y, -8, s->height/2);
2522 if (src_y == s->height/2)
2525 offset = (src_y * (s->uvlinesize)) + src_x;
2526 ptr = ref_picture[1] + offset;
/* out-of-picture chroma reference: use edge-emulated 9x9 copy */
2527 if(s->flags&CODEC_FLAG_EMU_EDGE){
2528 if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
2529 || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
2530 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2531 ptr= s->edge_emu_buffer;
2535 pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
/* Cr plane, same offset and emulation */
2537 ptr = ref_picture[2] + offset;
2539 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2540 ptr= s->edge_emu_buffer;
2542 pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
2546 * motion compensation of a single macroblock
2548 * @param dest_y luma destination pointer
2549 * @param dest_cb chroma cb/u destination pointer
2550 * @param dest_cr chroma cr/v destination pointer
2551 * @param dir direction (0->forward, 1->backward)
2552 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2553 * @param pix_op halfpel motion compensation function (average or put normally)
2554 * @param qpix_op qpel motion compensation function (average or put normally)
2555 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * NOTE(review): elided dump — the switch cases below (16X16, 8X8, FIELD,
 * 16X8, DMV per the visible structure) are each missing lines; comments
 * describe visible code only.
 */
2557 static inline void MPV_motion(MpegEncContext *s,
2558 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2559 int dir, uint8_t **ref_picture,
2560 op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2562 int dxy, mx, my, src_x, src_y, motion_x, motion_y;
2564 uint8_t *ptr, *dest;
/* OBMC path (H.263 annex): gather MVs of the 4 sub-blocks and neighbours
   into a 4x4 cache, falling back to the mid MV at intra/border neighbours */
2569 if(s->obmc && s->pict_type != B_TYPE){
2570 int16_t mv_cache[4][4][2];
2571 const int xy= s->mb_x + s->mb_y*s->mb_stride;
2572 const int mot_stride= s->mb_width*2 + 2;
2573 const int mot_xy= 1 + mb_x*2 + (mb_y*2 + 1)*mot_stride;
2575 assert(!s->mb_skiped);
2577 memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
2578 memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
/* bottom row duplicates the last in-MB row (no MB below is read here) */
2579 memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
/* top neighbour: replicate own MVs when above MB is absent or intra */
2581 if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
2582 memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
2584 memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
/* left neighbour */
2587 if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
2588 *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
2589 *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
2591 *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
2592 *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
/* right neighbour */
2595 if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
2596 *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
2597 *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
2599 *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
2600 *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
/* per 8x8 sub-block: pass mid + 4 neighbour MVs to obmc_motion() */
2606 const int x= (i&1)+1;
2607 const int y= (i>>1)+1;
2609 {mv_cache[y][x ][0], mv_cache[y][x ][1]},
2610 {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
2611 {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
2612 {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
2613 {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
2615 obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
2617 mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
2624 if(!(s->flags&CODEC_FLAG_GRAY))
2625 chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
/* standard (non-OBMC) paths, dispatched on the MV type */
2630 switch(s->mv_type) {
/* 16x16: GMC sprite warping, qpel, MSMPEG4 mspel, or plain half-pel */
2634 if(s->real_sprite_warping_points==1){
2635 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2638 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2641 }else if(s->quarter_sample){
2642 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2645 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2647 ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2648 ref_picture, pix_op,
2649 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2653 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2656 s->mv[dir][0][0], s->mv[dir][0][1], 16);
/* 8x8 (4MV): four independent luma blocks, derived single chroma MV */
2662 if(s->quarter_sample){
2664 motion_x = s->mv[dir][i][0];
2665 motion_y = s->mv[dir][i][1];
2667 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2668 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2669 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2671 /* WARNING: do not forget half pels */
2672 src_x = clip(src_x, -16, s->width);
2673 if (src_x == s->width)
2675 src_y = clip(src_y, -16, s->height);
2676 if (src_y == s->height)
2679 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2680 if(s->flags&CODEC_FLAG_EMU_EDGE){
2681 if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
2682 || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
2683 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2684 ptr= s->edge_emu_buffer;
2687 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2688 qpix_op[1][dxy](dest, ptr, s->linesize);
/* accumulate luma MVs (halved for qpel) for the chroma MV derivation */
2690 mx += s->mv[dir][i][0]/2;
2691 my += s->mv[dir][i][1]/2;
2695 hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
2697 mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
2698 s->width, s->height, s->linesize,
2699 s->h_edge_pos, s->v_edge_pos,
2701 s->mv[dir][i][0], s->mv[dir][i][1]);
2703 mx += s->mv[dir][i][0];
2704 my += s->mv[dir][i][1];
2708 if(!(s->flags&CODEC_FLAG_GRAY))
2709 chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
/* field prediction inside a frame picture: two 16x8 half-MBs, the
   field_select offset picks top (0) or bottom (linesize) field */
2712 if (s->picture_structure == PICT_FRAME) {
2713 if(s->quarter_sample){
2715 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2716 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2718 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2720 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2721 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2723 s->mv[dir][1][0], s->mv[dir][1][1], 8);
2726 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2727 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2729 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2731 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2732 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2734 s->mv[dir][1][0], s->mv[dir][1][1], 8);
/* field picture: the opposite-parity field of the current frame may be
   the reference for the second field (hence current_picture.data below) */
2738 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2739 offset= s->field_select[dir][0] ? s->linesize : 0;
2741 ref_picture= s->current_picture.data;
2742 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2745 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2746 ref_picture, offset,
2748 s->mv[dir][0][0], s->mv[dir][0][1], 16);
/* 16x8: two predictions per field picture, each may use its own parity */
2753 uint8_t ** ref2picture;
2755 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2756 ref2picture= ref_picture;
2757 offset= s->field_select[dir][0] ? s->linesize : 0;
2759 ref2picture= s->current_picture.data;
2760 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2763 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2764 ref2picture, offset,
2766 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2769 if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){
2770 ref2picture= ref_picture;
2771 offset= s->field_select[dir][1] ? s->linesize : 0;
2773 ref2picture= s->current_picture.data;
2774 offset= s->field_select[dir][1] ? s->linesize : -s->linesize;
2776 // I know it is ugly but this is the only way to fool emu_edge without rewrite mpeg_motion
2777 mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize,
2779 ref2picture, offset,
2781 s->mv[dir][1][0], s->mv[dir][1][1]+16, 8);
/* dual-prime: put same-parity prediction, then average in the derived
   opposite-parity prediction(s) */
2787 op_pixels_func (*dmv_pix_op)[4];
2790 dmv_pix_op = s->dsp.put_pixels_tab;
2792 if(s->picture_structure == PICT_FRAME){
2793 //put top field from top field
2794 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2797 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2798 //put bottom field from bottom field
2799 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2800 ref_picture, s->linesize,
2802 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2804 dmv_pix_op = s->dsp.avg_pixels_tab;
2806 //avg top field from bottom field
2807 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2808 ref_picture, s->linesize,
2810 s->mv[dir][2][0], s->mv[dir][2][1], 8);
2811 //avg bottom field from top field
2812 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2815 s->mv[dir][3][0], s->mv[dir][3][1], 8);
2818 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2821 //put field from the same parity
2822 //same parity is never in the same frame
2823 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2826 s->mv[dir][0][0],s->mv[dir][0][1],16);
2828 // after put we make avg of the same block
2829 dmv_pix_op=s->dsp.avg_pixels_tab;
2831 //opposite parity is always in the same frame if this is second field
2832 if(!s->first_field){
2833 ref_picture = s->current_picture.data;
2834 //top field is one linesize from frame beginning
2835 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2836 -s->linesize : s->linesize;
2838 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2841 //avg field from the opposite parity
2842 mpeg_motion(s, dest_y, dest_cb, dest_cr,0,
2843 ref_picture, offset,
2845 s->mv[dir][2][0],s->mv[dir][2][1],16);
2854 /* put block[] to dest[] */
/* Dequantize an intra block in place, then inverse-transform it and
 * overwrite the destination pixels (no residual add). */
2855 static inline void put_dct(MpegEncContext *s,
2856 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2858 s->dct_unquantize_intra(s, block, i, qscale);
2859 s->dsp.idct_put (dest, line_size, block);
2862 /* add block[] to dest[] */
/* Inverse-transform an already-dequantized block and add the residual onto
 * dest; skipped entirely when the block has no coded coefficients
 * (block_last_index < 0). */
2863 static inline void add_dct(MpegEncContext *s,
2864 DCTELEM *block, int i, uint8_t *dest, int line_size)
2866 if (s->block_last_index[i] >= 0) {
2867 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, inverse-transform it and add the residual
 * onto dest; no-op for blocks with no coded coefficients. */
2871 static inline void add_dequant_dct(MpegEncContext *s,
2872 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2874 if (s->block_last_index[i] >= 0) {
2875 s->dct_unquantize_inter(s, block, i, qscale);
2877 s->dsp.idct_add (dest, line_size, block);
2882 * cleans dc, ac, coded_block for the current non intra MB
/*
 * Reset the intra prediction state (DC predictors to 1024, AC predictors to
 * zero, coded_block flags) for the current macroblock so a following intra
 * MB does not predict from stale non-intra data.
 * NOTE(review): elided dump — some luma reset statements are missing here.
 */
2884 void ff_clean_intra_table_entries(MpegEncContext *s)
2886 int wrap = s->block_wrap[0];
2887 int xy = s->block_index[0];
/* luma DC predictors of the 4 blocks back to the neutral value 1024 */
2890 s->dc_val[0][xy + 1 ] =
2891 s->dc_val[0][xy + wrap] =
2892 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC predictors (two rows of 2 blocks, 16 coeffs each) */
2894 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2895 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2896 if (s->msmpeg4_version>=3) {
2897 s->coded_block[xy ] =
2898 s->coded_block[xy + 1 ] =
2899 s->coded_block[xy + wrap] =
2900 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one block per plane */
2903 wrap = s->block_wrap[4];
2904 xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2906 s->dc_val[2][xy] = 1024;
2908 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2909 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2911 s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2914 /* generic function called after a macroblock has been parsed by the
2915 decoder or after it has been encoded by the encoder.
2917 Important variables used:
2918 s->mb_intra : true if intra macroblock
2919 s->mv_dir : motion vector direction
2920 s->mv_type : motion vector type
2921 s->mv : motion vector
2922 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * NOTE(review): elided dump — numerous statements (guards, else branches,
 * closing braces) are missing from this function; comments below describe
 * only the visible statements.
 */
2924 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2927 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware acceleration handles the whole MB itself */
2929 if(s->avctx->xvmc_acceleration){
2930 XVMC_decode_mb(s);//xvmc uses pblocks
2938 s->current_picture.qscale_table[mb_xy]= s->qscale;
2940 /* update DC predictors for P macroblocks */
2942 if (s->h263_pred || s->h263_aic) {
2943 if(s->mbintra_table[mb_xy])
2944 ff_clean_intra_table_entries(s);
2948 s->last_dc[2] = 128 << s->intra_dc_precision;
2951 else if (s->h263_pred || s->h263_aic)
2952 s->mbintra_table[mb_xy]=1;
2954 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2955 uint8_t *dest_y, *dest_cb, *dest_cr;
2956 int dct_linesize, dct_offset;
2957 op_pixels_func (*op_pix)[4];
2958 qpel_mc_func (*op_qpix)[16];
2959 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2960 const int uvlinesize= s->current_picture.linesize[1];
2961 const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band;
2963 /* avoid copy if macroblock skipped in last frame too */
2964 /* skip only during decoding as we might trash the buffers during encoding a bit */
2966 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2967 const int age= s->current_picture.age;
2973 assert(s->pict_type!=I_TYPE);
/* per-MB skip counter, saturated at 99 to avoid overflow */
2975 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2976 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2978 /* if previous was skipped too, then nothing to do ! */
2979 if (*mbskip_ptr >= age && s->current_picture.reference){
2982 } else if(!s->current_picture.reference){
2983 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2984 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2986 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: rows of one block come from alternating field lines */
2990 if (s->interlaced_dct) {
2991 dct_linesize = linesize * 2;
2992 dct_offset = linesize;
2994 dct_linesize = linesize;
2995 dct_offset = linesize * 8;
2999 dest_cb= s->dest[1];
3000 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into scratch, copy out later */
3002 dest_y = s->edge_emu_buffer+32; //FIXME cleanup scratchpad pointers
3003 dest_cb= s->edge_emu_buffer+48;
3004 dest_cr= s->edge_emu_buffer+56;
3007 /* motion handling */
3008 /* decoding or more than one mb_type (MC was already done otherwise) */
3010 if ((!s->no_rounding) || s->pict_type==B_TYPE){
3011 op_pix = s->dsp.put_pixels_tab;
3012 op_qpix= s->dsp.put_qpel_pixels_tab;
3014 op_pix = s->dsp.put_no_rnd_pixels_tab;
3015 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
/* forward prediction first (put), backward then averages on top */
3018 if (s->mv_dir & MV_DIR_FORWARD) {
3019 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
3020 op_pix = s->dsp.avg_pixels_tab;
3021 op_qpix= s->dsp.avg_qpel_pixels_tab;
3023 if (s->mv_dir & MV_DIR_BACKWARD) {
3024 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
3028 /* skip dequant / idct if we are really late ;) */
3029 if(s->hurry_up>1) return;
3031 /* add dct residue */
/* codecs whose unquantize was deferred to here (vs done at parse time) */
3032 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
3033 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
3034 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize, s->qscale);
3035 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale);
3036 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale);
3037 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale);
3039 if(!(s->flags&CODEC_FLAG_GRAY)){
3040 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3041 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3043 } else if(s->codec_id != CODEC_ID_WMV2){
3044 add_dct(s, block[0], 0, dest_y, dct_linesize);
3045 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
3046 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
3047 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
3049 if(!(s->flags&CODEC_FLAG_GRAY)){
3050 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3051 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* WMV2 has its own per-MB reconstruction */
3056 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3060 /* dct only in intra block */
3061 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
3062 put_dct(s, block[0], 0, dest_y, dct_linesize, s->qscale);
3063 put_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale);
3064 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale);
3065 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale);
3067 if(!(s->flags&CODEC_FLAG_GRAY)){
3068 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3069 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
/* already dequantized (MPEG-1/2): plain idct_put */
3072 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
3073 s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]);
3074 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
3075 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
3077 if(!(s->flags&CODEC_FLAG_GRAY)){
3078 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
3079 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
/* scratch reconstruction: copy the finished MB to the real destination */
3084 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3085 s->dsp.put_pixels_tab[1][0](s->dest[1], dest_cb, uvlinesize, 8);
3086 s->dsp.put_pixels_tab[1][0](s->dest[2], dest_cr, uvlinesize, 8);
3091 #ifdef CONFIG_ENCODERS
/*
 * Encoder heuristic: zero out block n entirely when its (position-weighted)
 * coefficient score stays below threshold — a block of a few tiny high-cost
 * coefficients is cheaper coded as empty. A negative threshold presumably
 * restricts elimination to AC-only (skip_dc path) — TODO confirm, the
 * skip_dc setup lines are elided in this dump.
 */
3093 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
3095 static const char tab[64]=
3107 DCTELEM *block= s->block[n];
3108 const int last_index= s->block_last_index[n];
3113 threshold= -threshold;
3117 /* are all coefficients which we could set to zero already zero? */
3118 if(last_index<=skip_dc - 1) return;
/* accumulate the elimination score over the scan-ordered coefficients */
3120 for(i=0; i<=last_index; i++){
3121 const int j = s->intra_scantable.permutated[i];
3122 const int level = ABS(block[j]);
3124 if(skip_dc && i==0) continue;
3133 if(score >= threshold) return;
/* below threshold: clear everything after (and including) skip_dc */
3134 for(i=skip_dc; i<=last_index; i++){
3135 const int j = s->intra_scantable.permutated[i];
3138 if(block[0]) s->block_last_index[n]= 0;
3139 else s->block_last_index[n]= -1;
/*
 * Clamp quantized coefficients of one block into the codec's representable
 * range [min_qcoeff, max_qcoeff]; the intra DC coefficient is exempt.
 * Logs a warning when clipping occurred and no RD mb decision would have
 * avoided it. NOTE(review): clamping assignments are elided in this dump.
 */
3142 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
3145 const int maxlevel= s->max_qcoeff;
3146 const int minlevel= s->min_qcoeff;
3150 i=1; //skip clipping of intra dc
3154 for(;i<=last_index; i++){
3155 const int j= s->intra_scantable.permutated[i];
3156 int level = block[j];
3158 if (level>maxlevel){
3161 }else if(level<minlevel){
3169 if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
3170 av_log(s->avctx, AV_LOG_INFO, "warning, cliping %d dct coefficents to %d..%d\n", overflow, minlevel, maxlevel);
3173 #endif //CONFIG_ENCODERS
3177 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Deliver a decoded horizontal band [y, y+h) to the application via the
 * draw_horiz_band callback; picks the frame to display depending on picture
 * type / delay, and skips first fields unless the app opted into fields.
 */
3179 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
3180 if (s->avctx->draw_horiz_band) {
3184 if(s->picture_structure != PICT_FRAME){
3187 if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
3190 h= FFMIN(h, s->height - y);
/* B frames / low delay / coded-order: the just-decoded picture is shown;
   otherwise the previous reference picture is the one being displayed */
3192 if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
3193 src= (AVFrame*)s->current_picture_ptr;
3194 else if(s->last_picture_ptr)
3195 src= (AVFrame*)s->last_picture_ptr;
3199 if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
3205 offset[0]= y * s->linesize;;
3207 offset[2]= (y>>1) * s->uvlinesize;;
3213 s->avctx->draw_horiz_band(s->avctx, src, offset,
3214 y, s->picture_structure, h);
/*
 * Per-macroblock setup: compute block_index[0..5] (positions of the 4 luma
 * and 2 chroma blocks inside the prediction tables) and the dest[] pixel
 * pointers for the current MB. The draw_horiz_band B-frame path omits the
 * row term because those bands are written without a vertical offset here
 * — presumably rebased by the band delivery; TODO confirm against callers.
 */
3218 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3219 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
3220 const int uvlinesize= s->current_picture.linesize[1];
/* luma block positions: 2x2 blocks per MB, with a 1-block border (+-1) */
3222 s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3223 s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2;
3224 s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
3225 s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2;
/* chroma blocks live after the luma area in the same linear table */
3226 s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
3227 s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
3229 if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
3230 s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
3231 s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
3232 s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
3234 s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16 - 16;
3235 s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
3236 s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
3240 #ifdef CONFIG_ENCODERS
3242 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
3244 const int mb_x= s->mb_x;
3245 const int mb_y= s->mb_y;
3248 int dct_offset = s->linesize*8; //default for progressive frames
3250 for(i=0; i<6; i++) skip_dct[i]=0;
3252 if(s->adaptive_quant){
3253 const int last_qp= s->qscale;
3254 const int mb_xy= mb_x + mb_y*s->mb_stride;
3256 s->lambda= s->lambda_table[mb_xy];
3259 if(!(s->flags&CODEC_FLAG_QP_RD)){
3260 s->dquant= s->qscale - last_qp;
3262 if(s->out_format==FMT_H263)
3263 s->dquant= clip(s->dquant, -2, 2); //FIXME RD
3265 if(s->codec_id==CODEC_ID_MPEG4){
3267 if((s->mv_dir&MV_DIRECT) || s->mv_type==MV_TYPE_8X8)
3272 ff_set_qscale(s, last_qp + s->dquant);
3280 wrap_y = s->linesize;
3281 ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
3283 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
3284 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
3285 ptr= s->edge_emu_buffer;
3289 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
3290 int progressive_score, interlaced_score;
3292 s->interlaced_dct=0;
3293 progressive_score= s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y, 8)
3294 +s->dsp.ildct_cmp[4](s, ptr + wrap_y*8, NULL, wrap_y, 8) - 400;
3296 if(progressive_score > 0){
3297 interlaced_score = s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y*2, 8)
3298 +s->dsp.ildct_cmp[4](s, ptr + wrap_y , NULL, wrap_y*2, 8);
3299 if(progressive_score > interlaced_score){
3300 s->interlaced_dct=1;
3308 s->dsp.get_pixels(s->block[0], ptr , wrap_y);
3309 s->dsp.get_pixels(s->block[1], ptr + 8, wrap_y);
3310 s->dsp.get_pixels(s->block[2], ptr + dct_offset , wrap_y);
3311 s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
3313 if(s->flags&CODEC_FLAG_GRAY){
3317 int wrap_c = s->uvlinesize;
3318 ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
3320 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3321 ptr= s->edge_emu_buffer;
3323 s->dsp.get_pixels(s->block[4], ptr, wrap_c);
3325 ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
3327 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3328 ptr= s->edge_emu_buffer;
3330 s->dsp.get_pixels(s->block[5], ptr, wrap_c);
3333 op_pixels_func (*op_pix)[4];
3334 qpel_mc_func (*op_qpix)[16];
3335 uint8_t *dest_y, *dest_cb, *dest_cr;
3336 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
3340 dest_y = s->dest[0];
3341 dest_cb = s->dest[1];
3342 dest_cr = s->dest[2];
3343 wrap_y = s->linesize;
3344 wrap_c = s->uvlinesize;
3345 ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
3346 ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
3347 ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
3349 if ((!s->no_rounding) || s->pict_type==B_TYPE){
3350 op_pix = s->dsp.put_pixels_tab;
3351 op_qpix= s->dsp.put_qpel_pixels_tab;
3353 op_pix = s->dsp.put_no_rnd_pixels_tab;
3354 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
3357 if (s->mv_dir & MV_DIR_FORWARD) {
3358 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
3359 op_pix = s->dsp.avg_pixels_tab;
3360 op_qpix= s->dsp.avg_qpel_pixels_tab;
3362 if (s->mv_dir & MV_DIR_BACKWARD) {
3363 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
3366 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
3367 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
3368 ptr_y= s->edge_emu_buffer;
3372 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
3373 int progressive_score, interlaced_score;
3375 s->interlaced_dct=0;
3376 progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8)
3377 +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400;
3379 if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400;
3381 if(progressive_score>0){
3382 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8)
3383 +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8);
3385 if(progressive_score > interlaced_score){
3386 s->interlaced_dct=1;
3394 s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
3395 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
3396 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
3397 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
3399 if(s->flags&CODEC_FLAG_GRAY){
3404 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3405 ptr_cb= s->edge_emu_buffer;
3407 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
3409 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3410 ptr_cr= s->edge_emu_buffer;
3412 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
3414 /* pre quantization */
3415 if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
3417 if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1;
3418 if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1;
3419 if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1;
3420 if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1;
3421 if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1;
3422 if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1;
3428 if(skip_dct[i]) num++;
3431 if(s->mb_x==0 && s->mb_y==0){
3433 printf("%6d %1d\n", stat[i], i);
3442 /* DCT & quantize */
3443 if(s->out_format==FMT_MJPEG){
3446 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
3447 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3453 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
3454 // FIXME we could decide to change to quantizer instead of clipping
3455 // JS: I don't think that would be a good idea it could lower quality instead
3456 // of improve it. Just INTRADC clipping deserves changes in quantizer
3457 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3459 s->block_last_index[i]= -1;
3462 if(s->luma_elim_threshold && !s->mb_intra)
3464 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
3465 if(s->chroma_elim_threshold && !s->mb_intra)
3467 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
3469 if(s->flags & CODEC_FLAG_CBP_RD){
3471 if(s->block_last_index[i] == -1)
3472 s->coded_score[i]= INT_MAX/256;
3477 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
3478 s->block_last_index[4]=
3479 s->block_last_index[5]= 0;
3481 s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
3484 //non c quantize code returns incorrect block_last_index FIXME
3485 if(s->alternate_scan && s->dct_quantize != dct_quantize_c){
3488 if(s->block_last_index[i]>0){
3489 for(j=63; j>0; j--){
3490 if(s->block[i][ s->intra_scantable.permutated[j] ]) break;
3492 s->block_last_index[i]= j;
3497 /* huffman encode */
3498 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
3499 case CODEC_ID_MPEG1VIDEO:
3500 case CODEC_ID_MPEG2VIDEO:
3501 mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
3503 case CODEC_ID_MPEG4:
3504 mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3505 case CODEC_ID_MSMPEG4V2:
3506 case CODEC_ID_MSMPEG4V3:
3508 msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3510 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
3512 case CODEC_ID_H263P:
3515 h263_encode_mb(s, s->block, motion_x, motion_y); break;
3517 case CODEC_ID_MJPEG:
3518 mjpeg_encode_mb(s, s->block); break;
3524 #endif //CONFIG_ENCODERS
3527 * combines the (truncated) bitstream to a complete frame
3528 * @returns -1 if no complete frame could be created
/*
 * Combines (possibly truncated) input chunks into a complete frame buffer.
 * `next` is the offset of the next frame start inside *buf, or END_NOT_FOUND
 * when no frame boundary was detected yet; returns -1 when no complete frame
 * could be produced (return statements are among lines dropped from this
 * extract -- the embedded original line numbers jump).
 */
3530 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
3531 ParseContext *pc= &s->parse_context;
/* debug: dump parse-context state before combining (presumably guarded by a
   debug #ifdef in the dropped lines -- TODO confirm) */
3535 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3536 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3540 /* copy overreaded byes from last frame into buffer */
3541 for(; pc->overread>0; pc->overread--){
3542 pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
3545 pc->last_index= pc->index;
3547 /* copy into buffer end return */
3548 if(next == END_NOT_FOUND){
/* no frame end yet: grow the internal buffer and stash the whole chunk */
3549 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3551 memcpy(&pc->buffer[pc->index], *buf, *buf_size);
3552 pc->index += *buf_size;
3557 pc->overread_index= pc->index + next;
3559 /* append to buffer */
3561 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3563 memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
3568 /* store overread bytes */
/* negative `next` means we consumed bytes past the frame end; feed them back
   into the startcode search state one byte at a time */
3569 for(;next < 0; next++){
3570 pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
/* debug: dump parse-context state after combining */
3576 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3577 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
/*
 * Releases all internally held picture buffers and resets the parser state.
 * Used on flush/seek so no stale frame references or partial bitstream
 * survive into the next decode call.
 */
3584 void ff_mpeg_flush(AVCodecContext *avctx){
3586 MpegEncContext *s = avctx->priv_data;
/* hand every still-allocated picture back through the release callback */
3588 for(i=0; i<MAX_PICTURE_COUNT; i++){
3589 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
3590 || s->picture[i].type == FF_BUFFER_TYPE_USER))
3591 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
3593 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
/* reset the bitstream parse context to its pristine state */
3595 s->parse_context.state= -1;
3596 s->parse_context.frame_start_found= 0;
3597 s->parse_context.overread= 0;
3598 s->parse_context.overread_index= 0;
3599 s->parse_context.index= 0;
3600 s->parse_context.last_index= 0;
3603 #ifdef CONFIG_ENCODERS
/*
 * Copies `length` bits from src into the bit writer pb, 16 bits at a time
 * in big-endian order, then the trailing 0..15 bits.
 * NOTE(review): assumes src is readable as uint16_t words; possible
 * alignment/aliasing concern on strict platforms -- predates such checks.
 */
3604 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
3606 int bytes= length>>4;     // number of whole 16-bit words to copy
3607 int bits= length&15;      // leftover bit count (0..15)
3610 if(length==0) return;
3612 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
3613 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
/*
 * Snapshots the encoder state from s into d before trying one candidate
 * macroblock coding mode, so the state can be restored/compared afterwards
 * (see copy_context_after_encode and encode_mb_hq).
 */
3616 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
3619 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
3622 d->mb_skip_run= s->mb_skip_run;
3624 d->last_dc[i]= s->last_dc[i];
/* per-frame bit statistics, needed for two-pass / RD accounting */
3627 d->mv_bits= s->mv_bits;
3628 d->i_tex_bits= s->i_tex_bits;
3629 d->p_tex_bits= s->p_tex_bits;
3630 d->i_count= s->i_count;
3631 d->f_count= s->f_count;
3632 d->b_count= s->b_count;
3633 d->skip_count= s->skip_count;
3634 d->misc_bits= s->misc_bits;
3638 d->qscale= s->qscale;
3639 d->dquant= s->dquant;
/*
 * Copies the encoder state produced by a candidate macroblock encode back
 * from s into d; the counterpart of copy_context_before_encode, but also
 * carries the per-MB results (mv, mb_intra, block_last_index, ...) of the
 * winning mode.
 */
3642 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
3645 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
3646 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
3649 d->mb_skip_run= s->mb_skip_run;
3651 d->last_dc[i]= s->last_dc[i];
/* per-frame bit statistics */
3654 d->mv_bits= s->mv_bits;
3655 d->i_tex_bits= s->i_tex_bits;
3656 d->p_tex_bits= s->p_tex_bits;
3657 d->i_count= s->i_count;
3658 d->f_count= s->f_count;
3659 d->b_count= s->b_count;
3660 d->skip_count= s->skip_count;
3661 d->misc_bits= s->misc_bits;
/* result of the macroblock-mode decision */
3663 d->mb_intra= s->mb_intra;
3664 d->mb_skiped= s->mb_skiped;
3665 d->mv_type= s->mv_type;
3666 d->mv_dir= s->mv_dir;
/* in data-partitioned mode the secondary bit writers travel with the state */
3668 if(s->data_partitioning){
3670 d->tex_pb= s->tex_pb;
3674 d->block_last_index[i]= s->block_last_index[i];
3675 d->interlaced_dct= s->interlaced_dct;
3676 d->qscale= s->qscale;
/*
 * Encodes one macroblock with the candidate mode `type` into a scratch
 * bitstream, scores it (bit count, or bits*lambda2 + SSE distortion under
 * FF_MB_DECISION_RD), and keeps it in `best` if it beats *dmin.
 * The two pb[]/pb2[]/tex_pb[] entries are ping-pong buffers selected by
 * *next_block.  NOTE(review): some interior lines (score comparison,
 * restore path) are missing from this extract.
 */
3679 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
3680 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3681 int *dmin, int *next_block, int motion_x, int motion_y)
3684 uint8_t *dest_backup[3];
3686 copy_context_before_encode(s, backup, type);
/* route this trial encode into the scratch block/bit buffers */
3688 s->block= s->blocks[*next_block];
3689 s->pb= pb[*next_block];
3690 if(s->data_partitioning){
3691 s->pb2 = pb2 [*next_block];
3692 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction output to the ME scratchpad so the real
   destination is untouched until a mode wins */
3696 memcpy(dest_backup, s->dest, sizeof(s->dest));
3697 s->dest[0] = s->me.scratchpad;
3698 s->dest[1] = s->me.scratchpad + 16;
3699 s->dest[2] = s->me.scratchpad + 16 + 8;
3700 assert(2*s->uvlinesize == s->linesize); //should be no prob for encoding
3701 assert(s->linesize >= 64); //FIXME
3704 encode_mb(s, motion_x, motion_y);
3706 score= get_bit_count(&s->pb);
3707 if(s->data_partitioning){
3708 score+= get_bit_count(&s->pb2);
3709 score+= get_bit_count(&s->tex_pb);
/* full rate-distortion mode: decode the MB back and add weighted SSE */
3712 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
3713 MPV_decode_mb(s, s->block);
3715 score *= s->lambda2;
3716 score += sse_mb(s) << FF_LAMBDA_SHIFT;
3720 memcpy(s->dest, dest_backup, sizeof(s->dest));
3727 copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two w x h pixel regions.
 * Uses the optimized dsp routines for the common 16x16 / 8x8 sizes and
 * a scalar loop (via the squareTbl lookup) otherwise.
 */
3731 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3732 uint32_t *sq = squareTbl + 256;    /* centered so negative diffs index correctly */
3737 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
3738 else if(w==8 && h==8)
3739 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
3743 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * SSE of the current macroblock (luma 16x16 + both 8x8 chroma planes)
 * between the source picture and the reconstruction in s->dest.
 * Falls back to the generic sse() with clipped w/h at the right/bottom
 * picture edge where a full 16x16 MB does not fit.
 */
3752 static int sse_mb(MpegEncContext *s){
3756 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3757 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* full-size MB: use the fast fixed-size dsp routines */
3760 return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
3761 +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
3762 +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* edge MB: arbitrary-size path */
3764 return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
3765 +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
3766 +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Top-level per-picture encoder driver: motion estimation, picture-type and
 * f_code/b_code decisions, rate control, picture header, then the per-MB
 * encode loop (with optional multi-mode RD decision and QP_RD trellis over
 * dquant).  NOTE(review): this extract is missing many interior lines (the
 * embedded original line numbers jump), so closing braces, loop headers and
 * some branches are not visible; comments below describe only visible code.
 */
3769 static void encode_picture(MpegEncContext *s, int picture_number)
3771 int mb_x, mb_y, pdif = 0;
/* scratch contexts + ping-pong bit buffers used by encode_mb_hq for
   trying several MB coding modes before committing one */
3774 MpegEncContext best_s, backup_s;
3775 uint8_t bit_buf[2][3000];
3776 uint8_t bit_buf2[2][3000];
3777 uint8_t bit_buf_tex[2][3000];
3778 PutBitContext pb[2], pb2[2], tex_pb[2];
3781 init_put_bits(&pb [i], bit_buf [i], 3000);
3782 init_put_bits(&pb2 [i], bit_buf2 [i], 3000);
3783 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000);
3786 s->picture_number = picture_number;
3788 /* Reset the average MB variance */
3789 s->current_picture.mb_var_sum = 0;
3790 s->current_picture.mc_mb_var_sum = 0;
3793 /* we need to initialize some time vars before we can encode b-frames */
3794 // RAL: Condition added for MPEG1VIDEO
3795 if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3796 ff_set_mpeg4_time(s, s->picture_number);
3799 s->scene_change_score=0;
3801 s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration
/* rounding control: flip-flops on non-B frames for codecs that support it */
3803 if(s->pict_type==I_TYPE){
3804 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3805 else s->no_rounding=0;
3806 }else if(s->pict_type!=B_TYPE){
3807 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3808 s->no_rounding ^= 1;
3811 /* Estimate motion for every MB */
3812 s->mb_intra=0; //for the rate distoration & bit compare functions
3813 if(s->pict_type != I_TYPE){
3814 if(s->pict_type != B_TYPE){
/* optional pre-pass motion estimation, run bottom-right to top-left */
3815 if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3817 s->me.dia_size= s->avctx->pre_dia_size;
3819 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3820 for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3823 ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
/* main motion-estimation pass */
3830 s->me.dia_size= s->avctx->dia_size;
3831 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3832 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3833 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3834 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3835 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3836 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3839 s->block_index[0]+=2;
3840 s->block_index[1]+=2;
3841 s->block_index[2]+=2;
3842 s->block_index[3]+=2;
3844 /* compute motion vector & mb_type and store in context */
3845 if(s->pict_type==B_TYPE)
3846 ff_estimate_b_frame_motion(s, mb_x, mb_y);
3848 ff_estimate_p_frame_motion(s, mb_x, mb_y);
3851 }else /* if(s->pict_type == I_TYPE) */{
3853 //FIXME do we need to zero them?
3854 memset(s->current_picture.motion_val[0][0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3855 memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3856 for(i=0; i<s->mb_stride*s->mb_height; i++)
3857 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3859 if(!s->fixed_qscale){
3860 /* finding spatial complexity for I-frame rate control */
3861 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3862 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3865 uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3867 int sum = s->dsp.pix_sum(pix, s->linesize);
3869 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3871 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3872 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3873 s->current_picture.mb_var_sum += varc;
/* scene-cut detection: promote a P frame to I when motion estimation
   found the frame too different from its reference */
3880 if(s->scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
3881 s->pict_type= I_TYPE;
3882 for(i=0; i<s->mb_stride*s->mb_height; i++)
3883 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3884 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* choose MV range codes (f_code/b_code) and clip out-of-range vectors */
3888 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3889 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3891 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3893 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3894 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3895 s->f_code= FFMAX(s->f_code, FFMAX(a,b));
3898 ff_fix_long_p_mvs(s);
3899 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3900 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3903 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3904 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3909 if(s->pict_type==B_TYPE){
3912 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3913 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3914 s->f_code = FFMAX(a, b);
3916 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3917 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3918 s->b_code = FFMAX(a, b);
3920 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3921 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3922 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3923 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3924 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3926 for(dir=0; dir<2; dir++){
3929 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3930 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3931 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3932 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* rate control: pick the frame quality, then smooth per-MB qscales */
3940 if (!s->fixed_qscale)
3941 s->current_picture.quality = ff_rate_estimate_qscale(s);
3943 if(s->adaptive_quant){
3945 switch(s->codec_id){
3946 case CODEC_ID_MPEG4:
3947 ff_clean_mpeg4_qscales(s);
3950 case CODEC_ID_H263P:
3952 ff_clean_h263_qscales(s);
3957 s->lambda= s->lambda_table[0];
3960 s->lambda= s->current_picture.quality;
3961 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3964 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
3965 s->qscale= 3; //reduce cliping problems
3967 if (s->out_format == FMT_MJPEG) {
3968 /* for mjpeg, we do include qscale in the matrix */
3969 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3971 int j= s->dsp.idct_permutation[i];
3973 s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3975 convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3976 s->intra_matrix, s->intra_quant_bias, 8, 8);
3979 //FIXME var duplication
3980 s->current_picture.key_frame= s->pict_type == I_TYPE;
3981 s->current_picture.pict_type= s->pict_type;
3983 if(s->current_picture.key_frame)
3984 s->picture_in_gop_number=0;
/* write the codec-specific picture header and account its bits */
3986 s->last_bits= get_bit_count(&s->pb);
3987 switch(s->out_format) {
3989 mjpeg_picture_header(s);
3993 if (s->codec_id == CODEC_ID_WMV2)
3994 ff_wmv2_encode_picture_header(s, picture_number);
3995 else if (s->h263_msmpeg4)
3996 msmpeg4_encode_picture_header(s, picture_number);
3997 else if (s->h263_pred)
3998 mpeg4_encode_picture_header(s, picture_number);
3999 else if (s->codec_id == CODEC_ID_RV10)
4000 rv10_encode_picture_header(s, picture_number);
4001 else if (s->codec_id == CODEC_ID_FLV1)
4002 ff_flv_encode_picture_header(s, picture_number);
4004 h263_encode_picture_header(s, picture_number);
4008 mpeg1_encode_picture_header(s, picture_number);
4013 bits= get_bit_count(&s->pb);
4014 s->header_bits= bits - s->last_bits;
4026 /* init last dc values */
4027 /* note: quant matrix value (8) is implied here */
4028 s->last_dc[i] = 128;
4030 s->current_picture_ptr->error[i] = 0;
4033 memset(s->last_mv, 0, sizeof(s->last_mv));
4038 switch(s->codec_id){
4040 case CODEC_ID_H263P:
4042 s->gob_index = ff_h263_get_gob_height(s);
4044 case CODEC_ID_MPEG4:
4045 if(s->partitioned_frame)
4046 ff_mpeg4_init_partitions(s);
/* ---- main per-macroblock encode loop ---- */
4053 s->first_slice_line = 1;
4054 s->ptr_lastgob = s->pb.buf;
4055 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
4059 ff_set_qscale(s, s->qscale);
4060 ff_init_block_index(s);
4062 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
4063 const int xy= mb_y*s->mb_stride + mb_x;
4064 int mb_type= s->mb_type[xy];
4070 ff_update_block_index(s);
4072 /* write gob / video packet header */
4075 int current_packet_size, is_gob_start;
4077 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
/* start a new GOB/slice when the RTP payload-size budget is reached */
4079 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
4081 switch(s->codec_id){
4083 case CODEC_ID_H263P:
4084 if(!s->h263_slice_structured)
4085 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
4087 case CODEC_ID_MPEG2VIDEO:
4088 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
/* fallthrough (as written) into the MPEG1 skip-run check */
4089 case CODEC_ID_MPEG1VIDEO:
4090 if(s->mb_skip_run) is_gob_start=0;
4095 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
4096 ff_mpeg4_merge_partitions(s);
4097 ff_mpeg4_init_partitions(s);
4100 if(s->codec_id==CODEC_ID_MPEG4)
4101 ff_mpeg4_stuffing(&s->pb);
4103 align_put_bits(&s->pb);
4104 flush_put_bits(&s->pb);
4106 assert((get_bit_count(&s->pb)&7) == 0);
4107 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
/* optional error-rate simulation: drop the packet by rewinding pb */
4109 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
4110 int r= get_bit_count(&s->pb)/8 + s->picture_number + s->codec_id + s->mb_x + s->mb_y;
4111 int d= 100 / s->avctx->error_rate;
4113 current_packet_size=0;
4114 #ifndef ALT_BITSTREAM_WRITER
4115 s->pb.buf_ptr= s->ptr_lastgob;
4117 assert(pbBufPtr(&s->pb) == s->ptr_lastgob);
4121 if (s->avctx->rtp_callback)
4122 s->avctx->rtp_callback(s->ptr_lastgob, current_packet_size, 0);
/* write the resync/slice header for the new packet */
4124 switch(s->codec_id){
4125 case CODEC_ID_MPEG4:
4126 ff_mpeg4_encode_video_packet_header(s);
4127 ff_mpeg4_clean_buffers(s);
4129 case CODEC_ID_MPEG1VIDEO:
4130 case CODEC_ID_MPEG2VIDEO:
4131 ff_mpeg1_encode_slice_header(s);
4132 ff_mpeg1_clean_buffers(s);
4135 case CODEC_ID_H263P:
4136 h263_encode_gob_header(s, mb_y);
4140 if(s->flags&CODEC_FLAG_PASS1){
4141 int bits= get_bit_count(&s->pb);
4142 s->misc_bits+= bits - s->last_bits;
4146 s->ptr_lastgob += current_packet_size;
4147 s->first_slice_line=1;
4148 s->resync_mb_x=mb_x;
4149 s->resync_mb_y=mb_y;
4154 if( (s->resync_mb_x == s->mb_x)
4155 && s->resync_mb_y+1 == s->mb_y){
4156 s->first_slice_line=0;
4160 s->dquant=0; //only for QP_RD
/* --- multi-mode decision path: try every candidate MB type with
       encode_mb_hq and keep the cheapest --- */
4162 if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible
4164 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
4166 copy_context_before_encode(&backup_s, s, -1);
4168 best_s.data_partitioning= s->data_partitioning;
4169 best_s.partitioned_frame= s->partitioned_frame;
4170 if(s->data_partitioning){
4171 backup_s.pb2= s->pb2;
4172 backup_s.tex_pb= s->tex_pb;
4175 if(mb_type&CANDIDATE_MB_TYPE_INTER){
4176 s->mv_dir = MV_DIR_FORWARD;
4177 s->mv_type = MV_TYPE_16X16;
4179 s->mv[0][0][0] = s->p_mv_table[xy][0];
4180 s->mv[0][0][1] = s->p_mv_table[xy][1];
4181 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
4182 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
4184 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
4185 s->mv_dir = MV_DIR_FORWARD;
4186 s->mv_type = MV_TYPE_FIELD;
4189 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
4190 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
4191 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
4193 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
4194 &dmin, &next_block, 0, 0);
4196 if(mb_type&CANDIDATE_MB_TYPE_SKIPED){
4197 s->mv_dir = MV_DIR_FORWARD;
4198 s->mv_type = MV_TYPE_16X16;
4202 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPED, pb, pb2, tex_pb,
4203 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
4205 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
4206 s->mv_dir = MV_DIR_FORWARD;
4207 s->mv_type = MV_TYPE_8X8;
4210 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
4211 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
4213 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
4214 &dmin, &next_block, 0, 0);
4216 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
4217 s->mv_dir = MV_DIR_FORWARD;
4218 s->mv_type = MV_TYPE_16X16;
4220 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
4221 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
4222 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
4223 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
4225 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
4226 s->mv_dir = MV_DIR_BACKWARD;
4227 s->mv_type = MV_TYPE_16X16;
4229 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
4230 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
4231 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
4232 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
4234 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
4235 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4236 s->mv_type = MV_TYPE_16X16;
4238 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
4239 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
4240 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
4241 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
4242 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
4243 &dmin, &next_block, 0, 0);
4245 if(mb_type&CANDIDATE_MB_TYPE_DIRECT){
4246 int mx= s->b_direct_mv_table[xy][0];
4247 int my= s->b_direct_mv_table[xy][1];
4249 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
4252 ff_mpeg4_set_direct_mv(s, mx, my);
4254 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
4255 &dmin, &next_block, mx, my);
4257 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
4258 s->mv_dir = MV_DIR_FORWARD;
4259 s->mv_type = MV_TYPE_FIELD;
4262 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
4263 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
4264 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
4266 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
4267 &dmin, &next_block, 0, 0);
4269 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
4270 s->mv_dir = MV_DIR_BACKWARD;
4271 s->mv_type = MV_TYPE_FIELD;
4274 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
4275 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
4276 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
4278 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
4279 &dmin, &next_block, 0, 0);
4281 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
4282 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4283 s->mv_type = MV_TYPE_FIELD;
4285 for(dir=0; dir<2; dir++){
4287 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
4288 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
4289 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
4292 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
4293 &dmin, &next_block, 0, 0);
4295 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
4297 s->mv_type = MV_TYPE_16X16;
4301 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
4302 &dmin, &next_block, 0, 0);
4303 if(s->h263_pred || s->h263_aic){
4305 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
4307 ff_clean_intra_table_entries(s); //old mode?
/* QP_RD: additionally search over dquant around the best mode */
4311 if(s->flags & CODEC_FLAG_QP_RD){
4312 if(best_s.mv_type==MV_TYPE_16X16 && !(best_s.mv_dir&MV_DIRECT)){
4313 const int last_qp= backup_s.qscale;
4314 int dquant, dir, qp, dc[6];
4316 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
4318 assert(backup_s.dquant == 0);
4321 s->mv_dir= best_s.mv_dir;
4322 s->mv_type = MV_TYPE_16X16;
4323 s->mb_intra= best_s.mb_intra;
4324 s->mv[0][0][0] = best_s.mv[0][0][0];
4325 s->mv[0][0][1] = best_s.mv[0][0][1];
4326 s->mv[1][0][0] = best_s.mv[1][0][0];
4327 s->mv[1][0][1] = best_s.mv[1][0][1];
4329 dir= s->pict_type == B_TYPE ? 2 : 1;
4330 if(last_qp + dir > s->avctx->qmax) dir= -dir;
4331 for(dquant= dir; dquant<=2 && dquant>=-2; dquant += dir){
4332 qp= last_qp + dquant;
4333 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
4335 backup_s.dquant= dquant;
/* save DC/AC prediction state so a losing qp can be undone */
4338 dc[i]= s->dc_val[0][ s->block_index[i] ];
4339 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
4343 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
4344 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
4345 if(best_s.qscale != qp){
4348 s->dc_val[0][ s->block_index[i] ]= dc[i];
4349 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
4352 if(dir > 0 && dquant==dir){
4360 s->current_picture.qscale_table[xy]= qp;
/* commit the winning mode: restore its context and splice its bits
   from the scratch buffer into the real bitstream */
4364 copy_context_after_encode(s, &best_s, -1);
4366 pb_bits_count= get_bit_count(&s->pb);
4367 flush_put_bits(&s->pb);
4368 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
4371 if(s->data_partitioning){
4372 pb2_bits_count= get_bit_count(&s->pb2);
4373 flush_put_bits(&s->pb2);
4374 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
4375 s->pb2= backup_s.pb2;
4377 tex_pb_bits_count= get_bit_count(&s->tex_pb);
4378 flush_put_bits(&s->tex_pb);
4379 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
4380 s->tex_pb= backup_s.tex_pb;
4382 s->last_bits= get_bit_count(&s->pb);
4385 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
4386 ff_h263_update_motion_val(s);
/* copy the reconstruction of the winning mode out of the scratchpad */
4390 s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad , s->linesize ,16);
4391 s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
4392 s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
4395 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
4396 MPV_decode_mb(s, s->block);
/* --- single-mode path: exactly one candidate MB type --- */
4398 int motion_x, motion_y;
4399 s->mv_type=MV_TYPE_16X16;
4400 // only one MB-Type possible
4403 case CANDIDATE_MB_TYPE_INTRA:
4406 motion_x= s->mv[0][0][0] = 0;
4407 motion_y= s->mv[0][0][1] = 0;
4409 case CANDIDATE_MB_TYPE_INTER:
4410 s->mv_dir = MV_DIR_FORWARD;
4412 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
4413 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
4415 case CANDIDATE_MB_TYPE_INTER_I:
4416 s->mv_dir = MV_DIR_FORWARD;
4417 s->mv_type = MV_TYPE_FIELD;
4420 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
4421 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
4422 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
4424 motion_x = motion_y = 0;
4426 case CANDIDATE_MB_TYPE_INTER4V:
4427 s->mv_dir = MV_DIR_FORWARD;
4428 s->mv_type = MV_TYPE_8X8;
4431 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
4432 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
4434 motion_x= motion_y= 0;
4436 case CANDIDATE_MB_TYPE_DIRECT:
4437 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
4439 motion_x=s->b_direct_mv_table[xy][0];
4440 motion_y=s->b_direct_mv_table[xy][1];
4442 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
4445 case CANDIDATE_MB_TYPE_BIDIR:
4446 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4450 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
4451 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
4452 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
4453 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
4455 case CANDIDATE_MB_TYPE_BACKWARD:
4456 s->mv_dir = MV_DIR_BACKWARD;
4458 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
4459 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
4461 case CANDIDATE_MB_TYPE_FORWARD:
4462 s->mv_dir = MV_DIR_FORWARD;
4464 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
4465 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
4466 // printf(" %d %d ", motion_x, motion_y);
4468 case CANDIDATE_MB_TYPE_FORWARD_I:
4469 s->mv_dir = MV_DIR_FORWARD;
4470 s->mv_type = MV_TYPE_FIELD;
4473 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
4474 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
4475 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
4477 motion_x=motion_y=0;
4479 case CANDIDATE_MB_TYPE_BACKWARD_I:
4480 s->mv_dir = MV_DIR_BACKWARD;
4481 s->mv_type = MV_TYPE_FIELD;
4484 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
4485 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
4486 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
4488 motion_x=motion_y=0;
4490 case CANDIDATE_MB_TYPE_BIDIR_I:
4491 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4492 s->mv_type = MV_TYPE_FIELD;
4494 for(dir=0; dir<2; dir++){
4496 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
4497 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
4498 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
4501 motion_x=motion_y=0;
4504 motion_x=motion_y=0; //gcc warning fix
4505 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
4508 encode_mb(s, motion_x, motion_y);
4510 // RAL: Update last macrobloc type
4511 s->last_mv_dir = s->mv_dir;
4514 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
4515 ff_h263_update_motion_val(s);
4518 MPV_decode_mb(s, s->block);
4521 /* clean the MV table in IPS frames for direct mode in B frames */
4522 if(s->mb_intra /* && I,P,S_TYPE */){
4523 s->p_mv_table[xy][0]=0;
4524 s->p_mv_table[xy][1]=0;
/* accumulate per-plane SSE for PSNR reporting (edge MBs clipped) */
4527 if(s->flags&CODEC_FLAG_PSNR){
4531 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
4532 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
4534 s->current_picture_ptr->error[0] += sse(
4535 s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
4536 s->dest[0], w, h, s->linesize);
4537 s->current_picture_ptr->error[1] += sse(
4538 s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
4539 s->dest[1], w>>1, h>>1, s->uvlinesize);
4540 s->current_picture_ptr->error[2] += sse(
4541 s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
4542 s->dest[2], w>>1, h>>1, s->uvlinesize);
4545 ff_h263_loop_filter(s);
4546 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
/* ---- end of per-MB loop: frame trailers ---- */
4552 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
4553 ff_mpeg4_merge_partitions(s);
4555 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
4556 msmpeg4_encode_ext_header(s);
4558 if(s->codec_id==CODEC_ID_MPEG4)
4559 ff_mpeg4_stuffing(&s->pb);
4562 /* Send the last GOB if RTP */
4563 if (s->avctx->rtp_callback) {
4564 flush_put_bits(&s->pb);
4565 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
4566 /* Call the RTP callback to send the last GOB */
4567 s->avctx->rtp_callback(s->ptr_lastgob, pdif, 0);
4571 #endif //CONFIG_ENCODERS
/**
 * Gather per-coefficient statistics for the encoder's adaptive DCT
 * denoiser.  Accumulates the magnitude of each of the 64 coefficients
 * into dct_error_sum and shrinks each coefficient toward zero by the
 * learned dct_offset, with separate statistics for intra and inter
 * blocks (indexed by s->mb_intra).
 * NOTE(review): the loop-variable declaration and the sign-branch
 * conditions are elided in this view of the file.
 */
4573 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
4574 const int intra= s->mb_intra;
/* one more block of this mode processed (used to normalize the sums) */
4577 s->dct_count[intra]++;
/* walk all 64 DCT coefficients of the block */
4579 for(i=0; i<64; i++){
4580 int level= block[i];
/* positive-level path: record magnitude, shrink by offset, floor at 0 */
4584 s->dct_error_sum[intra][i] += level;
4585 level -= s->dct_offset[intra][i];
4586 if(level<0) level=0;
/* negative-level path: mirrored — record |level|, shrink, cap at 0 */
4588 s->dct_error_sum[intra][i] -= level;
4589 level += s->dct_offset[intra][i];
4590 if(level>0) level=0;
4597 #ifdef CONFIG_ENCODERS
/**
 * Trellis (rate-distortion optimal) quantization of one 8x8 block.
 * Runs the forward DCT, then for every scan position evaluates candidate
 * quantized levels, keeping "survivor" paths scored as
 *     distortion + lambda * bits(run, level)
 * and finally writes the best path's levels back into block[].
 *
 * @param s       encoder context (scan tables, matrices, VLC length tables)
 * @param block   DCT block, transformed and quantized in place
 * @param n       block index within the macroblock
 * @param qscale  quantizer scale for this block
 * @param overflow set non-zero if a quantized level exceeds s->max_qcoeff
 * @return index of the last non-zero coefficient (< start_i if all zero)
 *
 * NOTE(review): many intermediate lines (declarations of q, bias, qmat,
 * coeff[][], survivor[], score_tab[], run_tab[], level_tab[], max, and
 * several braces / else-branches) are elided in this view of the file.
 */
4599 static int dct_quantize_trellis_c(MpegEncContext *s,
4600 DCTELEM *block, int n,
4601 int qscale, int *overflow){
4603 const uint8_t *scantable= s->intra_scantable.scantable;
4604 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4606 unsigned int threshold1, threshold2;
4618 int coeff_count[64];
4619 int qmul, qadd, start_i, last_non_zero, i, dc;
4620 const int esc_length= s->ac_esc_length;
4622 uint8_t * last_length;
/* lagrange multiplier scaled to the fixed-point bit-cost domain */
4623 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
/* forward DCT in place */
4625 s->dsp.fdct (block);
/* optional denoising pass before quantization */
4627 if(s->dct_error_sum)
4628 s->denoise_dct(s, block);
4630 qadd= ((qscale-1)|1)*8;
4641 /* For AIC we skip quant/dequant of INTRADC */
4646 /* note: block[0] is assumed to be positive */
4647 block[0] = (block[0] + (q >> 1)) / q;
/* intra setup: intra matrix and intra AC VLC length tables */
4650 qmat = s->q_intra_matrix[qscale];
4651 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4652 bias= 1<<(QMAT_SHIFT-1);
4653 length = s->intra_ac_vlc_length;
4654 last_length= s->intra_ac_vlc_last_length;
/* inter setup: inter matrix and inter AC VLC length tables */
4658 qmat = s->q_inter_matrix[qscale];
4659 length = s->inter_ac_vlc_length;
4660 last_length= s->inter_ac_vlc_last_length;
/* dead-zone thresholds; the unsigned compare below tests |level| at once */
4664 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4665 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient above the dead zone */
4667 for(i=63; i>=start_i; i--) {
4668 const int j = scantable[i];
4669 int level = block[j] * qmat[j];
4671 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: build up to two candidate levels per scan position */
4677 for(i=start_i; i<=last_non_zero; i++) {
4678 const int j = scantable[i];
4679 int level = block[j] * qmat[j];
4681 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4682 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4683 if(((unsigned)(level+threshold1))>threshold2){
/* positive coefficient: candidates are level and level-1 */
4685 level= (bias + level)>>QMAT_SHIFT;
4687 coeff[1][i]= level-1;
4688 // coeff[2][k]= level-2;
/* negative coefficient: candidates are -level and -level+1 */
4690 level= (bias - level)>>QMAT_SHIFT;
4691 coeff[0][i]= -level;
4692 coeff[1][i]= -level+1;
4693 // coeff[2][k]= -level+2;
4695 coeff_count[i]= FFMIN(level, 2);
4696 assert(coeff_count[i]);
/* sub-dead-zone coefficient: single candidate +/-1 by sign */
4699 coeff[0][i]= (level>>31)|1;
4704 *overflow= s->max_qcoeff < max; //overflow might have happened
/* nothing above the dead zone: clear the AC part and bail out */
4706 if(last_non_zero < start_i){
4707 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4708 return last_non_zero;
/* trellis initialisation: one survivor path starting at start_i */
4711 score_tab[start_i]= 0;
4712 survivor[0]= start_i;
/* main trellis loop over scan positions */
4715 for(i=start_i; i<=last_non_zero; i++){
4717 const int dct_coeff= ABS(block[ scantable[i] ]);
/* distortion of coding this coefficient as zero */
4718 const int zero_distoration= dct_coeff*dct_coeff;
4719 int best_score=256*256*256*120;
4720 for(level_index=0; level_index < coeff_count[i]; level_index++){
4722 int level= coeff[level_index][i];
4723 const int alevel= ABS(level);
/* reconstruct the dequantized value exactly as the decoder would */
4728 if(s->out_format == FMT_H263){
4729 unquant_coeff= alevel*qmul + qadd;
4731 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
/* MPEG intra reconstruction with odd-forcing (mismatch control) */
4733 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
4734 unquant_coeff = (unquant_coeff - 1) | 1;
/* MPEG inter reconstruction with odd-forcing */
4736 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
4737 unquant_coeff = (unquant_coeff - 1) | 1;
/* distortion relative to coding a zero at this position */
4742 distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distoration;
/* level fits the regular VLC range: score against every survivor run */
4744 if((level&(~127)) == 0){
4745 for(j=survivor-count-1; j>=0; j--){
4746 int run= i - survivor[j];
4747 int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4748 score += score_tab[i-run];
4750 if(score < best_score){
4753 level_tab[i+1]= level-64;
/* H.263: also track the best "last coefficient" coding separately */
4757 if(s->out_format == FMT_H263){
4758 for(j=survivor_count-1; j>=0; j--){
4759 int run= i - survivor[j];
4760 int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4761 score += score_tab[i-run];
4762 if(score < last_score){
4765 last_level= level-64;
/* level needs the escape code: charge the fixed escape length */
4771 distoration += esc_length*lambda;
4772 for(j=survivor_count-1; j>=0; j--){
4773 int run= i - survivor[j];
4774 int score= distoration + score_tab[i-run];
4776 if(score < best_score){
4779 level_tab[i+1]= level-64;
4783 if(s->out_format == FMT_H263){
4784 for(j=survivor_count-1; j>=0; j--){
4785 int run= i - survivor[j];
4786 int score= distoration + score_tab[i-run];
4787 if(score < last_score){
4790 last_level= level-64;
4798 score_tab[i+1]= best_score;
4800 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
/* prune survivors that can no longer win (looser bound for short runs) */
4801 if(last_non_zero <= 27){
4802 for(; survivor_count; survivor_count--){
4803 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4807 for(; survivor_count; survivor_count--){
4808 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4813 survivor[ survivor_count++ ]= i+1;
/* non-H.263: pick the best truncation point over all path endings */
4816 if(s->out_format != FMT_H263){
4817 last_score= 256*256*256*120;
4818 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4819 int score= score_tab[i];
4820 if(i) score += lambda*2; //FIXME exacter?
4822 if(score < last_score){
4825 last_level= level_tab[i];
4826 last_run= run_tab[i];
4831 s->coded_score[n] = last_score;
/* clear the block; the chosen path is written back below */
4834 last_non_zero= last_i - 1;
4835 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4837 if(last_non_zero < start_i)
4838 return last_non_zero;
/* special case: only the DC-position coefficient survived */
4840 if(last_non_zero == 0 && start_i == 0){
4842 int best_score= dc * dc;
4844 for(i=0; i<coeff_count[0]; i++){
4845 int level= coeff[i][0];
4846 int alevel= ABS(level);
4847 int unquant_coeff, score, distortion;
4849 if(s->out_format == FMT_H263){
4850 unquant_coeff= (alevel*qmul + qadd)>>3;
4852 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
4853 unquant_coeff = (unquant_coeff - 1) | 1;
4855 unquant_coeff = (unquant_coeff + 4) >> 3;
4856 unquant_coeff<<= 3 + 3;
4858 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4860 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4861 else score= distortion + esc_length*lambda;
4863 if(score < best_score){
4865 best_level= level - 64;
4868 block[0]= best_level;
4869 s->coded_score[n] = best_score - dc*dc;
4870 if(best_level == 0) return -1;
4871 else return last_non_zero;
/* walk the winning path backwards, writing levels in permuted order */
4877 block[ perm_scantable[last_non_zero] ]= last_level;
4880 for(; i>start_i; i -= run_tab[i] + 1){
4881 block[ perm_scantable[i-1] ]= level_tab[i];
4884 return last_non_zero;
/**
 * Plain (non-trellis) C quantizer for one 8x8 block: forward DCT,
 * optional denoising, dead-zone quantization with the per-qscale
 * matrix, then permutation of the non-zero coefficients to match the
 * selected IDCT's scan order.
 *
 * @param s       encoder context
 * @param block   DCT block, transformed and quantized in place
 * @param n       block index within the macroblock
 * @param qscale  quantizer scale
 * @param overflow set non-zero if a level exceeds s->max_qcoeff
 * @return index of the last non-zero coefficient
 *
 * NOTE(review): declarations of q, qmat, bias, max and several
 * branches/braces are elided in this view of the file.
 */
4887 static int dct_quantize_c(MpegEncContext *s,
4888 DCTELEM *block, int n,
4889 int qscale, int *overflow)
4891 int i, j, level, last_non_zero, q, start_i;
4893 const uint8_t *scantable= s->intra_scantable.scantable;
4896 unsigned int threshold1, threshold2;
/* forward DCT in place */
4898 s->dsp.fdct (block);
4900 if(s->dct_error_sum)
4901 s->denoise_dct(s, block);
4911 /* For AIC we skip quant/dequant of INTRADC */
4914 /* note: block[0] is assumed to be positive */
4915 block[0] = (block[0] + (q >> 1)) / q;
/* intra: intra matrix and intra quantizer bias */
4918 qmat = s->q_intra_matrix[qscale];
4919 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* inter: inter matrix and inter quantizer bias */
4923 qmat = s->q_inter_matrix[qscale];
4924 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* dead-zone thresholds; the unsigned compare tests |level| in one go */
4926 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4927 threshold2= (threshold1<<1);
/* backwards scan for the last coefficient above the dead zone */
4928 for(i=63;i>=start_i;i--) {
4930 level = block[j] * qmat[j];
4932 if(((unsigned)(level+threshold1))>threshold2){
/* quantize every coefficient up to last_non_zero */
4939 for(i=start_i; i<=last_non_zero; i++) {
4941 level = block[j] * qmat[j];
4943 // if( bias+level >= (1<<QMAT_SHIFT)
4944 // || bias-level >= (1<<QMAT_SHIFT)){
4945 if(((unsigned)(level+threshold1))>threshold2){
/* positive coefficient */
4947 level= (bias + level)>>QMAT_SHIFT;
/* negative coefficient */
4950 level= (bias - level)>>QMAT_SHIFT;
4958 *overflow= s->max_qcoeff < max; //overflow might have happened
4960 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4961 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4962 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4964 return last_non_zero;
4967 #endif //CONFIG_ENCODERS
/**
 * MPEG-1 intra dequantizer (C reference).  DC (block[0]) is multiplied
 * by the per-component DC scale; AC coefficients are rescaled with
 * qscale and the intra matrix, then forced odd — the (level - 1) | 1
 * "oddification" used for IDCT mismatch control.
 * NOTE(review): the luma/chroma selection condition and the sign
 * branches are elided in this view of the file.
 */
4969 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
4970 DCTELEM *block, int n, int qscale)
4972 int i, level, nCoeffs;
4973 const uint16_t *quant_matrix;
/* only dequantize up to the last coded coefficient */
4975 nCoeffs= s->block_last_index[n];
/* DC uses the luma or chroma DC scale depending on the block */
4978 block[0] = block[0] * s->y_dc_scale;
4980 block[0] = block[0] * s->c_dc_scale;
4981 /* XXX: only mpeg1 */
4982 quant_matrix = s->intra_matrix;
4983 for(i=1;i<=nCoeffs;i++) {
4984 int j= s->intra_scantable.permutated[i];
/* one sign branch: rescale and force the magnitude odd */
4989 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4990 level = (level - 1) | 1;
/* other sign branch: identical rescale + odd-forcing */
4993 level = (int)(level * qscale * quant_matrix[j]) >> 3;
4994 level = (level - 1) | 1;
/**
 * MPEG-1 inter dequantizer (C reference).  Every coefficient (including
 * index 0) is rescaled as ((2*level + 1) * qscale * matrix) >> 4 and
 * forced odd for IDCT mismatch control.
 * NOTE(review): indexes through s->intra_scantable.permutated even
 * though this is the inter path — presumably the permutation table is
 * shared between the two scantables; verify against their setup.
 */
5001 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
5002 DCTELEM *block, int n, int qscale)
5004 int i, level, nCoeffs;
5005 const uint16_t *quant_matrix;
/* only dequantize up to the last coded coefficient */
5007 nCoeffs= s->block_last_index[n];
5009 quant_matrix = s->inter_matrix;
/* inter blocks have no special DC handling: loop starts at 0 */
5010 for(i=0; i<=nCoeffs; i++) {
5011 int j= s->intra_scantable.permutated[i];
/* one sign branch: rescale and force odd */
5016 level = (((level << 1) + 1) * qscale *
5017 ((int) (quant_matrix[j]))) >> 4;
5018 level = (level - 1) | 1;
/* other sign branch: identical rescale + odd-forcing */
5021 level = (((level << 1) + 1) * qscale *
5022 ((int) (quant_matrix[j]))) >> 4;
5023 level = (level - 1) | 1;
/**
 * MPEG-2 intra dequantizer (C reference).  Like the MPEG-1 intra
 * version but WITHOUT per-coefficient odd-forcing, and with alternate
 * scan forcing a full 64-coefficient pass.
 */
5030 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
5031 DCTELEM *block, int n, int qscale)
5033 int i, level, nCoeffs;
5034 const uint16_t *quant_matrix;
/* alternate scan covers all 64 positions; otherwise stop at the last coded one */
5036 if(s->alternate_scan) nCoeffs= 63;
5037 else nCoeffs= s->block_last_index[n];
/* DC uses the luma or chroma DC scale depending on the block */
5040 block[0] = block[0] * s->y_dc_scale;
5042 block[0] = block[0] * s->c_dc_scale;
5043 quant_matrix = s->intra_matrix;
5044 for(i=1;i<=nCoeffs;i++) {
5045 int j= s->intra_scantable.permutated[i];
/* one sign branch: plain rescale (no odd-forcing in MPEG-2 intra) */
5050 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* other sign branch: identical rescale */
5053 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter dequantizer (C reference).  Rescales every coefficient
 * as ((2*level + 1) * qscale * matrix) >> 4; unlike MPEG-1 there is no
 * per-coefficient odd-forcing (MPEG-2 mismatch control works
 * differently — handling of the elided tail not visible here).
 */
5060 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
5061 DCTELEM *block, int n, int qscale)
5063 int i, level, nCoeffs;
5064 const uint16_t *quant_matrix;
/* alternate scan covers all 64 positions; otherwise stop at the last coded one */
5067 if(s->alternate_scan) nCoeffs= 63;
5068 else nCoeffs= s->block_last_index[n];
5070 quant_matrix = s->inter_matrix;
/* inter blocks: no special DC, loop from 0 */
5071 for(i=0; i<=nCoeffs; i++) {
5072 int j= s->intra_scantable.permutated[i];
/* one sign branch: rescale */
5077 level = (((level << 1) + 1) * qscale *
5078 ((int) (quant_matrix[j]))) >> 4;
/* other sign branch: identical rescale */
5081 level = (((level << 1) + 1) * qscale *
5082 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263 intra dequantizer (C reference): level * qmul +/- qadd by sign,
 * with DC scaled by the per-component DC scale.
 * NOTE(review): the declarations of nCoeffs and qmul, the luma/chroma
 * selection, and the sign-branch conditions are elided in this view.
 */
5091 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
5092 DCTELEM *block, int n, int qscale)
5094 int i, level, qmul, qadd;
/* an intra block must have at least the DC coefficient coded */
5097 assert(s->block_last_index[n]>=0);
/* DC uses the luma or chroma DC scale depending on the block */
5103 block[0] = block[0] * s->y_dc_scale;
5105 block[0] = block[0] * s->c_dc_scale;
/* qadd is forced odd, as the H.263 reconstruction requires */
5106 qadd = (qscale - 1) | 1;
/* raster_end maps the last scan index to the last raster position */
5113 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
/* AC coefficients only — DC handled above */
5115 for(i=1; i<=nCoeffs; i++) {
/* negative side: scale and subtract the offset */
5119 level = level * qmul - qadd;
/* positive side: scale and add the offset */
5121 level = level * qmul + qadd;
/**
 * H.263 inter dequantizer (C reference): level * qmul +/- qadd by sign
 * for every coefficient, DC included (loop starts at 0).
 * NOTE(review): declarations of nCoeffs/qmul and the sign-branch
 * conditions are elided in this view of the file.
 */
5128 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
5129 DCTELEM *block, int n, int qscale)
5131 int i, level, qmul, qadd;
/* a coded block must have a valid last index */
5134 assert(s->block_last_index[n]>=0);
/* qadd is forced odd, as the H.263 reconstruction requires */
5136 qadd = (qscale - 1) | 1;
/* raster_end maps the last scan index to the last raster position */
5139 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
5141 for(i=0; i<=nCoeffs; i++) {
/* negative side: scale and subtract the offset */
5145 level = level * qmul - qadd;
/* positive side: scale and add the offset */
5147 level = level * qmul + qadd;
/**
 * AVOption table shared by the MPEG-4-family encoders below: exposes
 * rate control, quantizer, masking and motion-estimation parameters of
 * MpegEncContext/AVCodecContext to the option API.
 * NOTE(review): several entries between the visible lines are elided
 * in this view of the file.
 */
5154 static const AVOption mpeg4_options[] =
5156 AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
/* NOTE(review): the two help-string halves concatenate without a
 * separating space ("...reference" + "the reference...") — runtime
 * string, left untouched here; fix at the source if confirmed. */
5157 AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
5158 "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
5159 bit_rate_tolerance, 4, 240000000, 8000),
5160 AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
5161 AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
5162 AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
5163 rc_eq, "tex^qComp,option1,options2", 0),
5164 AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
5165 rc_min_rate, 4, 24000000, 0),
5166 AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
5167 rc_max_rate, 4, 24000000, 0),
/* NOTE(review): "aggresivity" in the option name/description is a
 * typo (runtime string — renaming would break existing users). */
5168 AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
5169 rc_buffer_aggressivity, 4, 24000000, 0),
5170 AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
5171 rc_initial_cplx, 0., 9999999., 0),
/* NOTE(review): min and max are both 0. here, which makes the option
 * range degenerate — looks wrong, verify intended bounds. */
5172 AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
5173 i_quant_factor, 0., 0., 0),
/* NOTE(review): this entry is named "i_quant_offset" but maps to the
 * i_quant_factor field — almost certainly a copy-paste bug; should
 * presumably reference i_quant_offset. Not changed here because the
 * surrounding macro context is partially elided in this view. */
5174 AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
5175 i_quant_factor, -999999., 999999., 0),
5176 AVOPTION_CODEC_INT("dct_algo", "dct alghorithm",
5177 dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
5178 AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
5179 lumi_masking, 0., 999999., 0),
5180 AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
5181 temporal_cplx_masking, 0., 999999., 0),
5182 AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
5183 spatial_cplx_masking, 0., 999999., 0),
5184 AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
5185 p_masking, 0., 999999., 0),
5186 AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
5187 dark_masking, 0., 999999., 0),
5188 AVOPTION_CODEC_INT("idct_algo", "idct alghorithm",
5189 idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
5191 AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
5193 AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
5196 AVOPTION_CODEC_INT("me_cmp", "ME compare function",
5197 me_cmp, 0, 24000000, 0),
5198 AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
5199 me_sub_cmp, 0, 24000000, 0),
5202 AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
5203 dia_size, 0, 24000000, 0),
5204 AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
5205 last_predictor_count, 0, 24000000, 0),
5207 AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
5208 pre_me, 0, 24000000, 0),
5209 AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
5210 me_pre_cmp, 0, 24000000, 0),
5212 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
5213 me_range, 0, 24000000, 0),
5214 AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape",
5215 pre_dia_size, 0, 24000000, 0),
5216 AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
5217 me_subpel_quality, 0, 24000000, 0),
/* NOTE(review): duplicate of the "me_range" entry above — one of the
 * two should likely be removed or renamed. */
5218 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
5219 me_range, 0, 24000000, 0),
5220 AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
5221 flags, CODEC_FLAG_PSNR, 0),
5222 AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
/* pull in the options common to all codecs */
5224 AVOPTION_SUB(avoptions_common),
5228 #ifdef CONFIG_ENCODERS
/*
 * Encoder registration tables.  Each AVCodec below describes one
 * encoder backed by the shared MpegEncContext machinery; most struct
 * fields (name, type, init/encode/close callbacks, capabilities) are
 * elided in this view of the file — only the codec id, the context
 * size and, where set, the .options pointer are visible.
 */
5230 AVCodec mpeg1video_encoder = {
5233 CODEC_ID_MPEG1VIDEO,
5234 sizeof(MpegEncContext),
5242 AVCodec mpeg2video_encoder = {
5245 CODEC_ID_MPEG2VIDEO,
5246 sizeof(MpegEncContext),
5252 AVCodec h263_encoder = {
5256 sizeof(MpegEncContext),
5262 AVCodec h263p_encoder = {
5266 sizeof(MpegEncContext),
5272 AVCodec flv_encoder = {
5276 sizeof(MpegEncContext),
5282 AVCodec rv10_encoder = {
5286 sizeof(MpegEncContext),
/* the MPEG-4-family encoders share the mpeg4_options AVOption table */
5292 AVCodec mpeg4_encoder = {
5296 sizeof(MpegEncContext),
5300 .options = mpeg4_options,
5303 AVCodec msmpeg4v1_encoder = {
5307 sizeof(MpegEncContext),
5311 .options = mpeg4_options,
5314 AVCodec msmpeg4v2_encoder = {
5318 sizeof(MpegEncContext),
5322 .options = mpeg4_options,
5325 AVCodec msmpeg4v3_encoder = {
5329 sizeof(MpegEncContext),
5333 .options = mpeg4_options,
5336 AVCodec wmv1_encoder = {
5340 sizeof(MpegEncContext),
5344 .options = mpeg4_options,
5349 AVCodec mjpeg_encoder = {
5353 sizeof(MpegEncContext),
5359 #endif //CONFIG_ENCODERS