2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
25 * The simplest mpeg encoder (well, it was the simplest!).
31 #include "mpegvideo.h"
35 #include "fastmemcpy.h"
/* Forward declarations for the static (de)quantization helpers defined later
 * in this file, plus externs for the XvMC hardware-acceleration hooks that
 * are defined in a separate translation unit. */
41 #ifdef CONFIG_ENCODERS
42 static void encode_picture(MpegEncContext *s, int picture_number);
43 #endif //CONFIG_ENCODERS
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 DCTELEM *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
56 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
/* Encoder-only quantization / distortion helpers. */
57 #ifdef CONFIG_ENCODERS
58 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
59 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
60 static int sse_mb(MpegEncContext *s);
61 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
62 #endif //CONFIG_ENCODERS
/* XvMC entry points, implemented outside this file. */
65 extern int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
66 extern void XVMC_field_end(MpegEncContext *s);
67 extern void XVMC_decode_mb(MpegEncContext *s);
/* Global function pointer for replicating picture edges; defaults to the C
 * implementation. Presumably replaced by an arch-optimized version during
 * platform init — TODO confirm against the MPV_common_init_* code. */
70 void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
73 /* enable all paranoid tests for rounding, overflows, etc... */
79 /* for jpeg fast DCT */
/* Post-scaling factors for the AAN (Arai/Agui/Nakajima) fast DCT used by the
 * JPEG-style fdct paths; values are scaled up by 2^14 (see convert_matrix,
 * which divides by aanscales[i] with a QMAT_SHIFT+14 numerator). */
82 static const uint16_t aanscales[64] = {
83 /* precomputed values scaled up by 14 bits */
84 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
85 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
86 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
87 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
88 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
89 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
90 8867 , 12299, 11585, 10426, 8867, 6967, 4799, 2446,
91 4520 , 6270, 5906, 5315, 4520, 3552, 2446, 1247
/* Rounding table for deriving the H.263 chroma motion vector from the sum of
 * the four luma MVs (index = sum modulo 16). */
94 static const uint8_t h263_chroma_roundtab[16] = {
95 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
96 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
/* Identity mapping from luma qscale to chroma qscale; codecs with a modified
 * chroma quantizer (e.g. H.263 Annex T) install a different table later. */
99 static const uint8_t ff_default_chroma_qscale_table[32]={
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
101 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* Encoder-only shared defaults: MV penalty / fcode tables (filled in lazily
 * by MPV_encode_init) and the default pixel-format list. */
104 #ifdef CONFIG_ENCODERS
105 static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
106 static uint8_t default_fcode_tab[MAX_MV*2+1];
108 enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
/* Precomputes, for each qscale in [qmin,qmax], the integer reciprocal
 * quantization matrices used by dct_quantize: qmat (32-bit, QMAT_SHIFT
 * fixed point) and qmat16 (16-bit pair: multiplier + bias, for the MMX/fast
 * path). The chosen formula depends on which fdct is in use, because the
 * fast (AAN) DCT leaves its post-scale factors folded into the output and
 * they must be divided out here via aanscales[]. Entries are stored through
 * the IDCT permutation (j) so the quantizer can work on permuted coefficients.
 * NOTE(review): this chunk is missing interior lines (loop headers, #endif,
 * closing braces) — the commentary below reflects the visible code only. */
110 static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
111 const uint16_t *quant_matrix, int bias, int qmin, int qmax)
115 for(qscale=qmin; qscale<=qmax; qscale++){
/* Slow/accurate DCT (and faandct when post-scaled): plain reciprocal. */
117 if (dsp->fdct == ff_jpeg_fdct_islow
118 #ifdef FAAN_POSTSCALE
119 || dsp->fdct == ff_faandct
123 const int j= dsp->idct_permutation[i];
124 /* 16 <= qscale * quant_matrix[i] <= 7905 */
125 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
126 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
127 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
129 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) /
130 (qscale * quant_matrix[j]));
/* Fast AAN DCT (fdct_ifast / non-postscaled faandct): divide the AAN
 * post-scale factors out as well, hence the extra 14 bits of numerator. */
132 } else if (dsp->fdct == fdct_ifast
133 #ifndef FAAN_POSTSCALE
134 || dsp->fdct == ff_faandct
138 const int j= dsp->idct_permutation[i];
139 /* 16 <= qscale * quant_matrix[i] <= 7905 */
140 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
141 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
142 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
144 qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) /
145 (aanscales[i] * qscale * quant_matrix[j]));
/* Default path: also fill the 16-bit multiplier/bias pair for qmat16. */
149 const int j= dsp->idct_permutation[i];
150 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
151 So 16 <= qscale * quant_matrix[i] <= 7905
152 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
153 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
155 qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
156 // qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
157 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
/* Clamp: a multiplier of 0 or exactly 128*256 would break the 16-bit path. */
159 if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
160 qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
/* Derives the encoder's qscale from the current lambda (rate-distortion
 * multiplier), clips it to the user's [qmin,qmax] range, and recomputes
 * lambda2 (lambda squared in FF_LAMBDA fixed point). The 139/2^7 factor is
 * an empirical lambda->qscale conversion constant. */
166 static inline void update_qscale(MpegEncContext *s){
167 s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
168 s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
170 s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
172 #endif //CONFIG_ENCODERS
/* Initializes a ScanTable: stores the raw scan order, builds the permutated
 * scan (scan order composed with the IDCT coefficient permutation), and the
 * raster_end[] table. NOTE(review): the surrounding loops are missing from
 * this chunk; only the visible assignments are documented. */
174 void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
178 st->scantable= src_scantable;
/* permutated[i] = where coefficient i of the scan lands after IDCT perm. */
182 j = src_scantable[i];
183 st->permutated[i] = permutation[j];
192 j = st->permutated[i];
194 st->raster_end[i]= end;
198 #ifdef CONFIG_ENCODERS
/* Writes a 64-entry quantization matrix to the bitstream, 8 bits per entry,
 * in zigzag order (as required by MPEG-style headers). */
199 void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
205 put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
210 #endif //CONFIG_ENCODERS
212 /* init common dct for both encoder and decoder */
/* Installs the default C (de)quantizers, lets each platform's
 * MPV_common_init_* override them with optimized versions, optionally swaps
 * in the trellis quantizer, and builds the permuted scan tables. */
213 int DCT_common_init(MpegEncContext *s)
215 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
216 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
217 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
218 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
219 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
220 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
222 #ifdef CONFIG_ENCODERS
223 s->dct_quantize= dct_quantize_c;
224 s->denoise_dct= denoise_dct_c;
/* Per-architecture overrides (each guarded by its own #ifdef in the full
 * file — the guards are not visible in this chunk). */
228 MPV_common_init_mmx(s);
231 MPV_common_init_axp(s);
234 MPV_common_init_mlib(s);
237 MPV_common_init_mmi(s);
240 MPV_common_init_armv4l(s);
243 MPV_common_init_ppc(s);
246 #ifdef CONFIG_ENCODERS
/* Keep the (possibly arch-optimized) quantizer as the fast path before
 * optionally switching the main quantizer to trellis. */
247 s->fast_dct_quantize= s->dct_quantize;
249 if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
250 s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
253 #endif //CONFIG_ENCODERS
255 /* load & permutate scantables
256 note: only wmv uses differnt ones
258 if(s->alternate_scan){
259 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
260 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
262 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
263 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
265 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
266 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
268 s->picture_structure= PICT_FRAME;
/* Shallow-copies a Picture and marks the destination as a COPY so its
 * buffers are not released through the normal ownership path. */
273 static void copy_picture(Picture *dst, Picture *src){
275 dst->type= FF_BUFFER_TYPE_COPY;
/* Copies the per-frame metadata (type, quality, numbering, interlacing
 * flags) from src to dst without touching the pixel buffers. */
278 static void copy_picture_attributes(AVFrame *dst, AVFrame *src){
279 dst->pict_type = src->pict_type;
280 dst->quality = src->quality;
281 dst->coded_picture_number = src->coded_picture_number;
282 dst->display_picture_number = src->display_picture_number;
283 // dst->reference = src->reference;
285 dst->interlaced_frame = src->interlaced_frame;
286 dst->top_field_first = src->top_field_first;
290 * allocates a Picture
291 * The pixels are allocated/set by calling get_buffer() if shared=0
/* For shared=1 the pixel data must already be present and is only tagged as
 * SHARED; otherwise get_buffer() is called and its results validated
 * (age/type/data, stride consistency, matching u/v strides). The per-MB
 * side tables (variance, qscale, mb_type, motion vectors, ...) are then
 * allocated on the first use of this Picture. Errors jump to the shared
 * `fail:` label via the CHECKED_ALLOCZ macro. */
293 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
294 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
295 const int mb_array_size= s->mb_stride*s->mb_height;
296 const int b8_array_size= s->b8_stride*s->mb_height*2;
297 const int b4_array_size= s->b4_stride*s->mb_height*4;
301 assert(pic->data[0]);
302 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
303 pic->type= FF_BUFFER_TYPE_SHARED;
307 assert(!pic->data[0]);
309 r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
311 if(r<0 || !pic->age || !pic->type || !pic->data[0]){
312 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
/* The buffer provider must keep strides stable across frames. */
316 if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
317 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
321 if(pic->linesize[1] != pic->linesize[2]){
322 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n");
326 s->linesize = pic->linesize[0];
327 s->uvlinesize= pic->linesize[1];
/* First-time allocation of the per-macroblock side tables. */
330 if(pic->qscale_table==NULL){
332 CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t))
333 CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
334 CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t))
337 CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
338 CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
339 CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(uint32_t))
340 pic->mb_type= pic->mb_type_base + s->mb_stride+1;
/* H.264 uses 4x4-block motion vectors; other codecs use 8x8 granularity. */
341 if(s->out_format == FMT_H264){
343 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+1) * sizeof(int16_t))
344 pic->motion_val[i]= pic->motion_val_base[i]+1;
345 CHECKED_ALLOCZ(pic->ref_index[i] , b8_array_size * sizeof(uint8_t))
347 pic->motion_subsample_log2= 2;
348 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
350 CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+1) * sizeof(int16_t)*2) //FIXME
351 pic->motion_val[i]= pic->motion_val_base[i]+1;
353 pic->motion_subsample_log2= 3;
355 pic->qstride= s->mb_stride;
356 CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
359 //it might be nicer if the application would keep track of these but it would require a API change
360 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
361 s->prev_pict_types[0]= s->pict_type;
362 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
363 pic->age= INT_MAX; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway
366 fail: //for the CHECKED_ALLOCZ macro
371 * deallocates a picture
/* Releases the pixel buffer back to the provider (unless it is SHARED, in
 * which case the application owns it) and frees all per-MB side tables.
 * av_freep() NULLs the pointers, so repeated calls are safe. */
373 static void free_picture(MpegEncContext *s, Picture *pic){
376 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
377 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
380 av_freep(&pic->mb_var);
381 av_freep(&pic->mc_mb_var);
382 av_freep(&pic->mb_mean);
383 av_freep(&pic->mbskip_table);
384 av_freep(&pic->qscale_table);
385 av_freep(&pic->mb_type_base);
386 av_freep(&pic->pan_scan);
389 av_freep(&pic->motion_val_base[i]);
390 av_freep(&pic->ref_index[i]);
393 if(pic->type == FF_BUFFER_TYPE_SHARED){
402 /* init common structure for both encoder and decoder */
/* Computes all derived geometry (MB counts, strides, edge positions),
 * normalizes the fourcc tags, and allocates every context-lifetime buffer:
 * edge emulation scratch, MV tables, quantization matrices, MB type /
 * skip / error tables, per-codec extras (mpeg4 partitioned bitstream
 * buffers, msmpeg4 ac stats), and H.263 / error-resilience prediction
 * state. Allocation failures jump to a shared `fail:` label (not visible
 * in this chunk) via CHECKED_ALLOCZ. */
403 int MPV_common_init(MpegEncContext *s)
405 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
407 dsputil_init(&s->dsp, s->avctx);
410 s->flags= s->avctx->flags;
411 s->flags2= s->avctx->flags2;
/* Geometry: sizes rounded up to whole 16x16 macroblocks; strides have +1
 * guard columns for prediction at the picture edges. */
413 s->mb_width = (s->width + 15) / 16;
414 s->mb_height = (s->height + 15) / 16;
415 s->mb_stride = s->mb_width + 1;
416 s->b8_stride = s->mb_width*2 + 1;
417 s->b4_stride = s->mb_width*4 + 1;
418 mb_array_size= s->mb_height * s->mb_stride;
419 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
421 /* set default edge pos, will be overriden in decode_header if needed */
422 s->h_edge_pos= s->mb_width*16;
423 s->v_edge_pos= s->mb_height*16;
425 s->mb_num = s->mb_width * s->mb_height;
430 s->block_wrap[3]= s->mb_width*2 + 2;
432 s->block_wrap[5]= s->mb_width + 2;
435 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
436 s->chroma_qscale_table= ff_default_chroma_qscale_table;
438 s->progressive_sequence= 1;
439 s->progressive_frame= 1;
440 s->coded_picture_number = 0;
442 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
443 c_size = (s->mb_width + 2) * (s->mb_height + 2);
444 yc_size = y_size + 2 * c_size;
446 /* convert fourcc to upper case */
447 s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
448 + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
449 + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
450 + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
452 s->avctx->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
453 + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
454 + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
455 + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
457 CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
458 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
460 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* mb_index2xy maps a sequential MB index to its (x + y*mb_stride) slot. */
462 CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error ressilience code looks cleaner with this
463 for(y=0; y<s->mb_height; y++){
464 for(x=0; x<s->mb_width; x++){
465 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
468 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
471 /* Allocate MV tables */
472 CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
473 CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
474 CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
475 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
476 CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
477 CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
/* Working pointers skip the first guard row/column of each base table. */
478 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
479 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
480 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
481 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
482 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
483 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
485 //FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()
486 CHECKED_ALLOCZ(s->me.scratchpad, s->width*2*16*3*sizeof(uint8_t))
488 CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t))
489 CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
/* MPEG-4 data partitioning needs separate texture / pb2 bitstreams. */
491 if(s->codec_id==CODEC_ID_MPEG4){
492 CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
493 CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE);
496 if(s->msmpeg4_version){
497 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
499 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
501 /* Allocate MB type table */
502 CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint16_t)) //needed for encoding
504 CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
506 CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
507 CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
508 CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
509 CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
510 CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
511 CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
513 if(s->avctx->noise_reduction){
514 CHECKED_ALLOCZ(s->dct_error_sum, 2 * 64 * sizeof(int))
515 CHECKED_ALLOCZ(s->dct_offset, 2 * 64 * sizeof(uint16_t))
518 CHECKED_ALLOCZ(s->blocks, 64*6*2 * sizeof(DCTELEM))
520 CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))
522 CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
524 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
525 /* interlaced direct mode decoding tables */
530 CHECKED_ALLOCZ(s->b_field_mv_table_base[i][j][k] , mv_table_size * 2 * sizeof(int16_t))
531 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
533 CHECKED_ALLOCZ(s->b_field_select_table[i][j] , mb_array_size * 2 * sizeof(uint8_t))
534 CHECKED_ALLOCZ(s->p_field_mv_table_base[i][j] , mv_table_size * 2 * sizeof(int16_t))
535 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
537 CHECKED_ALLOCZ(s->p_field_select_table[i] , mb_array_size * 2 * sizeof(uint8_t))
540 if (s->out_format == FMT_H263) {
542 CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
543 s->ac_val[1] = s->ac_val[0] + y_size;
544 s->ac_val[2] = s->ac_val[1] + c_size;
547 CHECKED_ALLOCZ(s->coded_block, y_size);
549 /* divx501 bitstream reorder buffer */
550 CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
552 /* cbp, ac_pred, pred_dir */
553 CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
554 CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
557 if (s->h263_pred || s->h263_plus || !s->encoding) {
559 //MN: we need these for error resilience of intra-frames
560 CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
561 s->dc_val[1] = s->dc_val[0] + y_size;
562 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = neutral DC predictor (2^10, mid-range for 8-bit * dc_scale). */
563 for(i=0;i<yc_size;i++)
564 s->dc_val[0][i] = 1024;
567 /* which mb is a intra block */
568 CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
569 memset(s->mbintra_table, 1, mb_array_size);
571 /* default structure is frame */
572 s->picture_structure = PICT_FRAME;
574 /* init macroblock skip table */
575 CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
576 //Note the +1 is for a quicker mpeg4 slice_end detection
577 CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
579 s->block= s->blocks[0];
582 s->pblocks[i] = (short *)(&s->block[i]);
585 s->parse_context.state= -1;
/* Extra planes only needed when MV/QP/MB-type visualization is enabled. */
586 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
587 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
588 s->visualization_buffer[1] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
589 s->visualization_buffer[2] = av_malloc((s->mb_width*8 + EDGE_WIDTH) * s->mb_height*8 + EDGE_WIDTH);
592 s->context_initialized = 1;
602 /* init common structure for both encoder and decoder */
/* Frees everything MPV_common_init (and alloc_picture) allocated. av_freep
 * NULLs each pointer, so a partially-initialized context is safe to pass;
 * the derived working pointers (e.g. b_forw_mv_table) are NULLed explicitly
 * because they alias into the *_base allocations. */
603 void MPV_common_end(MpegEncContext *s)
607 av_freep(&s->parse_context.buffer);
608 s->parse_context.buffer_size=0;
610 av_freep(&s->mb_type);
611 av_freep(&s->p_mv_table_base);
612 av_freep(&s->b_forw_mv_table_base);
613 av_freep(&s->b_back_mv_table_base);
614 av_freep(&s->b_bidir_forw_mv_table_base);
615 av_freep(&s->b_bidir_back_mv_table_base);
616 av_freep(&s->b_direct_mv_table_base);
618 s->b_forw_mv_table= NULL;
619 s->b_back_mv_table= NULL;
620 s->b_bidir_forw_mv_table= NULL;
621 s->b_bidir_back_mv_table= NULL;
622 s->b_direct_mv_table= NULL;
/* Field-MV tables (loop headers for i/j/k not visible in this chunk). */
626 av_freep(&s->b_field_mv_table_base[i][j][k]);
627 s->b_field_mv_table[i][j][k]=NULL;
629 av_freep(&s->b_field_select_table[i][j]);
630 av_freep(&s->p_field_mv_table_base[i][j]);
631 s->p_field_mv_table[i][j]=NULL;
633 av_freep(&s->p_field_select_table[i]);
636 av_freep(&s->dc_val[0]);
637 av_freep(&s->ac_val[0]);
638 av_freep(&s->coded_block);
639 av_freep(&s->mbintra_table);
640 av_freep(&s->cbp_table);
641 av_freep(&s->pred_dir_table);
642 av_freep(&s->me.scratchpad);
643 av_freep(&s->me.map);
644 av_freep(&s->me.score_map);
646 av_freep(&s->mbskip_table);
647 av_freep(&s->prev_pict_types);
648 av_freep(&s->bitstream_buffer);
649 av_freep(&s->tex_pb_buffer);
650 av_freep(&s->pb2_buffer);
651 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
652 av_freep(&s->avctx->stats_out);
653 av_freep(&s->ac_stats);
654 av_freep(&s->error_status_table);
655 av_freep(&s->mb_index2xy);
656 av_freep(&s->lambda_table);
657 av_freep(&s->q_intra_matrix);
658 av_freep(&s->q_inter_matrix);
659 av_freep(&s->q_intra_matrix16);
660 av_freep(&s->q_inter_matrix16);
661 av_freep(&s->blocks);
662 av_freep(&s->input_picture);
663 av_freep(&s->reordered_input_picture);
664 av_freep(&s->dct_error_sum);
665 av_freep(&s->dct_offset);
/* Release every Picture (buffers + side tables) before the array itself. */
668 for(i=0; i<MAX_PICTURE_COUNT; i++){
669 free_picture(s, &s->picture[i]);
672 av_freep(&s->picture);
673 avcodec_default_free_buffers(s->avctx);
674 s->context_initialized = 0;
677 s->current_picture_ptr= NULL;
679 if (s->visualization_buffer[i])
680 av_free(s->visualization_buffer[i]);
683 #ifdef CONFIG_ENCODERS
685 /* init video encoder */
/* Validates the user's AVCodecContext options against what the selected
 * codec supports (4MV, OBMC, qpel, B-frames, partitioning, ...), sets the
 * per-codec output format and feature flags in the big switch, builds the
 * default MV-penalty/fcode tables, calls MPV_common_init, installs the
 * default quantization matrices (optionally overridden by the user), and
 * precomputes the reciprocal matrices via convert_matrix. Many error-return
 * and `break` lines are missing from this chunk. */
686 int MPV_encode_init(AVCodecContext *avctx)
688 MpegEncContext *s = avctx->priv_data;
690 int chroma_h_shift, chroma_v_shift;
692 avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
694 s->bit_rate = avctx->bit_rate;
695 s->width = avctx->width;
696 s->height = avctx->height;
697 if(avctx->gop_size > 600){
698 av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
701 s->gop_size = avctx->gop_size;
703 s->flags= avctx->flags;
704 s->flags2= avctx->flags2;
705 s->max_b_frames= avctx->max_b_frames;
706 s->codec_id= avctx->codec->id;
707 s->luma_elim_threshold = avctx->luma_elim_threshold;
708 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
709 s->strict_std_compliance= avctx->strict_std_compliance;
710 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
711 s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
712 s->mpeg_quant= avctx->mpeg_quant;
713 s->rtp_mode= !!avctx->rtp_payload_size;
715 if (s->gop_size <= 1) {
722 s->me_method = avctx->me_method;
725 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is needed iff any masking option or QP-RD is on. */
727 s->adaptive_quant= ( s->avctx->lumi_masking
728 || s->avctx->dark_masking
729 || s->avctx->temporal_cplx_masking
730 || s->avctx->spatial_cplx_masking
731 || s->avctx->p_masking
732 || (s->flags&CODEC_FLAG_QP_RD))
735 s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
736 s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
737 s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
/* ---- option/codec compatibility checks; each rejects invalid combos ---- */
739 if(avctx->rc_max_rate && !avctx->rc_buffer_size){
740 av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
744 if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
745 av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
748 if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
749 && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
750 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
754 if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
755 av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n");
759 if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
760 av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
764 if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
765 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
769 if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
770 av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
774 if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
775 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
779 if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
780 av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n");
784 if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
785 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
789 if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
790 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
794 if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
795 av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection arent supported yet\n");
/* Reduce the framerate fraction to lowest terms. */
799 i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base);
801 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
802 avctx->frame_rate /= i;
803 avctx->frame_rate_base /= i;
/* Default quantizer rounding biases; per-codec, overridable by the user. */
807 if(s->codec_id==CODEC_ID_MJPEG){
808 s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
809 s->inter_quant_bias= 0;
810 }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
811 s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
812 s->inter_quant_bias= 0;
814 s->intra_quant_bias=0;
815 s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
818 if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
819 s->intra_quant_bias= avctx->intra_quant_bias;
820 if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
821 s->inter_quant_bias= avctx->inter_quant_bias;
823 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
/* MPEG-4 time base: resolution limited to 16 bits. */
825 av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
826 s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
/* ---- per-codec setup (many case/break lines missing in this chunk) ---- */
828 switch(avctx->codec->id) {
829 case CODEC_ID_MPEG1VIDEO:
830 s->out_format = FMT_MPEG1;
831 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
832 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
834 case CODEC_ID_MPEG2VIDEO:
835 s->out_format = FMT_MPEG1;
836 s->low_delay= 0; //s->max_b_frames ? 0 : 1;
837 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
842 s->out_format = FMT_MJPEG;
843 s->intra_only = 1; /* force intra only for jpeg */
844 s->mjpeg_write_tables = 1; /* write all tables */
845 s->mjpeg_data_only_frames = 0; /* write all the needed headers */
846 s->mjpeg_vsample[0] = 1<<chroma_v_shift;
847 s->mjpeg_vsample[1] = 1;
848 s->mjpeg_vsample[2] = 1;
849 s->mjpeg_hsample[0] = 1<<chroma_h_shift;
850 s->mjpeg_hsample[1] = 1;
851 s->mjpeg_hsample[2] = 1;
852 if (mjpeg_init(s) < 0)
859 if (h263_get_picture_format(s->width, s->height) == 7) {
860 av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n");
863 s->out_format = FMT_H263;
864 s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
869 s->out_format = FMT_H263;
872 s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
873 s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
874 s->modified_quant= s->h263_aic;
875 s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
876 s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
877 s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
878 s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
879 s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
882 /* These are just to be sure */
887 s->out_format = FMT_H263;
888 s->h263_flv = 2; /* format = 1; 11-bit codes */
889 s->unrestricted_mv = 1;
890 s->rtp_mode=0; /* don't allow GOB */
895 s->out_format = FMT_H263;
900 s->out_format = FMT_H263;
902 s->unrestricted_mv = 1;
903 s->low_delay= s->max_b_frames ? 0 : 1;
904 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
906 case CODEC_ID_MSMPEG4V1:
907 s->out_format = FMT_H263;
910 s->unrestricted_mv = 1;
911 s->msmpeg4_version= 1;
915 case CODEC_ID_MSMPEG4V2:
916 s->out_format = FMT_H263;
919 s->unrestricted_mv = 1;
920 s->msmpeg4_version= 2;
924 case CODEC_ID_MSMPEG4V3:
925 s->out_format = FMT_H263;
928 s->unrestricted_mv = 1;
929 s->msmpeg4_version= 3;
930 s->flipflop_rounding=1;
935 s->out_format = FMT_H263;
938 s->unrestricted_mv = 1;
939 s->msmpeg4_version= 4;
940 s->flipflop_rounding=1;
945 s->out_format = FMT_H263;
948 s->unrestricted_mv = 1;
949 s->msmpeg4_version= 5;
950 s->flipflop_rounding=1;
959 { /* set up some save defaults, some codecs might override them later */
965 default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
966 memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
967 memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
969 for(i=-16; i<16; i++){
970 default_fcode_tab[i + MAX_MV]= 1;
974 s->me.mv_penalty= default_mv_penalty;
975 s->fcode_tab= default_fcode_tab;
977 /* dont use mv_penalty table for crap MV as it would be confused */
978 //FIXME remove after fixing / removing old ME
979 if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
984 if (MPV_common_init(s) < 0)
987 if(s->modified_quant)
988 s->chroma_qscale_table= ff_h263_chroma_qscale_table;
989 s->progressive_frame=
990 s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME));
992 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
996 #ifdef CONFIG_ENCODERS
998 if (s->out_format == FMT_H263)
1000 if(s->msmpeg4_version)
1001 ff_msmpeg4_encode_init(s);
1003 if (s->out_format == FMT_MPEG1)
1004 ff_mpeg1_encode_init(s);
1007 /* init default q matrix */
/* Matrices are stored through the IDCT permutation (j). */
1009 int j= s->dsp.idct_permutation[i];
1011 if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
1012 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1013 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1014 }else if(s->out_format == FMT_H263){
1015 s->intra_matrix[j] =
1016 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1020 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1021 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1023 if(s->avctx->intra_matrix)
1024 s->intra_matrix[j] = s->avctx->intra_matrix[i];
1025 if(s->avctx->inter_matrix)
1026 s->inter_matrix[j] = s->avctx->inter_matrix[i];
1029 /* precompute matrix */
1030 /* for mjpeg, we do include qscale in the matrix */
1031 if (s->out_format != FMT_MJPEG) {
1032 convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
1033 s->intra_matrix, s->intra_quant_bias, 1, 31);
1034 convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
1035 s->inter_matrix, s->inter_quant_bias, 1, 31);
1038 if(ff_rate_control_init(s) < 0)
1041 s->picture_number = 0;
1042 s->input_picture_number = 0;
1043 s->picture_in_gop_number = 0;
1044 /* motion detector init */
/* Encoder teardown: releases rate-control state, MJPEG tables when
 * applicable, and the extradata buffer. (Common-state teardown lines are
 * not visible in this chunk.) */
1051 int MPV_encode_end(AVCodecContext *avctx)
1053 MpegEncContext *s = avctx->priv_data;
1059 ff_rate_control_uninit(s);
1062 if (s->out_format == FMT_MJPEG)
1065 av_freep(&avctx->extradata);
1070 #endif //CONFIG_ENCODERS
/* Builds the derived run/level lookup tables for an RLTable: for each
 * "last" half of the table it computes max_level[run], max_run[level] and
 * index_run[run] (first table index for a given run; rl->n = "not coded"),
 * then heap-allocates copies into the RLTable. Allocation results are not
 * checked — presumably tolerated because this runs once at init. */
1072 void init_rl(RLTable *rl)
1074 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
1075 uint8_t index_run[MAX_RUN+1];
1076 int last, run, level, start, end, i;
1078 /* compute max_level[], max_run[] and index_run[] */
1079 for(last=0;last<2;last++) {
1088 memset(max_level, 0, MAX_RUN + 1);
1089 memset(max_run, 0, MAX_LEVEL + 1);
1090 memset(index_run, rl->n, MAX_RUN + 1);
1091 for(i=start;i<end;i++) {
1092 run = rl->table_run[i];
1093 level = rl->table_level[i];
/* Record only the FIRST table index for each run value. */
1094 if (index_run[run] == rl->n)
1096 if (level > max_level[run])
1097 max_level[run] = level;
1098 if (run > max_run[level])
1099 max_run[level] = run;
1101 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1102 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1103 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1104 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1105 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1106 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1110 /* draw the edges of width 'w' of an image of size width, height */
1111 //FIXME check that this is ok for mpeg4 interlaced
/* Replicates the outermost picture samples into the surrounding edge band
 * so unrestricted motion vectors can read outside the picture.
 * NOTE(review): several loop headers/braces are missing from this extract. */
1112 static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
1114     uint8_t *ptr, *last_line;
1117     last_line = buf + (height - 1) * wrap;
1119     /* top and bottom */
1120         memcpy(buf - (i + 1) * wrap, buf, width);
1121         memcpy(last_line + (i + 1) * wrap, last_line, width);
1123     /* left and right */
1125     for(i=0;i<height;i++) {
1126         memset(ptr - w, ptr[0], w);
1127         memset(ptr + width, ptr[width-1], w);
1132         memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
1133         memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
1134         memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
1135         memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
/* Returns the index of a free slot in s->picture[], preferring (in order):
 * unallocated+untyped, unallocated but typed, then any slot with NULL data.
 * NOTE(review): the 'shared' fast path and the fallthrough at the end are
 * missing from this extract. */
1139 int ff_find_unused_picture(MpegEncContext *s, int shared){
1143     for(i=0; i<MAX_PICTURE_COUNT; i++){
1144         if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
1147     for(i=0; i<MAX_PICTURE_COUNT; i++){
1148         if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
1150     for(i=0; i<MAX_PICTURE_COUNT; i++){
1151         if(s->picture[i].data[0]==NULL) return i;
/* Refreshes the per-coefficient noise-reduction offsets from the running
 * DCT error statistics, halving the accumulators once the sample count
 * exceeds 2^16 so the stats track recent frames. */
1159 static void update_noise_reduction(MpegEncContext *s){
1162     for(intra=0; intra<2; intra++){
1163         if(s->dct_count[intra] > (1<<16)){
1164             for(i=0; i<64; i++){
1165                 s->dct_error_sum[intra][i] >>=1;
1167             s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded (+1 avoids div-by-0) */
1170         for(i=0; i<64; i++){
1171             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
1177  * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
/* Selects/allocates the current picture, rotates last/next reference
 * pictures, adjusts pointers for field pictures and picks the dequantizer.
 * NOTE(review): this extract is missing interior lines (braces, else
 * branches, loop headers); code below is kept byte-identical. */
1179 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1185     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1187     /* mark&release old frames */
1188     if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
1189         avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
1191         /* release forgotten pictures */
1192         /* if(mpeg124/h263) */
1194         for(i=0; i<MAX_PICTURE_COUNT; i++){
1195             if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1196                 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1197                 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
1204     /* release non reference frames */
1205     for(i=0; i<MAX_PICTURE_COUNT; i++){
1206         if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1207             s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1211     if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
1212         pic= (AVFrame*)s->current_picture_ptr; //we already have an unused image (maybe it was set before reading the header)
1214         i= ff_find_unused_picture(s, 0);
1215         pic= (AVFrame*)&s->picture[i];
/* B frames are never used as references; others are (3 = top+bottom field) */
1218     pic->reference= s->pict_type != B_TYPE ? 3 : 0;
1220     pic->coded_picture_number= s->coded_picture_number++;
1222     if( alloc_picture(s, (Picture*)pic, 0) < 0)
1225     s->current_picture_ptr= (Picture*)pic;
1226     s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
1227     s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
1230     s->current_picture_ptr->pict_type= s->pict_type;
1231 //    if(s->flags && CODEC_FLAG_QSCALE)
1232 //        s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1233     s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
1235     copy_picture(&s->current_picture, s->current_picture_ptr);
/* rotate reference pictures (not for H.264, which manages its own lists) */
1237   if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1238     if (s->pict_type != B_TYPE) {
1239         s->last_picture_ptr= s->next_picture_ptr;
1240             s->next_picture_ptr= s->current_picture_ptr;
1243     if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
1244     if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
1246     if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
1247         av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1248         assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
1252     assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* field pictures: step into the right field and double the strides */
1254     if(s->picture_structure!=PICT_FRAME){
1257             if(s->picture_structure == PICT_BOTTOM_FIELD){
1258                 s->current_picture.data[i] += s->current_picture.linesize[i];
1260             s->current_picture.linesize[i] *= 2;
1261             s->last_picture.linesize[i] *=2;
1262             s->next_picture.linesize[i] *=2;
1267     s->hurry_up= s->avctx->hurry_up;
1268     s->error_resilience= avctx->error_resilience;
1270     /* set dequantizer, we can't do it during init as it might change for mpeg4
1271        and we can't do it in the header decode as init isn't called for mpeg4 there yet */
1272     if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1273         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1274         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1275     }else if(s->out_format == FMT_H263){
1276         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1277         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1279         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1280         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1283     if(s->dct_error_sum){
1284         assert(s->avctx->noise_reduction && s->encoding);
1286         update_noise_reduction(s);
1290     if(s->avctx->xvmc_acceleration)
1291         return XVMC_field_start(s, avctx);
1296 /* generic function for encode/decode called after a frame has been coded/decoded */
/* Finishes the frame: pads edges for motion prediction, updates picture-type
 * history, syncs current_picture back into the picture array, releases
 * non-reference buffers and clears the local Picture copies.
 * NOTE(review): some lines are missing from this extract. */
1297 void MPV_frame_end(MpegEncContext *s)
1300     /* draw edge for correct motion prediction if outside */
1302        //just to make sure that all data is rendered.
1303     if(s->avctx->xvmc_acceleration){
/* pad luma with EDGE_WIDTH, chroma with half of it (4:2:0 subsampling) */
1307     if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1308             draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
1309             draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1310             draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1314     s->last_pict_type    = s->pict_type;
1315     if(s->pict_type!=B_TYPE){
1316         s->last_non_b_pict_type= s->pict_type;
1319     /* copy back current_picture variables */
1320     for(i=0; i<MAX_PICTURE_COUNT; i++){
1321         if(s->picture[i].data[0] == s->current_picture.data[0]){
1322             s->picture[i]= s->current_picture;
1326     assert(i<MAX_PICTURE_COUNT);
1330         /* release non reference frames */
1331         for(i=0; i<MAX_PICTURE_COUNT; i++){
1332             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1333                 s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1337     // clear copies, to avoid confusion
1339     memset(&s->last_picture, 0, sizeof(Picture));
1340     memset(&s->next_picture, 0, sizeof(Picture));
1341     memset(&s->current_picture, 0, sizeof(Picture));
1346  * draws a line from (ex, ey) -> (sx, sy).
1347  * @param w width of the image
1348  * @param h height of the image
1349  * @param stride stride/linesize of the image
1350  * @param color color of the line
/* Fixed-point (16.16) DDA line drawer; adds 'color' to each touched sample.
 * NOTE(review): the endpoint-swap branches are missing from this extract. */
1352 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1355     sx= clip(sx, 0, w-1);
1356     sy= clip(sy, 0, h-1);
1357     ex= clip(ex, 0, w-1);
1358     ey= clip(ey, 0, h-1);
1360     buf[sy*stride + sx]+= color;
/* walk along the longer axis for best coverage */
1362     if(ABS(ex - sx) > ABS(ey - sy)){
1367         buf+= sx + sy*stride;
1369         f= ((ey-sy)<<16)/ex;
1370         for(x= 0; x <= ex; x++){
1371             y= ((x*f) + (1<<15))>>16;
1372             buf[y*stride + x]+= color;
1379         buf+= sx + sy*stride;
1381         if(ey) f= ((ex-sx)<<16)/ey;
1383         for(y= 0; y <= ey; y++){
1384             x= ((y*f) + (1<<15))>>16;
1385             buf[y*stride + x]+= color;
1391  * draws an arrow from (ex, ey) -> (sx, sy).
1392  * @param w width of the image
1393  * @param h height of the image
1394  * @param stride stride/linesize of the image
1395  * @param color color of the arrow
/* Draws the shaft plus, for vectors longer than 3 pixels, two head strokes
 * at the (sx, sy) end. */
1397 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1400     sx= clip(sx, -100, w+100);
1401     sy= clip(sy, -100, h+100);
1402     ex= clip(ex, -100, w+100);
1403     ey= clip(ey, -100, h+100);
1408     if(dx*dx + dy*dy > 3*3){
1411         int length= ff_sqrt((rx*rx + ry*ry)<<8);
1413         //FIXME subpixel accuracy
/* scale the head strokes to a fixed 3-pixel length */
1414         rx= ROUNDED_DIV(rx*3<<4, length);
1415         ry= ROUNDED_DIV(ry*3<<4, length);
1417         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1418         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1420     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1424  * prints debuging info for the given picture.
/* Emits per-macroblock debug output (skip counts, QP, MB type as ASCII art)
 * and, when visualization flags are set, draws motion vectors / QP / MB-type
 * overlays directly into a copy of the picture.
 * NOTE(review): many lines (declarations, braces, else branches) are missing
 * from this extract; code below is kept byte-identical. */
1426 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1428     if(!pict || !pict->mb_type) return;
1430     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1433         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1434         switch (pict->pict_type) {
1435             case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1436             case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1437             case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1438             case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1439             case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1440             case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1442         for(y=0; y<s->mb_height; y++){
1443             for(x=0; x<s->mb_width; x++){
1444                 if(s->avctx->debug&FF_DEBUG_SKIP){
1445                     int count= s->mbskip_table[x + y*s->mb_stride];
1446                     if(count>9) count=9;
1447                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1449                 if(s->avctx->debug&FF_DEBUG_QP){
1450                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1452                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1453                     int mb_type= pict->mb_type[x + y*s->mb_stride];
1454                     //Type & MV direction
1456                         av_log(s->avctx, AV_LOG_DEBUG, "P");
1457                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1458                         av_log(s->avctx, AV_LOG_DEBUG, "A");
1459                     else if(IS_INTRA4x4(mb_type))
1460                         av_log(s->avctx, AV_LOG_DEBUG, "i");
1461                     else if(IS_INTRA16x16(mb_type))
1462                         av_log(s->avctx, AV_LOG_DEBUG, "I");
1463                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1464                         av_log(s->avctx, AV_LOG_DEBUG, "d");
1465                     else if(IS_DIRECT(mb_type))
1466                         av_log(s->avctx, AV_LOG_DEBUG, "D");
1467                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1468                         av_log(s->avctx, AV_LOG_DEBUG, "g");
1469                     else if(IS_GMC(mb_type))
1470                         av_log(s->avctx, AV_LOG_DEBUG, "G");
1471                     else if(IS_SKIP(mb_type))
1472                         av_log(s->avctx, AV_LOG_DEBUG, "S");
1473                     else if(!USES_LIST(mb_type, 1))
1474                         av_log(s->avctx, AV_LOG_DEBUG, ">");
1475                     else if(!USES_LIST(mb_type, 0))
1476                         av_log(s->avctx, AV_LOG_DEBUG, "<");
1478                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1479                         av_log(s->avctx, AV_LOG_DEBUG, "X");
/* partition-size glyph: + (8x8), - (16x8), ¦ (8x16), space (16x16/intra) */
1484                         av_log(s->avctx, AV_LOG_DEBUG, "+");
1485                     else if(IS_16X8(mb_type))
1486                         av_log(s->avctx, AV_LOG_DEBUG, "-");
1487                     else if(IS_8X16(mb_type))
1488                         av_log(s->avctx, AV_LOG_DEBUG, "¦");
1489                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1490                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1492                         av_log(s->avctx, AV_LOG_DEBUG, "?");
1495                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1496                         av_log(s->avctx, AV_LOG_DEBUG, "=");
1498                         av_log(s->avctx, AV_LOG_DEBUG, " ");
1500 //                av_log(s->avctx, AV_LOG_DEBUG, " ");
1502             av_log(s->avctx, AV_LOG_DEBUG, "\n");
1506     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1507         const int shift= 1 + s->quarter_sample;
1511         int h_chroma_shift, v_chroma_shift;
1512         s->low_delay=0; //needed to see the vectors without trashing the buffers
1514         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* draw into a private copy so the real decode buffers stay untouched */
1516             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*s->height:pict->linesize[i]*s->height >> v_chroma_shift);
1517             pict->data[i]= s->visualization_buffer[i];
1519         pict->type= FF_BUFFER_TYPE_COPY;
1522         for(mb_y=0; mb_y<s->mb_height; mb_y++){
1524             for(mb_x=0; mb_x<s->mb_width; mb_x++){
1525                 const int mb_index= mb_x + mb_y*s->mb_stride;
1526                 if((s->avctx->debug_mv) && pict->motion_val){
/* type 0: P forward, 1: B forward, 2: B backward vectors */
1528                   for(type=0; type<3; type++){
1531                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
1535                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
1539                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
1544                     if(!USES_LIST(pict->mb_type[mb_index], direction))
1547                     if(IS_8X8(pict->mb_type[mb_index])){
1550                         int sx= mb_x*16 + 4 + 8*(i&1);
1551                         int sy= mb_y*16 + 4 + 8*(i>>1);
1552                         int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1553                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1554                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1555                         draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1557                     }else if(IS_16X8(pict->mb_type[mb_index])){
1561                         int sy=mb_y*16 + 4 + 8*i;
1562                         int xy=1 + mb_x*2 + (mb_y*2 + 1 + i)*(s->mb_width*2 + 2);
1563                         int mx=(pict->motion_val[direction][xy][0]>>shift) + sx;
1564                         int my=(pict->motion_val[direction][xy][1]>>shift) + sy;
1565                         draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1568                       int sx= mb_x*16 + 8;
1569                       int sy= mb_y*16 + 8;
1570                       int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1571                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1572                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1573                       draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1577                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
/* paint chroma planes with a gray level proportional to QP */
1578                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1581                         *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= c;
1582                         *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= c;
1585                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1586                     int mb_type= pict->mb_type[mb_index];
/* COLOR(): pick a U/V chroma pair from hue angle 'theta' and saturation 'r' */
1589 #define COLOR(theta, r)\
1590 u= (int)(128 + r*cos(theta*3.141592/180));\
1591 v= (int)(128 + r*sin(theta*3.141592/180));
1595                     if(IS_PCM(mb_type)){
1597                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1599                     }else if(IS_INTRA4x4(mb_type)){
1601                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1603                     }else if(IS_DIRECT(mb_type)){
1605                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1607                     }else if(IS_GMC(mb_type)){
1609                     }else if(IS_SKIP(mb_type)){
1611                     }else if(!USES_LIST(mb_type, 1)){
1613                     }else if(!USES_LIST(mb_type, 0)){
1616                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1620                     u*= 0x0101010101010101ULL;
1621                     v*= 0x0101010101010101ULL;
1623                         *(uint64_t*)(pict->data[1] + 8*mb_x + (8*mb_y + y)*pict->linesize[1])= u;
1624                         *(uint64_t*)(pict->data[2] + 8*mb_x + (8*mb_y + y)*pict->linesize[2])= v;
/* XOR luma borders to mark sub-16x16 partitions and interlacing */
1628                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1629                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1630                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1632                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1634                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1637                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1641                 s->mbskip_table[mb_index]=0;
1647 #ifdef CONFIG_ENCODERS
/* Sum of absolute errors of a 16x16 block against a constant reference value
 * 'ref' (used to estimate how "flat" a macroblock is).
 * NOTE(review): the accumulator declaration and return are missing from
 * this extract. */
1649 static int get_sae(uint8_t *src, int ref, int stride){
1653     for(y=0; y<16; y++){
1654         for(x=0; x<16; x++){
1655             acc+= ABS(src[x+y*stride] - ref);
/* Counts 16x16 blocks where intra coding looks cheaper than inter: a block
 * votes "intra" when its SAE against its own mean (+500 bias) is below the
 * SAD against the reference frame. Used by b_frame_strategy==1. */
1662 static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
1669     for(y=0; y<h; y+=16){
1670         for(x=0; x<w; x+=16){
1671             int offset= x + y*stride;
1672             int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride, 16);
1673             int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1674             int sae = get_sae(src + offset, mean, stride);
1676             acc+= sae + 500 < sad;
/* Copies (or directly references, when strides match and the user preserves
 * the input) the user-supplied frame into an internal Picture and appends it
 * to s->input_picture[] for later reordering.
 * NOTE(review): interior lines (braces, else branches, loop headers) are
 * missing from this extract. */
1683 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1686     const int encoding_delay= s->max_b_frames;
/* direct (zero-copy) mode requires matching strides and preserved input */
1690     if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1691     if(pic_arg->linesize[0] != s->linesize) direct=0;
1692     if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1693     if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1695 //    av_log(AV_LOG_DEBUG, "%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1698         i= ff_find_unused_picture(s, 1);
1700         pic= (AVFrame*)&s->picture[i];
1704             pic->data[i]= pic_arg->data[i];
1705             pic->linesize[i]= pic_arg->linesize[i];
1707         alloc_picture(s, (Picture*)pic, 1);
1710         i= ff_find_unused_picture(s, 0);
1712         pic= (AVFrame*)&s->picture[i];
1715         alloc_picture(s, (Picture*)pic, 0);
1717         if(   pic->data[0] + offset == pic_arg->data[0]
1718            && pic->data[1] + offset == pic_arg->data[1]
1719            && pic->data[2] + offset == pic_arg->data[2]){
/* strides differ: copy the picture row by row, per plane */
1722             int h_chroma_shift, v_chroma_shift;
1723             avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1726                 int src_stride= pic_arg->linesize[i];
1727                 int dst_stride= i ? s->uvlinesize : s->linesize;
1728                 int h_shift= i ? h_chroma_shift : 0;
1729                 int v_shift= i ? v_chroma_shift : 0;
1730                 int w= s->width >>h_shift;
1731                 int h= s->height>>v_shift;
1732                 uint8_t *src= pic_arg->data[i];
1733                 uint8_t *dst= pic->data[i] + offset;
1735                 if(src_stride==dst_stride)
1736                     memcpy(dst, src, src_stride*h);
1739                         memcpy(dst, src, w);
1747     copy_picture_attributes(pic, pic_arg);
1749     pic->display_picture_number= s->input_picture_number++;
1752     /* shift buffer entries */
1753     for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1754         s->input_picture[i-1]= s->input_picture[i];
1756     s->input_picture[encoding_delay]= (Picture*)pic;
/* Decides the coding order and picture types for the queued input frames
 * (I/P/B selection via user types, two-pass stats or b_frame_strategy),
 * then promotes the chosen frame to new_picture / current_picture_ptr.
 * NOTE(review): interior lines (braces, else branches) are missing from
 * this extract; code below is kept byte-identical. */
1761 static void select_input_picture(MpegEncContext *s){
1764     for(i=1; i<MAX_PICTURE_COUNT; i++)
1765         s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1766     s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1768     /* set next picture types & ordering */
1769     if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1770         if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1771             s->reordered_input_picture[0]= s->input_picture[0];
1772             s->reordered_input_picture[0]->pict_type= I_TYPE;
1773             s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
/* second pass: take picture types from the first-pass rate-control log */
1777             if(s->flags&CODEC_FLAG_PASS2){
1778                 for(i=0; i<s->max_b_frames+1; i++){
1779                     int pict_num= s->input_picture[0]->display_picture_number + i;
1780                     int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1781                     s->input_picture[i]->pict_type= pict_type;
1783                     if(i + 1 >= s->rc_context.num_entries) break;
1787             if(s->input_picture[0]->pict_type){
1788                 /* user selected pict_type */
1789                 for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1790                     if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1793                 if(b_frames > s->max_b_frames){
1794                     av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
1795                     b_frames = s->max_b_frames;
1797             }else if(s->avctx->b_frame_strategy==0){
1798                 b_frames= s->max_b_frames;
1799                 while(b_frames && !s->input_picture[b_frames]) b_frames--;
1800             }else if(s->avctx->b_frame_strategy==1){
/* adaptive: score each candidate by its intra-MB count vs previous frame */
1801                 for(i=1; i<s->max_b_frames+1; i++){
1802                     if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
1803                         s->input_picture[i]->b_frame_score=
1804                             get_intra_count(s, s->input_picture[i  ]->data[0],
1805                                                s->input_picture[i-1]->data[0], s->linesize) + 1;
1808                 for(i=0; i<s->max_b_frames; i++){
1809                     if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1812                 b_frames= FFMAX(0, i-1);
1815                 for(i=0; i<b_frames+1; i++){
1816                     s->input_picture[i]->b_frame_score=0;
1819                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1824 //static int b_count=0;
1825 //b_count+= b_frames;
1826 //av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
1827             if(s->picture_in_gop_number + b_frames >= s->gop_size){
1828                 if(s->flags & CODEC_FLAG_CLOSED_GOP)
1830                     s->input_picture[b_frames]->pict_type= I_TYPE;
1833             if(   (s->flags & CODEC_FLAG_CLOSED_GOP)
1835                && s->input_picture[b_frames]->pict_type== I_TYPE)
/* chosen reference frame is coded first, preceding B frames follow */
1838             s->reordered_input_picture[0]= s->input_picture[b_frames];
1839             if(s->reordered_input_picture[0]->pict_type != I_TYPE)
1840                 s->reordered_input_picture[0]->pict_type= P_TYPE;
1841             s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
1842             for(i=0; i<b_frames; i++){
1843                 s->reordered_input_picture[i+1]= s->input_picture[i];
1844                 s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1845                 s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++;
1850     if(s->reordered_input_picture[0]){
1851         s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1853         copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1855         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1856             // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
1858             int i= ff_find_unused_picture(s, 0);
1859             Picture *pic= &s->picture[i];
1861             /* mark us unused / free shared pic */
1863                 s->reordered_input_picture[0]->data[i]= NULL;
1864             s->reordered_input_picture[0]->type= 0;
1866             copy_picture_attributes((AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
1867             pic->reference              = s->reordered_input_picture[0]->reference;
1869             alloc_picture(s, pic, 0);
1871             s->current_picture_ptr= pic;
1873             // input is not a shared pix -> reuse buffer for current_pix
1875             assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER
1876                    || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1878             s->current_picture_ptr= s->reordered_input_picture[0];
1880                 s->new_picture.data[i]+=16;
1883         copy_picture(&s->current_picture, s->current_picture_ptr);
1885         s->picture_number= s->new_picture.display_picture_number;
1886 //printf("dpn:%d\n", s->picture_number);
1888         memset(&s->new_picture, 0, sizeof(Picture));
/* Top-level per-frame encode entry point: queues the input picture, selects
 * the next frame to code, encodes it, writes rate-control stats, applies
 * VBV stuffing and patches the mpeg1/2 vbv_delay for CBR.
 * Returns the number of bytes written.
 * NOTE(review): interior lines are missing from this extract. */
1892 int MPV_encode_picture(AVCodecContext *avctx,
1893                        unsigned char *buf, int buf_size, void *data)
1895     MpegEncContext *s = avctx->priv_data;
1896     AVFrame *pic_arg = data;
1897     int i, stuffing_count;
1899     if(avctx->pix_fmt != PIX_FMT_YUV420P){
1900         av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
1904     init_put_bits(&s->pb, buf, buf_size);
1906     s->picture_in_gop_number++;
1908     load_input_picture(s, pic_arg);
1910     select_input_picture(s);
1913     if(s->new_picture.data[0]){
1914         s->pict_type= s->new_picture.pict_type;
1916 //printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1917         MPV_frame_start(s, avctx);
1919         encode_picture(s, s->picture_number);
/* export per-frame statistics to the public AVCodecContext fields */
1921         avctx->real_pict_num  = s->picture_number;
1922         avctx->header_bits = s->header_bits;
1923         avctx->mv_bits     = s->mv_bits;
1924         avctx->misc_bits   = s->misc_bits;
1925         avctx->i_tex_bits  = s->i_tex_bits;
1926         avctx->p_tex_bits  = s->p_tex_bits;
1927         avctx->i_count     = s->i_count;
1928         avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1929         avctx->skip_count  = s->skip_count;
1933         if (s->out_format == FMT_MJPEG)
1934             mjpeg_picture_trailer(s);
1936         if(s->flags&CODEC_FLAG_PASS1)
1937             ff_write_pass1_stats(s);
1940             avctx->error[i] += s->current_picture_ptr->error[i];
1943     flush_put_bits(&s->pb);
1944     s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1946     stuffing_count= ff_vbv_update(s, s->frame_bits);
/* pad the frame with codec-specific stuffing to satisfy the VBV model */
1948         switch(s->codec_id){
1949         case CODEC_ID_MPEG1VIDEO:
1950         case CODEC_ID_MPEG2VIDEO:
1951             while(stuffing_count--){
1952                 put_bits(&s->pb, 8, 0);
1955         case CODEC_ID_MPEG4:
1956             put_bits(&s->pb, 16, 0);
1957             put_bits(&s->pb, 16, 0x1C3);
1958             stuffing_count -= 4;
1959             while(stuffing_count--){
1960                 put_bits(&s->pb, 8, 0xFF);
1964             av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1966         flush_put_bits(&s->pb);
1967         s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1970     /* update mpeg1/2 vbv_delay for CBR */
1971     if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate){
1974         assert(s->repeat_first_field==0);
/* 90 kHz clock units; patched into the already-written picture header */
1976         vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate);
1977         assert(vbv_delay < 0xFFFF);
1979         s->vbv_delay_ptr[0] &= 0xF8;
1980         s->vbv_delay_ptr[0] |= vbv_delay>>13;
1981         s->vbv_delay_ptr[1]  = vbv_delay>>5;
1982         s->vbv_delay_ptr[2] &= 0x07;
1983         s->vbv_delay_ptr[2] |= vbv_delay<<3;
1985     s->total_bits += s->frame_bits;
1986     avctx->frame_bits  = s->frame_bits;
1988         assert((pbBufPtr(&s->pb) == s->pb.buf));
1991     assert((s->frame_bits&7)==0);
1993     return s->frame_bits/8;
1996 #endif //CONFIG_ENCODERS
/* One-parameter GMC (global motion compensation) for MPEG-4: applies a single
 * translational sprite offset to luma and both chroma planes, with edge
 * emulation when the source area leaves the padded picture.
 * NOTE(review): interior lines are missing from this extract. */
1998 static inline void gmc1_motion(MpegEncContext *s,
1999                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2001                                uint8_t **ref_picture, int src_offset)
2004     int offset, src_x, src_y, linesize, uvlinesize;
2005     int motion_x, motion_y;
2008     motion_x= s->sprite_offset[0][0];
2009     motion_y= s->sprite_offset[0][1];
/* integer part selects the source block; fractional part feeds dsp.gmc1 */
2010     src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
2011     src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
2012     motion_x<<=(3-s->sprite_warping_accuracy);
2013     motion_y<<=(3-s->sprite_warping_accuracy);
2014     src_x = clip(src_x, -16, s->width);
2015     if (src_x == s->width)
2017     src_y = clip(src_y, -16, s->height);
2018     if (src_y == s->height)
2021     linesize = s->linesize;
2022     uvlinesize = s->uvlinesize;
2024     ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
2026     dest_y+=dest_offset;
2027     if(s->flags&CODEC_FLAG_EMU_EDGE){
2028         if(   (unsigned)src_x >= s->h_edge_pos - 17
2029            || (unsigned)src_y >= s->v_edge_pos - 17){
2030             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2031             ptr= s->edge_emu_buffer;
2035     if((motion_x|motion_y)&7){
2036         s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
2037         s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* no sub-pel component: plain (possibly half-pel) copy is enough */
2041         dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
2042         if (s->no_rounding){
2043             s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
2045             s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
2049     if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma: same procedure at half resolution with the chroma sprite offset */
2051     motion_x= s->sprite_offset[1][0];
2052     motion_y= s->sprite_offset[1][1];
2053     src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
2054     src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
2055     motion_x<<=(3-s->sprite_warping_accuracy);
2056     motion_y<<=(3-s->sprite_warping_accuracy);
2057     src_x = clip(src_x, -8, s->width>>1);
2058     if (src_x == s->width>>1)
2060     src_y = clip(src_y, -8, s->height>>1);
2061     if (src_y == s->height>>1)
2064     offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
2065     ptr = ref_picture[1] + offset;
2066     if(s->flags&CODEC_FLAG_EMU_EDGE){
2067         if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
2068            || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
2069             ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2070             ptr= s->edge_emu_buffer;
2074     s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
2076     ptr = ref_picture[2] + offset;
2078         ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2079         ptr= s->edge_emu_buffer;
2081     s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
/* Full affine GMC for MPEG-4: warps luma (two 8-wide halves) and both chroma
 * planes using sprite_offset + sprite_delta with accuracy 'a'.
 * NOTE(review): interior lines are missing from this extract. */
2086 static inline void gmc_motion(MpegEncContext *s,
2087                                uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2089                                uint8_t **ref_picture, int src_offset)
2092     int linesize, uvlinesize;
2093     const int a= s->sprite_warping_accuracy;
2096     linesize = s->linesize;
2097     uvlinesize = s->uvlinesize;
2099     ptr = ref_picture[0] + src_offset;
2101     dest_y+=dest_offset;
/* warped source position of the macroblock's top-left luma sample */
2103     ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
2104     oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
2106     s->dsp.gmc(dest_y, ptr, linesize, 16,
2109            s->sprite_delta[0][0], s->sprite_delta[0][1],
2110            s->sprite_delta[1][0], s->sprite_delta[1][1],
2111            a+1, (1<<(2*a+1)) - s->no_rounding,
2112            s->h_edge_pos, s->v_edge_pos);
2113     s->dsp.gmc(dest_y+8, ptr, linesize, 16,
2114            ox + s->sprite_delta[0][0]*8,
2115            oy + s->sprite_delta[1][0]*8,
2116            s->sprite_delta[0][0], s->sprite_delta[0][1],
2117            s->sprite_delta[1][0], s->sprite_delta[1][1],
2118            a+1, (1<<(2*a+1)) - s->no_rounding,
2119            s->h_edge_pos, s->v_edge_pos);
2121     if(s->flags&CODEC_FLAG_GRAY) return;
2124     dest_cb+=dest_offset>>1;
2125     dest_cr+=dest_offset>>1;
/* chroma uses the half-resolution sprite offset and 8x8 blocks */
2127     ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
2128     oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
2130     ptr = ref_picture[1] + (src_offset>>1);
2131     s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
2134            s->sprite_delta[0][0], s->sprite_delta[0][1],
2135            s->sprite_delta[1][0], s->sprite_delta[1][1],
2136            a+1, (1<<(2*a+1)) - s->no_rounding,
2137            s->h_edge_pos>>1, s->v_edge_pos>>1);
2139     ptr = ref_picture[2] + (src_offset>>1);
2140     s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
2143            s->sprite_delta[0][0], s->sprite_delta[0][1],
2144            s->sprite_delta[1][0], s->sprite_delta[1][1],
2145            a+1, (1<<(2*a+1)) - s->no_rounding,
2146            s->h_edge_pos>>1, s->v_edge_pos>>1);
2150  * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
2151  * @param buf destination buffer
2152  * @param src source buffer
2153  * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
2154  * @param block_w width of block
2155  * @param block_h height of block
2156  * @param src_x x coordinate of the top left sample of the block in the source buffer
2157  * @param src_y y coordinate of the top left sample of the block in the source buffer
2158  * @param w width of the source buffer
2159  * @param h height of the source buffer
/* NOTE(review): the out-of-picture clamping branches for positive overflow
 * are missing from this extract. */
2161 void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
2162                                     int src_x, int src_y, int w, int h){
2164     int start_y, start_x, end_y, end_x;
/* block entirely below/above the picture: clamp to the nearest valid row */
2167         src+= (h-1-src_y)*linesize;
2169     }else if(src_y<=-block_h){
2170         src+= (1-block_h-src_y)*linesize;
2176     }else if(src_x<=-block_w){
2177         src+= (1-block_w-src_x);
2181     start_y= FFMAX(0, -src_y);
2182     start_x= FFMAX(0, -src_x);
2183     end_y= FFMIN(block_h, h-src_y);
2184     end_x= FFMIN(block_w, w-src_x);
2186     // copy existing part
2187     for(y=start_y; y<end_y; y++){
2188         for(x=start_x; x<end_x; x++){
2189             buf[x + y*linesize]= src[x + y*linesize];
/* replicate the top edge */
2194     for(y=0; y<start_y; y++){
2195         for(x=start_x; x<end_x; x++){
2196             buf[x + y*linesize]= buf[x + start_y*linesize];
/* replicate the bottom edge */
2201     for(y=end_y; y<block_h; y++){
2202         for(x=start_x; x<end_x; x++){
2203             buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
/* replicate left and right columns across the full block height */
2207     for(y=0; y<block_h; y++){
2209        for(x=0; x<start_x; x++){
2210            buf[x + y*linesize]= buf[start_x + y*linesize];
2214        for(x=end_x; x<block_w; x++){
2215            buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
/* Half-pel motion compensation of one w x h block: resolves the half-pel
 * index 'dxy', clips the source position and falls back to edge emulation
 * when the block reads outside the padded picture. */
2220 static inline int hpel_motion(MpegEncContext *s,
2221                                   uint8_t *dest, uint8_t *src,
2222                                   int src_x, int src_y,
2223                                   int width, int height, int stride,
2224                                   int h_edge_pos, int v_edge_pos,
2225                                   int w, int h, op_pixels_func *pix_op,
2226                                   int motion_x, int motion_y)
/* dxy encodes the half-pel phase: bit 0 = horizontal, bit 1 = vertical */
2231     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2232     src_x += motion_x >> 1;
2233     src_y += motion_y >> 1;
2235     /* WARNING: do not forget half pels */
2236     src_x = clip(src_x, -16, width); //FIXME unneeded for emu?
2239     src_y = clip(src_y, -16, height);
2240     if (src_y == height)
2242     src += src_y * stride + src_x;
2244     if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
2245         if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
2246            || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
2247             ff_emulated_edge_mc(s->edge_emu_buffer, src, stride, w+1, h+1,
2248                              src_x, src_y, h_edge_pos, v_edge_pos);
2249             src= s->edge_emu_buffer;
2253     pix_op[dxy](dest, src, stride, h);
2257 /* apply one mpeg motion vector to the three components */
// Half-pel MC for luma (via hpel_motion) plus the derived chroma MC for
// one macroblock. field_based selects frame/field addressing; src_offset/
// dest_offset shift into the correct field.
// NOTE(review): interior lines (dest_offset declaration, mx/my derivation,
// emu-edge condition lines) are elided in this excerpt.
2258 static inline void mpeg_motion(MpegEncContext *s,
2259 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2261 uint8_t **ref_picture, int src_offset,
2262 int field_based, op_pixels_func (*pix_op)[4],
2263 int motion_x, int motion_y, int h)
2266 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, uvlinesize;
2269 if(s->quarter_sample)
2276 height = s->height >> field_based;
2277 v_edge_pos = s->v_edge_pos >> field_based;
2278 uvlinesize = s->current_picture.linesize[1] << field_based;
// Luma: delegate to the shared half-pel helper.
2281 dest_y + dest_offset, ref_picture[0] + src_offset,
2282 s->mb_x * 16, s->mb_y * (16 >> field_based),
2283 s->width, height, s->current_picture.linesize[0] << field_based,
2284 s->h_edge_pos, v_edge_pos,
2286 motion_x, motion_y);
2289 if(s->flags&CODEC_FLAG_GRAY) return;
// Chroma vector derivation differs per format: H.263 rounds toward the
// nearest half-pel; MPEG-style just halves (elided branches).
2291 if (s->out_format == FMT_H263) {
2293 if ((motion_x & 3) != 0)
2295 if ((motion_y & 3) != 0)
2302 dxy = ((my & 1) << 1) | (mx & 1);
// Chroma is subsampled 2x in both directions, hence the 8-based grid.
2307 src_x = s->mb_x * 8 + mx;
2308 src_y = s->mb_y * (8 >> field_based) + my;
2309 src_x = clip(src_x, -8, s->width >> 1);
2310 if (src_x == (s->width >> 1))
2312 src_y = clip(src_y, -8, height >> 1);
2313 if (src_y == (height >> 1))
2315 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2316 ptr = ref_picture[1] + offset;
// Edge emulation for Cb: 9x9 read area covers half-pel interpolation.
2318 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
2319 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2320 ptr= s->edge_emu_buffer + (src_offset >> 1);
2322 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
// Same for Cr.
2324 ptr = ref_picture[2] + offset;
2326 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based,
2327 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2328 ptr= s->edge_emu_buffer + (src_offset >> 1);
2330 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2332 //FIXME move to dsputil, avg variant, 16x16 version
// Overlapped block motion compensation (H.263 Annex F style) blend of one
// 8x8 block: each output pixel is a weighted sum of the five candidate
// predictions (mid, top, left, right, bottom); weights sum to 8, with +4
// rounding before the >>3.
// NOTE(review): the surrounding row loop / x updates are elided here.
2333 static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
2335 uint8_t * const top = src[1];
2336 uint8_t * const left = src[2];
2337 uint8_t * const mid = src[0];
2338 uint8_t * const right = src[3];
2339 uint8_t * const bottom= src[4];
2340 #define OBMC_FILTER(x, t, l, m, r, b)\
2341 dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
// Applies the same weights to a 2x2 group of pixels at once.
2342 #define OBMC_FILTER4(x, t, l, m, r, b)\
2343 OBMC_FILTER(x , t, l, m, r, b);\
2344 OBMC_FILTER(x+1 , t, l, m, r, b);\
2345 OBMC_FILTER(x +stride, t, l, m, r, b);\
2346 OBMC_FILTER(x+1+stride, t, l, m, r, b);
// Top rows: top neighbor contributes, bottom does not.
2349 OBMC_FILTER (x , 2, 2, 4, 0, 0);
2350 OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
2351 OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
2352 OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
2353 OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
2354 OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
2356 OBMC_FILTER (x , 1, 2, 5, 0, 0);
2357 OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
2358 OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
2359 OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
// Middle rows: only left/right neighbors blend with mid.
2361 OBMC_FILTER4(x , 1, 2, 5, 0, 0);
2362 OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
2363 OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
2364 OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
2366 OBMC_FILTER4(x , 0, 2, 5, 0, 1);
2367 OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
2368 OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
2369 OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
// Bottom rows: bottom neighbor contributes, top does not (mirror of top).
2371 OBMC_FILTER (x , 0, 2, 5, 0, 1);
2372 OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
2373 OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
2374 OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
2375 OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
2376 OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
2378 OBMC_FILTER (x , 0, 2, 4, 0, 2);
2379 OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
2380 OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
2381 OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
2384 /* obmc for 1 8x8 luma block */
// Produces the five candidate predictions (mid/top/left/right/bottom MV)
// for one 8x8 luma block into scratch space, then blends them with
// put_obmc. NOTE(review): the loop header and the "reuse mid prediction
// when the neighbor MV equals the mid MV" branch body are elided here.
2385 static inline void obmc_motion(MpegEncContext *s,
2386 uint8_t *dest, uint8_t *src,
2387 int src_x, int src_y,
2388 op_pixels_func *pix_op,
2389 int16_t mv[5][2]/* mid top left right bottom*/)
// OBMC is only defined for half-pel resolution here.
2395 assert(s->quarter_sample==0);
// Neighbor MV identical to mid MV: its prediction would be identical,
// so it can be shared rather than recomputed.
2398 if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
// Lay the four 8x8 scratch predictions out in a 16x16 area of the
// edge_emu_buffer (offset 16 keeps the buffer's own use clear).
2401 ptr[i]= s->edge_emu_buffer + 16 + 8*(i&1) + s->linesize*8*(i>>1);
2402 hpel_motion(s, ptr[i], src,
2404 s->width, s->height, s->linesize,
2405 s->h_edge_pos, s->v_edge_pos,
2407 mv[i][0], mv[i][1]);
2411 put_obmc(dest, ptr, s->linesize);
// Quarter-pel motion compensation for one macroblock: luma via qpix_op
// (16 sub-pel variants), chroma at half-pel via pix_op after rounding the
// luma MV down to chroma resolution (with bug-workaround variants).
// NOTE(review): several lines (clip fallthroughs, interlaced branch
// header, default mx/my derivation) are elided in this excerpt.
2414 static inline void qpel_motion(MpegEncContext *s,
2415 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2417 uint8_t **ref_picture, int src_offset,
2418 int field_based, op_pixels_func (*pix_op)[4],
2419 qpel_mc_func (*qpix_op)[16],
2420 int motion_x, int motion_y, int h)
2423 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
// dxy: 4x4 grid of quarter-pel sub-positions (low 2 bits of each component).
2426 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2427 src_x = s->mb_x * 16 + (motion_x >> 2);
2428 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
2430 height = s->height >> field_based;
2431 v_edge_pos = s->v_edge_pos >> field_based;
2432 src_x = clip(src_x, -16, s->width);
2433 if (src_x == s->width)
2435 src_y = clip(src_y, -16, height);
2436 if (src_y == height)
2438 linesize = s->linesize << field_based;
2439 uvlinesize = s->uvlinesize << field_based;
2440 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
2441 dest_y += dest_offset;
2442 //printf("%d %d %d\n", src_x, src_y, dxy);
2444 if(s->flags&CODEC_FLAG_EMU_EDGE){
2445 if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
2446 || (unsigned)src_y > v_edge_pos - (motion_y&3) - h ){
// 17x17(+field) read area: qpel interpolation needs one extra row/col.
2447 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,
2448 src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
2449 ptr= s->edge_emu_buffer + src_offset;
2454 qpix_op[0][dxy](dest_y, ptr, linesize);
2456 //damn interlaced mode
2457 //FIXME boundary mirroring is not exactly correct here
// Field-based path: apply the 8x8 qpel op to each half of the row.
2458 qpix_op[1][dxy](dest_y , ptr , linesize);
2459 qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
2462 if(s->flags&CODEC_FLAG_GRAY) return;
// Chroma MV derivation, with workarounds reproducing known encoder bugs
// so buggy streams still decode bit-exactly.
2467 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
2468 static const int rtab[8]= {0,0,1,1,0,0,0,1};
2469 mx= (motion_x>>1) + rtab[motion_x&7];
2470 my= (motion_y>>1) + rtab[motion_y&7];
2471 }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
2472 mx= (motion_x>>1)|(motion_x&1);
2473 my= (motion_y>>1)|(motion_y&1);
2481 dxy= (mx&1) | ((my&1)<<1);
2485 src_x = s->mb_x * 8 + mx;
2486 src_y = s->mb_y * (8 >> field_based) + my;
2487 src_x = clip(src_x, -8, s->width >> 1);
2488 if (src_x == (s->width >> 1))
2490 src_y = clip(src_y, -8, height >> 1);
2491 if (src_y == (height >> 1))
2494 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2495 ptr = ref_picture[1] + offset;
// Edge emulation for Cb, then Cr (same 9x9 read area as mpeg_motion).
2497 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
2498 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2499 ptr= s->edge_emu_buffer + (src_offset >> 1);
2501 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2503 ptr = ref_picture[2] + offset;
2505 ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based,
2506 src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2507 ptr= s->edge_emu_buffer + (src_offset >> 1);
2509 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
// Rounds an H.263 luma motion-vector component to the chroma half-pel
// grid using the standard rounding table; the sign is handled separately
// (second return is the negative branch — the if/else lines are elided
// in this excerpt).
2512 inline int ff_h263_round_chroma(int x){
2514 return (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2517 return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2522 * h263 chroma 4mv motion compensation.
// Chroma motion compensation for 4MV macroblocks: the four 8x8 luma MVs
// are combined (mx/my passed in are their sum — summation happens in the
// caller) into one chroma MV with H.263 rounding, then one half-pel MC
// is applied per chroma plane.
// NOTE(review): some lines (clip fallthroughs, emu closing braces) are
// elided in this excerpt.
2524 static inline void chroma_4mv_motion(MpegEncContext *s,
2525 uint8_t *dest_cb, uint8_t *dest_cr,
2526 uint8_t **ref_picture,
2527 op_pixels_func *pix_op,
2529 int dxy, emu=0, src_x, src_y, offset;
2532 /* In case of 8X8, we construct a single chroma motion vector
2533 with a special rounding */
2534 mx= ff_h263_round_chroma(mx);
2535 my= ff_h263_round_chroma(my);
2537 dxy = ((my & 1) << 1) | (mx & 1);
2541 src_x = s->mb_x * 8 + mx;
2542 src_y = s->mb_y * 8 + my;
2543 src_x = clip(src_x, -8, s->width/2);
2544 if (src_x == s->width/2)
2546 src_y = clip(src_y, -8, s->height/2);
2547 if (src_y == s->height/2)
2550 offset = (src_y * (s->uvlinesize)) + src_x;
2551 ptr = ref_picture[1] + offset;
2552 if(s->flags&CODEC_FLAG_EMU_EDGE){
2553 if( (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
2554 || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
2555 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2556 ptr= s->edge_emu_buffer;
2560 pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
// Same prediction for the Cr plane.
2562 ptr = ref_picture[2] + offset;
2564 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2565 ptr= s->edge_emu_buffer;
2567 pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
2571 * motion compensation of a single macroblock
2573 * @param dest_y luma destination pointer
2574 * @param dest_cb chroma cb/u destination pointer
2575 * @param dest_cr chroma cr/v destination pointer
2576 * @param dir direction (0->forward, 1->backward)
2577 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2578 * @param pic_op halfpel motion compensation function (average or put normally)
2579 * @param qpix_op qpel motion compensation function (average or put normally)
2580 * the motion vectors are taken from s->mv and the MV type from s->mv_type
// Dispatches motion compensation for one macroblock according to
// s->mv_type (16x16, 8x8/4MV, field, 16x8, dual-prime/DMV) and s->obmc,
// using the MVs in s->mv[dir][...]. dir: 0 = forward, 1 = backward ref.
// NOTE(review): many interior lines (case labels, braces, else branches,
// loop headers) are elided in this excerpt.
2582 static inline void MPV_motion(MpegEncContext *s,
2583 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2584 int dir, uint8_t **ref_picture,
2585 op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2587 int dxy, mx, my, src_x, src_y, motion_x, motion_y;
2589 uint8_t *ptr, *dest;
// --- OBMC path (H.263): build a 4x4 cache of neighboring 8x8 MVs -------
2594 if(s->obmc && s->pict_type != B_TYPE){
2595 int16_t mv_cache[4][4][2];
2596 const int xy= s->mb_x + s->mb_y*s->mb_stride;
2597 const int mot_stride= s->mb_width*2 + 2;
2598 const int mot_xy= 1 + mb_x*2 + (mb_y*2 + 1)*mot_stride;
2600 assert(!s->mb_skiped);
2602 memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
2603 memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
// Bottom neighbors are not decoded yet, so the bottom row duplicates the
// current MB's lower MV row (intentional, not a copy-paste bug).
2604 memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
// Top neighbors: duplicate our own row at the picture top or next to
// an intra MB, otherwise read the real neighbor MVs.
2606 if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
2607 memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
2609 memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
// Left neighbors (one int32 copies both MV components at once).
2612 if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
2613 *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
2614 *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
2616 *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
2617 *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
// Right neighbors.
2620 if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
2621 *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
2622 *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
2624 *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
2625 *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
// Per 8x8 block: gather mid/top/left/right/bottom MVs and blend.
2631 const int x= (i&1)+1;
2632 const int y= (i>>1)+1;
2634 {mv_cache[y][x ][0], mv_cache[y][x ][1]},
2635 {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
2636 {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
2637 {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
2638 {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
2640 obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
2642 mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
2649 if(!(s->flags&CODEC_FLAG_GRAY))
2650 chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
// --- Regular dispatch on MV type ---------------------------------------
2655 switch(s->mv_type) {
// MV_TYPE_16X16: GMC sprite warping, qpel, mspel (WMV) or plain half-pel.
2659 if(s->real_sprite_warping_points==1){
2660 gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2663 gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2666 }else if(s->quarter_sample){
2667 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2670 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2672 ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2673 ref_picture, pix_op,
2674 s->mv[dir][0][0], s->mv[dir][0][1], 16);
2678 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2681 s->mv[dir][0][0], s->mv[dir][0][1], 16);
// MV_TYPE_8X8: one 8x8 prediction per block; mx/my accumulate the four
// luma MVs for the combined chroma MV (qpel MVs are halved first).
2687 if(s->quarter_sample){
2689 motion_x = s->mv[dir][i][0];
2690 motion_y = s->mv[dir][i][1];
2692 dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2693 src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2694 src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2696 /* WARNING: do not forget half pels */
2697 src_x = clip(src_x, -16, s->width);
2698 if (src_x == s->width)
2700 src_y = clip(src_y, -16, s->height);
2701 if (src_y == s->height)
2704 ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2705 if(s->flags&CODEC_FLAG_EMU_EDGE){
2706 if( (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
2707 || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
2708 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2709 ptr= s->edge_emu_buffer;
2712 dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2713 qpix_op[1][dxy](dest, ptr, s->linesize);
2715 mx += s->mv[dir][i][0]/2;
2716 my += s->mv[dir][i][1]/2;
// Half-pel 8x8 path.
2720 hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
2722 mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
2723 s->width, s->height, s->linesize,
2724 s->h_edge_pos, s->v_edge_pos,
2726 s->mv[dir][i][0], s->mv[dir][i][1]);
2728 mx += s->mv[dir][i][0];
2729 my += s->mv[dir][i][1];
2733 if(!(s->flags&CODEC_FLAG_GRAY))
2734 chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
// MV_TYPE_FIELD: frame pictures predict top and bottom field separately
// (src_offset selects the reference field, dest_offset the output field).
2737 if (s->picture_structure == PICT_FRAME) {
2738 if(s->quarter_sample){
2740 qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2741 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2743 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2745 qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2746 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2748 s->mv[dir][1][0], s->mv[dir][1][1], 8);
2751 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2752 ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2754 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2756 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2757 ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2759 s->mv[dir][1][0], s->mv[dir][1][1], 8);
// Field pictures: a same-parity reference in the current frame means we
// must read from current_picture instead of the listed reference.
2763 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2764 offset= s->field_select[dir][0] ? s->linesize : 0;
2766 ref_picture= s->current_picture.data;
2767 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2770 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2771 ref_picture, offset,
2773 s->mv[dir][0][0], s->mv[dir][0][1], 16);
// MV_TYPE_16X8: two 16x8 halves, each possibly from a different picture.
2778 uint8_t ** ref2picture;
2780 if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2781 ref2picture= ref_picture;
2782 offset= s->field_select[dir][0] ? s->linesize : 0;
2784 ref2picture= s->current_picture.data;
2785 offset= s->field_select[dir][0] ? s->linesize : -s->linesize;
2788 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2789 ref2picture, offset,
2791 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2794 if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){
2795 ref2picture= ref_picture;
2796 offset= s->field_select[dir][1] ? s->linesize : 0;
2798 ref2picture= s->current_picture.data;
2799 offset= s->field_select[dir][1] ? s->linesize : -s->linesize;
2801 // I know it is ugly but this is the only way to fool emu_edge without rewrite mpeg_motion
2802 mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize,
2804 ref2picture, offset,
2806 s->mv[dir][1][0], s->mv[dir][1][1]+16, 8);
// MV_TYPE_DMV (dual prime): put same-parity prediction, then average in
// the opposite-parity prediction.
2812 op_pixels_func (*dmv_pix_op)[4];
2815 dmv_pix_op = s->dsp.put_pixels_tab;
2817 if(s->picture_structure == PICT_FRAME){
2818 //put top field from top field
2819 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2822 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2823 //put bottom field from bottom field
2824 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2825 ref_picture, s->linesize,
2827 s->mv[dir][0][0], s->mv[dir][0][1], 8);
2829 dmv_pix_op = s->dsp.avg_pixels_tab;
2831 //avg top field from bottom field
2832 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2833 ref_picture, s->linesize,
2835 s->mv[dir][2][0], s->mv[dir][2][1], 8);
2836 //avg bottom field from top field
2837 mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2840 s->mv[dir][3][0], s->mv[dir][3][1], 8);
2843 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2846 //put field from the same parity
2847 //same parity is never in the same frame
2848 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2851 s->mv[dir][0][0],s->mv[dir][0][1],16);
2853 // after put we make avg of the same block
2854 dmv_pix_op=s->dsp.avg_pixels_tab;
2856 //opposite parity is always in the same frame if this is second field
2857 if(!s->first_field){
2858 ref_picture = s->current_picture.data;
2859 //top field is one linesize from frame beginning
2860 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2861 -s->linesize : s->linesize;
2863 offset=(s->picture_structure == PICT_BOTTOM_FIELD)?
2866 //avg field from the opposite parity
2867 mpeg_motion(s, dest_y, dest_cb, dest_cr,0,
2868 ref_picture, offset,
2870 s->mv[dir][2][0],s->mv[dir][2][1],16);
2880 /* put block[] to dest[] */
// Dequantizes an intra block in place and writes the IDCT result directly
// (no add) to dest. i is the block index within the MB (0-3 luma, 4-5 chroma).
2881 static inline void put_dct(MpegEncContext *s,
2882 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2883 s->dct_unquantize_intra(s, block, i, qscale);
2884 s->dsp.idct_put (dest, line_size, block);
2887 /* add block[] to dest[] */
// Adds the IDCT of an already-dequantized residual block to dest; skipped
// entirely when the block has no coded coefficients (last_index < 0).
2888 static inline void add_dct(MpegEncContext *s,
2889 DCTELEM *block, int i, uint8_t *dest, int line_size)
2891 if (s->block_last_index[i] >= 0) {
2892 s->dsp.idct_add (dest, line_size, block);
// Like add_dct, but dequantizes (inter tables) first; used for codecs
// whose bitstream parsing leaves blocks still quantized.
2896 static inline void add_dequant_dct(MpegEncContext *s,
2897 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2899 if (s->block_last_index[i] >= 0) {
2900 s->dct_unquantize_inter(s, block, i, qscale);
2902 s->dsp.idct_add (dest, line_size, block);
2907 * cleans dc, ac, coded_block for the current non intra MB
// Resets the intra prediction state (DC predictors to 1024, AC predictor
// rows to 0, coded_block flags) for the current macroblock so that later
// intra MBs do not predict from stale non-intra data.
// NOTE(review): a few lines (dc_val[0][xy] reset, dc_val[1] reset, braces)
// are elided in this excerpt.
2909 void ff_clean_intra_table_entries(MpegEncContext *s)
2911 int wrap = s->block_wrap[0];
2912 int xy = s->block_index[0];
// Luma DC predictors — 1024 is the reset value (mid-range for 11-bit DC).
2915 s->dc_val[0][xy + 1 ] =
2916 s->dc_val[0][xy + wrap] =
2917 s->dc_val[0][xy + 1 + wrap] = 1024;
// Luma AC predictors for the two block rows of this MB.
2919 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2920 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2921 if (s->msmpeg4_version>=3) {
2922 s->coded_block[xy ] =
2923 s->coded_block[xy + 1 ] =
2924 s->coded_block[xy + wrap] =
2925 s->coded_block[xy + 1 + wrap] = 0;
// Chroma predictors (one block per plane per MB).
2928 wrap = s->block_wrap[4];
2929 xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2931 s->dc_val[2][xy] = 1024;
2933 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2934 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2936 s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2939 /* generic function called after a macroblock has been parsed by the
2940 decoder or after it has been encoded by the encoder.
2942 Important variables used:
2943 s->mb_intra : true if intra macroblock
2944 s->mv_dir : motion vector direction
2945 s->mv_type : motion vector type
2946 s->mv : motion vector
2947 s->interlaced_dct : true if interlaced dct used (mpeg2)
// Reconstructs one macroblock: skip bookkeeping, motion compensation,
// residual dequant+IDCT (inter) or plain IDCT (intra), optional copy back
// for non-readable (delayed) destinations.
// NOTE(review): many lines (intra branch headers, closing braces, xvmc
// return) are elided in this excerpt.
2949 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2952 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2954 if(s->avctx->xvmc_acceleration){
2955 XVMC_decode_mb(s);//xvmc uses pblocks
2963 s->current_picture.qscale_table[mb_xy]= s->qscale;
2965 /* update DC predictors for P macroblocks */
2967 if (s->h263_pred || s->h263_aic) {
2968 if(s->mbintra_table[mb_xy])
2969 ff_clean_intra_table_entries(s);
2973 s->last_dc[2] = 128 << s->intra_dc_precision;
2976 else if (s->h263_pred || s->h263_aic)
2977 s->mbintra_table[mb_xy]=1;
2979 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2980 uint8_t *dest_y, *dest_cb, *dest_cr;
2981 int dct_linesize, dct_offset;
2982 op_pixels_func (*op_pix)[4];
2983 qpel_mc_func (*op_qpix)[16];
2984 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this wouldn't be right for field pics
2985 const int uvlinesize= s->current_picture.linesize[1];
2986 const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band;
2988 /* avoid copy if macroblock skipped in last frame too */
2989 /* skip only during decoding as we might trash the buffers during encoding a bit */
2991 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2992 const int age= s->current_picture.age;
2998 assert(s->pict_type!=I_TYPE);
3000 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
3001 if(*mbskip_ptr >99) *mbskip_ptr= 99;
3003 /* if previous was skipped too, then nothing to do ! */
3004 if (*mbskip_ptr >= age && s->current_picture.reference){
3007 } else if(!s->current_picture.reference){
3008 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
3009 if(*mbskip_ptr >99) *mbskip_ptr= 99;
3011 *mbskip_ptr = 0; /* not skipped */
// Interlaced DCT: blocks cover alternate lines, so double the stride and
// start the second block row one line (not 8) down.
3015 if (s->interlaced_dct) {
3016 dct_linesize = linesize * 2;
3017 dct_offset = linesize;
3019 dct_linesize = linesize;
3020 dct_offset = linesize * 8;
3024 dest_cb= s->dest[1];
3025 dest_cr= s->dest[2];
// Non-readable destination: reconstruct into scratch space, copied to
// s->dest[] at the end of this function.
3027 dest_y = s->edge_emu_buffer+32; //FIXME cleanup scratchpad pointers
3028 dest_cb= s->edge_emu_buffer+48;
3029 dest_cr= s->edge_emu_buffer+56;
3032 /* motion handling */
3033 /* decoding or more than one mb_type (MC was already done otherwise) */
// no_rounding uses the no-rounding averaging tables (MPEG-4 GMC / P-skip).
3035 if ((!s->no_rounding) || s->pict_type==B_TYPE){
3036 op_pix = s->dsp.put_pixels_tab;
3037 op_qpix= s->dsp.put_qpel_pixels_tab;
3039 op_pix = s->dsp.put_no_rnd_pixels_tab;
3040 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
// Forward prediction "puts", backward then averages on top (B frames).
3043 if (s->mv_dir & MV_DIR_FORWARD) {
3044 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
3045 op_pix = s->dsp.avg_pixels_tab;
3046 op_qpix= s->dsp.avg_qpel_pixels_tab;
3048 if (s->mv_dir & MV_DIR_BACKWARD) {
3049 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
3053 /* skip dequant / idct if we are really late ;) */
3054 if(s->hurry_up>1) return;
3056 /* add dct residue */
// Codecs listed here carry already-dequantized coefficients; the others
// still need dct_unquantize_inter before the IDCT-add.
3057 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
3058 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
3059 add_dequant_dct(s, block[0], 0, dest_y, dct_linesize, s->qscale);
3060 add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale);
3061 add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale);
3062 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale);
3064 if(!(s->flags&CODEC_FLAG_GRAY)){
3065 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3066 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3068 } else if(s->codec_id != CODEC_ID_WMV2){
3069 add_dct(s, block[0], 0, dest_y, dct_linesize);
3070 add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
3071 add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
3072 add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
3074 if(!(s->flags&CODEC_FLAG_GRAY)){
3075 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3076 add_dct(s, block[5], 5, dest_cr, uvlinesize);
// WMV2 has its own per-MB reconstruction path.
3081 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3085 /* dct only in intra block */
3086 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
3087 put_dct(s, block[0], 0, dest_y, dct_linesize, s->qscale);
3088 put_dct(s, block[1], 1, dest_y + 8, dct_linesize, s->qscale);
3089 put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize, s->qscale);
3090 put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize, s->qscale);
3092 if(!(s->flags&CODEC_FLAG_GRAY)){
3093 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3094 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
// MPEG-1/2 intra blocks are already dequantized: plain idct_put.
3097 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
3098 s->dsp.idct_put(dest_y + 8, dct_linesize, block[1]);
3099 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
3100 s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
3102 if(!(s->flags&CODEC_FLAG_GRAY)){
3103 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
3104 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
// Copy scratch reconstruction to the real destination (non-readable case).
3109 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3110 s->dsp.put_pixels_tab[1][0](s->dest[1], dest_cb, uvlinesize, 8);
3111 s->dsp.put_pixels_tab[1][0](s->dest[2], dest_cr, uvlinesize, 8);
3116 #ifdef CONFIG_ENCODERS
// Encoder heuristic: if the quantized coefficients of block n carry so
// little energy (position-weighted score below threshold) that keeping
// them is not worth the bits, zero the block. A negative threshold (set
// below) presumably signals the "keep DC" variant — the skip_dc setup
// lines are elided in this excerpt; TODO confirm against full source.
3118 static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
// tab[] weights each scan position's contribution to the score.
3120 static const char tab[64]=
3132 DCTELEM *block= s->block[n];
3133 const int last_index= s->block_last_index[n];
3138 threshold= -threshold;
3142 /* are all coefficients which we could set to zero already zero? */
3143 if(last_index<=skip_dc - 1) return;
3145 for(i=0; i<=last_index; i++){
3146 const int j = s->intra_scantable.permutated[i];
3147 const int level = ABS(block[j]);
3149 if(skip_dc && i==0) continue;
3158 if(score >= threshold) return;
// Below threshold: zero everything (except DC when skip_dc) and fix up
// last_index accordingly.
3159 for(i=skip_dc; i<=last_index; i++){
3160 const int j = s->intra_scantable.permutated[i];
3163 if(block[0]) s->block_last_index[n]= 0;
3164 else s->block_last_index[n]= -1;
// Clamps quantized coefficients into the codec's representable range
// [min_qcoeff, max_qcoeff], counting how many overflowed; warns once per
// block in simple MB-decision mode (RD modes handle overflow elsewhere).
// NOTE(review): the clamp assignment lines are elided in this excerpt.
3167 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
3170 const int maxlevel= s->max_qcoeff;
3171 const int minlevel= s->min_qcoeff;
3175 i=1; //skip clipping of intra dc
3179 for(;i<=last_index; i++){
3180 const int j= s->intra_scantable.permutated[i];
3181 int level = block[j];
3183 if (level>maxlevel){
3186 }else if(level<minlevel){
3194 if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
3195 av_log(s->avctx, AV_LOG_INFO, "warning, cliping %d dct coefficents to %d..%d\n", overflow, minlevel, maxlevel);
3198 #endif //CONFIG_ENCODERS
3202 * @param h is the normal height, this will be reduced automatically if needed for the last row
// Invokes the user's draw_horiz_band callback for the band of h lines
// starting at y, choosing the picture (current vs last) that matches the
// display order and computing per-plane byte offsets.
// NOTE(review): some lines (field handling, offset[1], B-frame offsets)
// are elided in this excerpt.
3204 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
3205 if (s->avctx->draw_horiz_band) {
3209 if(s->picture_structure != PICT_FRAME){
3212 if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
3215 h= FFMIN(h, s->height - y);
// Coded-order display (B frame / low delay): show the current picture,
// otherwise show the previous reference.
3217 if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
3218 src= (AVFrame*)s->current_picture_ptr;
3219 else if(s->last_picture_ptr)
3220 src= (AVFrame*)s->last_picture_ptr;
3224 if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
3230 offset[0]= y * s->linesize;;
3232 offset[2]= (y>>1) * s->uvlinesize;;
3238 s->avctx->draw_horiz_band(s->avctx, src, offset,
3239 y, s->picture_structure, h);
// Sets up per-MB block indices (into the dc/ac prediction arrays) and the
// three destination plane pointers for the current mb_x/mb_y. The -16/-8
// in dest[] pre-compensates for an increment done elsewhere per MB.
3243 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3244 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this wouldn't be right for field pics
3245 const int uvlinesize= s->current_picture.linesize[1];
// Indices 0-3: the four luma blocks (2x2 grid within the MB row pair);
// 4 and 5: the chroma blocks, stored after the luma plane's entries.
3247 s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3248 s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1) + s->mb_x*2;
3249 s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
3250 s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2) + s->mb_x*2;
3251 s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
3252 s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
// B frames with a draw callback reuse a single MB row buffer (no y term);
// otherwise address the full frame.
3254 if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
3255 s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
3256 s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
3257 s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
3259 s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize ) + s->mb_x * 16 - 16;
3260 s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
3261 s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
3265 #ifdef CONFIG_ENCODERS
// Encoder: prepares the 6 DCT blocks for one macroblock — adaptive
// quantization, pixel fetch (intra) or MC + difference (inter), optional
// interlaced-DCT decision, and pre-quantization skip heuristics.
// NOTE(review): this function continues past the end of this excerpt and
// many interior lines are elided; comments below cover only what is shown.
3267 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
3269 const int mb_x= s->mb_x;
3270 const int mb_y= s->mb_y;
3273 int dct_offset = s->linesize*8; //default for progressive frames
3275 for(i=0; i<6; i++) skip_dct[i]=0;
// Adaptive quant: pick this MB's lambda/qscale from the precomputed table.
3277 if(s->adaptive_quant){
3278 const int last_qp= s->qscale;
3279 const int mb_xy= mb_x + mb_y*s->mb_stride;
3281 s->lambda= s->lambda_table[mb_xy];
3284 if(!(s->flags&CODEC_FLAG_QP_RD)){
3285 s->dquant= s->qscale - last_qp;
3287 if(s->out_format==FMT_H263)
3288 s->dquant= clip(s->dquant, -2, 2); //FIXME RD
// MPEG-4: direct/4MV MBs cannot signal a quant change.
3290 if(s->codec_id==CODEC_ID_MPEG4){
3292 if((s->mv_dir&MV_DIRECT) || s->mv_type==MV_TYPE_8X8)
3297 ff_set_qscale(s, last_qp + s->dquant);
// --- Intra path: fetch source pixels ------------------------------------
3305 wrap_y = s->linesize;
3306 ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
// MB extends past the coded picture: read through the edge emulator.
3308 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
3309 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
3310 ptr= s->edge_emu_buffer;
// Frame/field DCT decision: compare progressive vs interlaced cost
// (the -400 biases toward progressive).
3314 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
3315 int progressive_score, interlaced_score;
3317 s->interlaced_dct=0;
3318 progressive_score= s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y, 8)
3319 +s->dsp.ildct_cmp[4](s, ptr + wrap_y*8, NULL, wrap_y, 8) - 400;
3321 if(progressive_score > 0){
3322 interlaced_score = s->dsp.ildct_cmp[4](s, ptr , NULL, wrap_y*2, 8)
3323 +s->dsp.ildct_cmp[4](s, ptr + wrap_y , NULL, wrap_y*2, 8);
3324 if(progressive_score > interlaced_score){
3325 s->interlaced_dct=1;
3333 s->dsp.get_pixels(s->block[0], ptr , wrap_y);
3334 s->dsp.get_pixels(s->block[1], ptr + 8, wrap_y);
3335 s->dsp.get_pixels(s->block[2], ptr + dct_offset , wrap_y);
3336 s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
3338 if(s->flags&CODEC_FLAG_GRAY){
3342 int wrap_c = s->uvlinesize;
3343 ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
3345 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3346 ptr= s->edge_emu_buffer;
3348 s->dsp.get_pixels(s->block[4], ptr, wrap_c);
3350 ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
3352 ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3353 ptr= s->edge_emu_buffer;
3355 s->dsp.get_pixels(s->block[5], ptr, wrap_c);
// --- Inter path: motion-compensate, then take the difference ------------
3358 op_pixels_func (*op_pix)[4];
3359 qpel_mc_func (*op_qpix)[16];
3360 uint8_t *dest_y, *dest_cb, *dest_cr;
3361 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
3365 dest_y = s->dest[0];
3366 dest_cb = s->dest[1];
3367 dest_cr = s->dest[2];
3368 wrap_y = s->linesize;
3369 wrap_c = s->uvlinesize;
3370 ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
3371 ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
3372 ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
3374 if ((!s->no_rounding) || s->pict_type==B_TYPE){
3375 op_pix = s->dsp.put_pixels_tab;
3376 op_qpix= s->dsp.put_qpel_pixels_tab;
3378 op_pix = s->dsp.put_no_rnd_pixels_tab;
3379 op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
3382 if (s->mv_dir & MV_DIR_FORWARD) {
3383 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
3384 op_pix = s->dsp.avg_pixels_tab;
3385 op_qpix= s->dsp.avg_qpel_pixels_tab;
3387 if (s->mv_dir & MV_DIR_BACKWARD) {
3388 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
3391 if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
3392 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
3393 ptr_y= s->edge_emu_buffer;
// Same frame/field DCT decision, but on the prediction error.
3397 if(s->flags&CODEC_FLAG_INTERLACED_DCT){
3398 int progressive_score, interlaced_score;
3400 s->interlaced_dct=0;
3401 progressive_score= s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y, 8)
3402 +s->dsp.ildct_cmp[0](s, dest_y + wrap_y*8, ptr_y + wrap_y*8, wrap_y, 8) - 400;
3404 if(s->avctx->ildct_cmp == FF_CMP_VSSE) progressive_score -= 400;
3406 if(progressive_score>0){
3407 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y , ptr_y , wrap_y*2, 8)
3408 +s->dsp.ildct_cmp[0](s, dest_y + wrap_y , ptr_y + wrap_y , wrap_y*2, 8);
3410 if(progressive_score > interlaced_score){
3411 s->interlaced_dct=1;
// Residual = source minus motion-compensated prediction.
3419 s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
3420 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
3421 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
3422 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
3424 if(s->flags&CODEC_FLAG_GRAY){
3429 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3430 ptr_cb= s->edge_emu_buffer;
3432 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
3434 ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3435 ptr_cr= s->edge_emu_buffer;
3437 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
3439 /* pre quantization */
// Cheap SAD test: blocks whose residual would quantize to (almost)
// nothing are marked to skip the DCT entirely.
3440 if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
3442 if(s->dsp.sad[1](NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1;
3443 if(s->dsp.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1;
3444 if(s->dsp.sad[1](NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1;
3445 if(s->dsp.sad[1](NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1;
3446 if(s->dsp.sad[1](NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1;
3447 if(s->dsp.sad[1](NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1;
3453 if(skip_dct[i]) num++;
3456 if(s->mb_x==0 && s->mb_y==0){
3458 printf("%6d %1d\n", stat[i], i);
3467 /* DCT & quantize */
3468 if(s->out_format==FMT_MJPEG){
3471 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
3472 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3478 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
3479 // FIXME we could decide to change to quantizer instead of clipping
3480 // JS: I don't think that would be a good idea it could lower quality instead
3481 // of improve it. Just INTRADC clipping deserves changes in quantizer
3482 if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3484 s->block_last_index[i]= -1;
3487 if(s->luma_elim_threshold && !s->mb_intra)
3489 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
3490 if(s->chroma_elim_threshold && !s->mb_intra)
3492 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
3494 if(s->flags & CODEC_FLAG_CBP_RD){
3496 if(s->block_last_index[i] == -1)
3497 s->coded_score[i]= INT_MAX/256;
3502 if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
3503 s->block_last_index[4]=
3504 s->block_last_index[5]= 0;
3506 s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
3509 //non c quantize code returns incorrect block_last_index FIXME
3510 if(s->alternate_scan && s->dct_quantize != dct_quantize_c){
3513 if(s->block_last_index[i]>0){
3514 for(j=63; j>0; j--){
3515 if(s->block[i][ s->intra_scantable.permutated[j] ]) break;
3517 s->block_last_index[i]= j;
3522 /* huffman encode */
3523 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
3524 case CODEC_ID_MPEG1VIDEO:
3525 case CODEC_ID_MPEG2VIDEO:
3526 mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
3528 case CODEC_ID_MPEG4:
3529 mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3530 case CODEC_ID_MSMPEG4V2:
3531 case CODEC_ID_MSMPEG4V3:
3533 msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3535 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
3537 case CODEC_ID_H263P:
3540 h263_encode_mb(s, s->block, motion_x, motion_y); break;
3542 case CODEC_ID_MJPEG:
3543 mjpeg_encode_mb(s, s->block); break;
3549 #endif //CONFIG_ENCODERS
3552 * combines the (truncated) bitstream into a complete frame
3553 * @returns -1 if no complete frame could be created yet
/**
 * Accumulates input chunks in s->parse_context until a frame boundary is
 * known, then hands the assembled frame back through *buf / *buf_size.
 * NOTE(review): 'next' appears to be the frame-boundary offset relative to
 * the end of the input (negative = boundary lies inside already-consumed
 * bytes), with END_NOT_FOUND meaning "buffer everything and wait for more
 * data" — confirm against the parser callers; several lines of this
 * function are missing from this excerpt.
 */
3555 int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
3556 ParseContext *pc= &s->parse_context;
/* debug dump of parser state (the enclosing debug guard is not visible in this excerpt) */
3560 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3561 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3565 /* copy over-read bytes from the previous frame back into the buffer */
3566 for(; pc->overread>0; pc->overread--){
3567 pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
3570 pc->last_index= pc->index;
3572 /* no boundary found yet: buffer the whole input and return */
3573 if(next == END_NOT_FOUND){
3574 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3576 memcpy(&pc->buffer[pc->index], *buf, *buf_size);
3577 pc->index += *buf_size;
/* boundary found: remember where the next frame's over-read bytes start */
3582 pc->overread_index= pc->index + next;
3584 /* append the remainder of this frame (plus safety padding) to the buffer */
3586 pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3588 memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
3593 /* store overread bytes: replay the bytes past the boundary into the startcode state machine */
3594 for(;next < 0; next++){
3595 pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
/* debug dump of the resulting state (guard not visible in this excerpt) */
3601 printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3602 printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
/**
 * Flush hook: releases every still-allocated picture buffer (internal or
 * user supplied) and resets the frame parser so stale partial-frame data
 * is discarded. Safe to call on a half-initialized context.
 */
3609 void ff_mpeg_flush(AVCodecContext *avctx){
3611 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never fully initialized */
3613 if(s==NULL || s->picture==NULL)
/* hand each allocated picture back through the release callback */
3616 for(i=0; i<MAX_PICTURE_COUNT; i++){
3617 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
3618 || s->picture[i].type == FF_BUFFER_TYPE_USER))
3619 avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
3621 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
/* reset the bitstream parser state machine */
3623 s->parse_context.state= -1;
3624 s->parse_context.frame_start_found= 0;
3625 s->parse_context.overread= 0;
3626 s->parse_context.overread_index= 0;
3627 s->parse_context.index= 0;
3628 s->parse_context.last_index= 0;
3631 #ifdef CONFIG_ENCODERS
/**
 * Copies 'length' bits from src into the bitstream writer pb.
 * Whole 16-bit words are written first, then the trailing 0..15 bits.
 * NOTE(review): despite its name, 'bytes' holds length>>4, i.e. a count of
 * 16-bit words, not bytes.
 */
3632 void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
3634 int bytes= length>>4;
3635 int bits= length&15;
3638 if(length==0) return;
/* bulk copy in big-endian 16-bit units */
3640 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
/* final partial word: emit only its top 'bits' bits */
3641 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
/**
 * Snapshots the encoder state that encode_mb() mutates (prediction state,
 * bit-count statistics, quantizer) into d, so that a macroblock can later
 * be re-encoded with a different mode; see encode_mb_hq().
 * 'type' selects how much needs saving (-1 = everything).
 */
3644 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion-vector prediction state */
3647 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
3650 d->mb_skip_run= s->mb_skip_run;
3652 d->last_dc[i]= s->last_dc[i];
/* bit-usage statistics */
3655 d->mv_bits= s->mv_bits;
3656 d->i_tex_bits= s->i_tex_bits;
3657 d->p_tex_bits= s->p_tex_bits;
3658 d->i_count= s->i_count;
3659 d->f_count= s->f_count;
3660 d->b_count= s->b_count;
3661 d->skip_count= s->skip_count;
3662 d->misc_bits= s->misc_bits;
/* quantizer state */
3666 d->qscale= s->qscale;
3667 d->dquant= s->dquant;
/**
 * Counterpart of copy_context_before_encode(): copies the state produced
 * by encoding one macroblock (MVs, statistics, mode flags, bitstream
 * writers, block indexes) from s into d. Used both to record the best
 * candidate and to restore it afterwards.
 */
3670 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion-vector prediction state */
3673 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
3674 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?
3677 d->mb_skip_run= s->mb_skip_run;
3679 d->last_dc[i]= s->last_dc[i];
/* bit-usage statistics */
3682 d->mv_bits= s->mv_bits;
3683 d->i_tex_bits= s->i_tex_bits;
3684 d->p_tex_bits= s->p_tex_bits;
3685 d->i_count= s->i_count;
3686 d->f_count= s->f_count;
3687 d->b_count= s->b_count;
3688 d->skip_count= s->skip_count;
3689 d->misc_bits= s->misc_bits;
/* chosen macroblock mode */
3691 d->mb_intra= s->mb_intra;
3692 d->mb_skiped= s->mb_skiped;
3693 d->mv_type= s->mv_type;
3694 d->mv_dir= s->mv_dir;
/* secondary bitstream writers only exist with MPEG-4 data partitioning */
3696 if(s->data_partitioning){
3698 d->tex_pb= s->tex_pb;
3702 d->block_last_index[i]= s->block_last_index[i];
3703 d->interlaced_dct= s->interlaced_dct;
3704 d->qscale= s->qscale;
/**
 * Trial-encodes one macroblock with the given candidate mode and motion
 * vectors into a scratch bitstream buffer, computes its cost (bit count,
 * or rate-distortion when mb_decision==FF_MB_DECISION_RD) and keeps it in
 * 'best' if it beats *dmin. 'next_block' ping-pongs between the two
 * scratch buffers so the current best candidate's bits survive.
 */
3707 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
3708 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3709 int *dmin, int *next_block, int motion_x, int motion_y)
3712 uint8_t *dest_backup[3];
/* start from the pre-encode snapshot */
3714 copy_context_before_encode(s, backup, type);
/* route block data and bits into the scratch buffers for this candidate */
3716 s->block= s->blocks[*next_block];
3717 s->pb= pb[*next_block];
3718 if(s->data_partitioning){
3719 s->pb2 = pb2 [*next_block];
3720 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction output into the ME scratchpad (RD mode decodes the MB) */
3724 memcpy(dest_backup, s->dest, sizeof(s->dest));
3725 s->dest[0] = s->me.scratchpad;
3726 s->dest[1] = s->me.scratchpad + 16;
3727 s->dest[2] = s->me.scratchpad + 16 + 8;
3728 assert(2*s->uvlinesize == s->linesize); //should be no prob for encoding
3729 assert(s->linesize >= 64); //FIXME
3732 encode_mb(s, motion_x, motion_y);
/* cost = bits produced across all active bitstream writers */
3734 score= get_bit_count(&s->pb);
3735 if(s->data_partitioning){
3736 score+= get_bit_count(&s->pb2);
3737 score+= get_bit_count(&s->tex_pb);
/* full RD: decode the MB and add lambda-weighted SSE distortion */
3740 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
3741 MPV_decode_mb(s, s->block);
3743 score *= s->lambda2;
3744 score += sse_mb(s) << FF_LAMBDA_SHIFT;
3748 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* new best candidate: remember its post-encode state */
3755 copy_context_after_encode(best, s, type);
/**
 * Sum of squared differences between two w x h pixel blocks.
 * Uses the optimized dsputil routines for the common 16x16 and 8x8 sizes;
 * otherwise falls back to a scalar loop over a square lookup table
 * (sq is biased by +256 so negative differences index correctly).
 */
3759 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3760 uint32_t *sq = squareTbl + 256;
3765 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
3766 else if(w==8 && h==8)
3767 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
/* generic path for edge/partial blocks */
3771 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/**
 * SSE of the current macroblock's reconstruction (s->dest) against the
 * source picture, summed over luma and both chroma planes. Full 16x16
 * macroblocks take the fast dsputil path; macroblocks clipped by the
 * picture edge use the generic sse() with the reduced w/h.
 */
3780 static int sse_mb(MpegEncContext *s){
/* clip the block size at the right/bottom picture border */
3784 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3785 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* full-size macroblock: fast path */
3788 return s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
3789 +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
3790 +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* partial macroblock at the picture edge: generic path */
3792 return sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
3793 +sse(s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
3794 +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/**
 * Encodes one complete picture: motion estimation, f_code/b_code
 * selection, rate control, picture header, then the macroblock loop with
 * optional multi-mode (RD/bit-count) decision, and finally the codec
 * specific trailer. This is the encoder's main per-frame driver.
 * NOTE(review): numerous interior lines (braces, declarations, some
 * branches) are missing from this excerpt; comments below describe only
 * what the visible code shows.
 */
3797 static void encode_picture(MpegEncContext *s, int picture_number)
3799 int mb_x, mb_y, pdif = 0;
/* two ping-pong sets of scratch bitstream buffers for candidate-mode encoding */
3802 MpegEncContext best_s, backup_s;
3803 uint8_t bit_buf[2][3000];
3804 uint8_t bit_buf2[2][3000];
3805 uint8_t bit_buf_tex[2][3000];
3806 PutBitContext pb[2], pb2[2], tex_pb[2];
3809 init_put_bits(&pb [i], bit_buf [i], 3000);
3810 init_put_bits(&pb2 [i], bit_buf2 [i], 3000);
3811 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000);
3814 s->picture_number = picture_number;
3816 /* Reset the average MB variance */
3817 s->current_picture.mb_var_sum = 0;
3818 s->current_picture.mc_mb_var_sum = 0;
3821 /* we need to initialize some time vars before we can encode b-frames */
3822 // RAL: Condition added for MPEG1VIDEO
3823 if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3824 ff_set_mpeg4_time(s, s->picture_number);
3827 s->scene_change_score=0;
3829 s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME ratedistoration
/* rounding mode: I frames reset it, non-B frames may toggle it (flipflop) */
3831 if(s->pict_type==I_TYPE){
3832 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3833 else s->no_rounding=0;
3834 }else if(s->pict_type!=B_TYPE){
3835 if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3836 s->no_rounding ^= 1;
3839 /* Estimate motion for every MB */
3840 s->mb_intra=0; //for the rate distoration & bit compare functions
3841 if(s->pict_type != I_TYPE){
3842 if(s->pict_type != B_TYPE){
/* optional pre-pass ME (bottom-right to top-left) to seed predictors */
3843 if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3845 s->me.dia_size= s->avctx->pre_dia_size;
3847 for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3849 for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3851 ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
/* main motion-estimation pass */
3858 s->me.dia_size= s->avctx->dia_size;
3859 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3861 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3862 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3863 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3864 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3865 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3867 s->block_index[0]+=2;
3868 s->block_index[1]+=2;
3869 s->block_index[2]+=2;
3870 s->block_index[3]+=2;
3872 /* compute motion vector & mb_type and store in context */
3873 if(s->pict_type==B_TYPE)
3874 ff_estimate_b_frame_motion(s, mb_x, mb_y);
3876 ff_estimate_p_frame_motion(s, mb_x, mb_y);
3879 }else /* if(s->pict_type == I_TYPE) */{
/* I frame: everything is intra */
3881 for(i=0; i<s->mb_stride*s->mb_height; i++)
3882 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3884 if(!s->fixed_qscale){
3885 /* finding spatial complexity for I-frame rate control */
3886 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3887 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3890 uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3892 int sum = s->dsp.pix_sum(pix, s->linesize);
3894 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3896 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3897 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3898 s->current_picture.mb_var_sum += varc;
/* scene change: force the P frame to be coded as I */
3905 if(s->scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
3906 s->pict_type= I_TYPE;
3907 for(i=0; i<s->mb_stride*s->mb_height; i++)
3908 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3909 //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* pick f_code/b_code from the MV ranges and clamp out-of-range vectors */
3913 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3914 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3916 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3918 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3919 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3920 s->f_code= FFMAX(s->f_code, FFMAX(a,b));
3923 ff_fix_long_p_mvs(s);
3924 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3925 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3928 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3929 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3934 if(s->pict_type==B_TYPE){
3937 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3938 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3939 s->f_code = FFMAX(a, b);
3941 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3942 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3943 s->b_code = FFMAX(a, b);
3945 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3946 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3947 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3948 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3949 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3951 for(dir=0; dir<2; dir++){
3954 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3955 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3956 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3957 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* rate control: pick the frame quantizer */
3965 if (!s->fixed_qscale)
3966 s->current_picture.quality = ff_rate_estimate_qscale(s);
3968 if(s->adaptive_quant){
3970 switch(s->codec_id){
3971 case CODEC_ID_MPEG4:
3972 ff_clean_mpeg4_qscales(s);
3975 case CODEC_ID_H263P:
3977 ff_clean_h263_qscales(s);
3982 s->lambda= s->lambda_table[0];
3985 s->lambda= s->current_picture.quality;
3986 //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3989 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
3990 s->qscale= 3; //reduce cliping problems
3992 if (s->out_format == FMT_MJPEG) {
3993 /* for mjpeg, we do include qscale in the matrix */
3994 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3996 int j= s->dsp.idct_permutation[i];
3998 s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
4000 convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
4001 s->intra_matrix, s->intra_quant_bias, 8, 8);
4004 //FIXME var duplication
4005 s->current_picture.key_frame= s->pict_type == I_TYPE;
4006 s->current_picture.pict_type= s->pict_type;
4008 if(s->current_picture.key_frame)
4009 s->picture_in_gop_number=0;
/* write the codec specific picture header and count its bits */
4011 s->last_bits= get_bit_count(&s->pb);
4012 switch(s->out_format) {
4014 mjpeg_picture_header(s);
4018 if (s->codec_id == CODEC_ID_WMV2)
4019 ff_wmv2_encode_picture_header(s, picture_number);
4020 else if (s->h263_msmpeg4)
4021 msmpeg4_encode_picture_header(s, picture_number);
4022 else if (s->h263_pred)
4023 mpeg4_encode_picture_header(s, picture_number);
4024 else if (s->codec_id == CODEC_ID_RV10)
4025 rv10_encode_picture_header(s, picture_number);
4026 else if (s->codec_id == CODEC_ID_FLV1)
4027 ff_flv_encode_picture_header(s, picture_number);
4029 h263_encode_picture_header(s, picture_number);
4033 mpeg1_encode_picture_header(s, picture_number);
4040 bits= get_bit_count(&s->pb);
4041 s->header_bits= bits - s->last_bits;
4053 /* init last dc values */
4054 /* note: quant matrix value (8) is implied here */
4055 s->last_dc[i] = 128;
4057 s->current_picture_ptr->error[i] = 0;
4060 memset(s->last_mv, 0, sizeof(s->last_mv));
4065 switch(s->codec_id){
4067 case CODEC_ID_H263P:
4069 s->gob_index = ff_h263_get_gob_height(s);
4071 case CODEC_ID_MPEG4:
4072 if(s->partitioned_frame)
4073 ff_mpeg4_init_partitions(s);
/* ---- macroblock encoding loop ---- */
4080 s->first_slice_line = 1;
4081 s->ptr_lastgob = s->pb.buf;
4082 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
4086 ff_set_qscale(s, s->qscale);
4087 ff_init_block_index(s);
4089 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
4090 const int xy= mb_y*s->mb_stride + mb_x;
4091 int mb_type= s->mb_type[xy];
4097 ff_update_block_index(s);
4099 /* write gob / video packet header */
4102 int current_packet_size, is_gob_start;
4104 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
4106 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
/* per-codec constraints on where a resync point may start */
4108 switch(s->codec_id){
4110 case CODEC_ID_H263P:
4111 if(!s->h263_slice_structured)
4112 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
4114 case CODEC_ID_MPEG2VIDEO:
4115 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
4116 case CODEC_ID_MPEG1VIDEO:
4117 if(s->mb_skip_run) is_gob_start=0;
4122 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
4123 ff_mpeg4_merge_partitions(s);
4124 ff_mpeg4_init_partitions(s);
4127 if(s->codec_id==CODEC_ID_MPEG4)
4128 ff_mpeg4_stuffing(&s->pb);
4130 align_put_bits(&s->pb);
4131 flush_put_bits(&s->pb);
4133 assert((get_bit_count(&s->pb)&7) == 0);
4134 current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
/* deliberate error injection for robustness testing */
4136 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
4137 int r= get_bit_count(&s->pb)/8 + s->picture_number + s->codec_id + s->mb_x + s->mb_y;
4138 int d= 100 / s->avctx->error_rate;
4140 current_packet_size=0;
4141 #ifndef ALT_BITSTREAM_WRITER
4142 s->pb.buf_ptr= s->ptr_lastgob;
4144 assert(pbBufPtr(&s->pb) == s->ptr_lastgob);
4148 if (s->avctx->rtp_callback)
4149 s->avctx->rtp_callback(s->ptr_lastgob, current_packet_size, 0);
/* emit the codec specific resync/slice/GOB header */
4151 switch(s->codec_id){
4152 case CODEC_ID_MPEG4:
4153 ff_mpeg4_encode_video_packet_header(s);
4154 ff_mpeg4_clean_buffers(s);
4156 case CODEC_ID_MPEG1VIDEO:
4157 case CODEC_ID_MPEG2VIDEO:
4158 ff_mpeg1_encode_slice_header(s);
4159 ff_mpeg1_clean_buffers(s);
4162 case CODEC_ID_H263P:
4163 h263_encode_gob_header(s, mb_y);
4167 if(s->flags&CODEC_FLAG_PASS1){
4168 int bits= get_bit_count(&s->pb);
4169 s->misc_bits+= bits - s->last_bits;
4173 s->ptr_lastgob += current_packet_size;
4174 s->first_slice_line=1;
4175 s->resync_mb_x=mb_x;
4176 s->resync_mb_y=mb_y;
4181 if( (s->resync_mb_x == s->mb_x)
4182 && s->resync_mb_y+1 == s->mb_y){
4183 s->first_slice_line=0;
4187 s->dquant=0; //only for QP_RD
/* multi-candidate mode decision: try each possible MB type and keep the cheapest */
4189 if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible
4191 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
4193 copy_context_before_encode(&backup_s, s, -1);
4195 best_s.data_partitioning= s->data_partitioning;
4196 best_s.partitioned_frame= s->partitioned_frame;
4197 if(s->data_partitioning){
4198 backup_s.pb2= s->pb2;
4199 backup_s.tex_pb= s->tex_pb;
4202 if(mb_type&CANDIDATE_MB_TYPE_INTER){
4203 s->mv_dir = MV_DIR_FORWARD;
4204 s->mv_type = MV_TYPE_16X16;
4206 s->mv[0][0][0] = s->p_mv_table[xy][0];
4207 s->mv[0][0][1] = s->p_mv_table[xy][1];
4208 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
4209 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
4211 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
4212 s->mv_dir = MV_DIR_FORWARD;
4213 s->mv_type = MV_TYPE_FIELD;
4216 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
4217 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
4218 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
4220 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
4221 &dmin, &next_block, 0, 0);
4223 if(mb_type&CANDIDATE_MB_TYPE_SKIPED){
4224 s->mv_dir = MV_DIR_FORWARD;
4225 s->mv_type = MV_TYPE_16X16;
4229 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPED, pb, pb2, tex_pb,
4230 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
4232 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
4233 s->mv_dir = MV_DIR_FORWARD;
4234 s->mv_type = MV_TYPE_8X8;
4237 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
4238 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
4240 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
4241 &dmin, &next_block, 0, 0);
4243 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
4244 s->mv_dir = MV_DIR_FORWARD;
4245 s->mv_type = MV_TYPE_16X16;
4247 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
4248 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
4249 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
4250 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
4252 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
4253 s->mv_dir = MV_DIR_BACKWARD;
4254 s->mv_type = MV_TYPE_16X16;
4256 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
4257 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
4258 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
4259 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
4261 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
4262 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4263 s->mv_type = MV_TYPE_16X16;
4265 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
4266 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
4267 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
4268 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
4269 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
4270 &dmin, &next_block, 0, 0);
4272 if(mb_type&CANDIDATE_MB_TYPE_DIRECT){
4273 int mx= s->b_direct_mv_table[xy][0];
4274 int my= s->b_direct_mv_table[xy][1];
4276 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
4279 ff_mpeg4_set_direct_mv(s, mx, my);
4281 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
4282 &dmin, &next_block, mx, my);
4284 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
4285 s->mv_dir = MV_DIR_FORWARD;
4286 s->mv_type = MV_TYPE_FIELD;
4289 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
4290 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
4291 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
4293 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
4294 &dmin, &next_block, 0, 0);
4296 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
4297 s->mv_dir = MV_DIR_BACKWARD;
4298 s->mv_type = MV_TYPE_FIELD;
4301 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
4302 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
4303 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
4305 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
4306 &dmin, &next_block, 0, 0);
4308 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
4309 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4310 s->mv_type = MV_TYPE_FIELD;
4312 for(dir=0; dir<2; dir++){
4314 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
4315 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
4316 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
4319 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
4320 &dmin, &next_block, 0, 0);
4322 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
4324 s->mv_type = MV_TYPE_16X16;
4328 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
4329 &dmin, &next_block, 0, 0);
4330 if(s->h263_pred || s->h263_aic){
4332 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
4334 ff_clean_intra_table_entries(s); //old mode?
/* QP_RD: additionally search nearby quantizers around the best mode */
4338 if(s->flags & CODEC_FLAG_QP_RD){
4339 if(best_s.mv_type==MV_TYPE_16X16 && !(best_s.mv_dir&MV_DIRECT)){
4340 const int last_qp= backup_s.qscale;
4341 int dquant, dir, qp, dc[6];
4343 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
4345 assert(backup_s.dquant == 0);
4348 s->mv_dir= best_s.mv_dir;
4349 s->mv_type = MV_TYPE_16X16;
4350 s->mb_intra= best_s.mb_intra;
4351 s->mv[0][0][0] = best_s.mv[0][0][0];
4352 s->mv[0][0][1] = best_s.mv[0][0][1];
4353 s->mv[1][0][0] = best_s.mv[1][0][0];
4354 s->mv[1][0][1] = best_s.mv[1][0][1];
4356 dir= s->pict_type == B_TYPE ? 2 : 1;
4357 if(last_qp + dir > s->avctx->qmax) dir= -dir;
4358 for(dquant= dir; dquant<=2 && dquant>=-2; dquant += dir){
4359 qp= last_qp + dquant;
4360 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
4362 backup_s.dquant= dquant;
/* save DC/AC prediction state so a rejected qp can be rolled back */
4365 dc[i]= s->dc_val[0][ s->block_index[i] ];
4366 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
4370 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
4371 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
4372 if(best_s.qscale != qp){
4375 s->dc_val[0][ s->block_index[i] ]= dc[i];
4376 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
4379 if(dir > 0 && dquant==dir){
4387 s->current_picture.qscale_table[xy]= qp;
/* adopt the winning candidate and copy its bits into the real bitstream */
4391 copy_context_after_encode(s, &best_s, -1);
4393 pb_bits_count= get_bit_count(&s->pb);
4394 flush_put_bits(&s->pb);
4395 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
4398 if(s->data_partitioning){
4399 pb2_bits_count= get_bit_count(&s->pb2);
4400 flush_put_bits(&s->pb2);
4401 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
4402 s->pb2= backup_s.pb2;
4404 tex_pb_bits_count= get_bit_count(&s->tex_pb);
4405 flush_put_bits(&s->tex_pb);
4406 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
4407 s->tex_pb= backup_s.tex_pb;
4409 s->last_bits= get_bit_count(&s->pb);
4412 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
4413 ff_h263_update_motion_val(s);
/* copy the winner's reconstruction from the scratchpad into the picture */
4417 s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad , s->linesize ,16);
4418 s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
4419 s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
4422 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
4423 MPV_decode_mb(s, s->block);
/* single-candidate path: set up the one possible MB type and encode it */
4425 int motion_x, motion_y;
4426 s->mv_type=MV_TYPE_16X16;
4427 // only one MB-Type possible
4430 case CANDIDATE_MB_TYPE_INTRA:
4433 motion_x= s->mv[0][0][0] = 0;
4434 motion_y= s->mv[0][0][1] = 0;
4436 case CANDIDATE_MB_TYPE_INTER:
4437 s->mv_dir = MV_DIR_FORWARD;
4439 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
4440 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
4442 case CANDIDATE_MB_TYPE_INTER_I:
4443 s->mv_dir = MV_DIR_FORWARD;
4444 s->mv_type = MV_TYPE_FIELD;
4447 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
4448 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
4449 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
4451 motion_x = motion_y = 0;
4453 case CANDIDATE_MB_TYPE_INTER4V:
4454 s->mv_dir = MV_DIR_FORWARD;
4455 s->mv_type = MV_TYPE_8X8;
4458 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
4459 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
4461 motion_x= motion_y= 0;
4463 case CANDIDATE_MB_TYPE_DIRECT:
4464 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
4466 motion_x=s->b_direct_mv_table[xy][0];
4467 motion_y=s->b_direct_mv_table[xy][1];
4469 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
4472 case CANDIDATE_MB_TYPE_BIDIR:
4473 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4477 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
4478 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
4479 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
4480 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
4482 case CANDIDATE_MB_TYPE_BACKWARD:
4483 s->mv_dir = MV_DIR_BACKWARD;
4485 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
4486 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
4488 case CANDIDATE_MB_TYPE_FORWARD:
4489 s->mv_dir = MV_DIR_FORWARD;
4491 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
4492 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
4493 // printf(" %d %d ", motion_x, motion_y);
4495 case CANDIDATE_MB_TYPE_FORWARD_I:
4496 s->mv_dir = MV_DIR_FORWARD;
4497 s->mv_type = MV_TYPE_FIELD;
4500 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
4501 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
4502 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
4504 motion_x=motion_y=0;
4506 case CANDIDATE_MB_TYPE_BACKWARD_I:
4507 s->mv_dir = MV_DIR_BACKWARD;
4508 s->mv_type = MV_TYPE_FIELD;
4511 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
4512 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
4513 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
4515 motion_x=motion_y=0;
4517 case CANDIDATE_MB_TYPE_BIDIR_I:
4518 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
4519 s->mv_type = MV_TYPE_FIELD;
4521 for(dir=0; dir<2; dir++){
4523 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
4524 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
4525 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
4528 motion_x=motion_y=0;
4531 motion_x=motion_y=0; //gcc warning fix
4532 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
4535 encode_mb(s, motion_x, motion_y);
4537 // RAL: Update last macrobloc type
4538 s->last_mv_dir = s->mv_dir;
4541 if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
4542 ff_h263_update_motion_val(s);
4545 MPV_decode_mb(s, s->block);
4548 /* clean the MV table in IPS frames for direct mode in B frames */
4549 if(s->mb_intra /* && I,P,S_TYPE */){
4550 s->p_mv_table[xy][0]=0;
4551 s->p_mv_table[xy][1]=0;
/* accumulate per-plane SSE for PSNR reporting (clipped at picture edges) */
4554 if(s->flags&CODEC_FLAG_PSNR){
4558 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
4559 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
4561 s->current_picture_ptr->error[0] += sse(
4562 s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
4563 s->dest[0], w, h, s->linesize);
4564 s->current_picture_ptr->error[1] += sse(
4565 s, s->new_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
4566 s->dest[1], w>>1, h>>1, s->uvlinesize);
4567 s->current_picture_ptr->error[2] += sse(
4568 s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
4569 s->dest[2], w>>1, h>>1, s->uvlinesize);
4572 ff_h263_loop_filter(s);
4573 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
/* ---- frame trailer ---- */
4579 if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
4580 ff_mpeg4_merge_partitions(s);
4582 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
4583 msmpeg4_encode_ext_header(s);
4585 if(s->codec_id==CODEC_ID_MPEG4)
4586 ff_mpeg4_stuffing(&s->pb);
4589 /* Send the last GOB if RTP */
4590 if (s->avctx->rtp_callback) {
4591 flush_put_bits(&s->pb);
4592 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
4593 /* Call the RTP callback to send the last GOB */
4594 s->avctx->rtp_callback(s->ptr_lastgob, pdif, 0);
4598 #endif //CONFIG_ENCODERS
/*
 * Adaptive DCT-domain denoising: accumulate per-coefficient error sums and
 * subtract/add the learned per-coefficient offset from each transform
 * coefficient.  Separate statistics are kept for intra and inter blocks
 * (indexed by s->mb_intra via dct_count / dct_error_sum / dct_offset).
 * NOTE(review): the loop framing and the positive/negative sign branches are
 * missing from this extraction; comments describe only the visible lines.
 */
4600 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
4601 const int intra= s->mb_intra;
/* count blocks processed so the averaged offsets can be updated elsewhere */
4604 s->dct_count[intra]++;
4606 for(i=0; i<64; i++){
4607 int level= block[i];
/* positive-level path: record the raw magnitude, then shrink toward zero,
 * clamping at zero so the offset never flips the sign */
4611 s->dct_error_sum[intra][i] += level;
4612 level -= s->dct_offset[intra][i];
4613 if(level<0) level=0;
/* negative-level path: mirror of the above (subtract magnitude, clamp) */
4615 s->dct_error_sum[intra][i] -= level;
4616 level += s->dct_offset[intra][i];
4617 if(level>0) level=0;
4624 #ifdef CONFIG_ENCODERS
/*
 * Rate-distortion optimal quantization of one 8x8 block using a trellis
 * (Viterbi-style) search: for each scan position a small set of candidate
 * quantized levels is built, then dynamic programming over scan positions
 * picks the level/run sequence minimizing distortion + lambda * rate.
 * @param s        encoder context (supplies fdct, matrices, VLC length tables)
 * @param block    DCT coefficients in/out; on return holds chosen levels in
 *                 IDCT-permuted order
 * @param n        block index (0..5), used for per-block coded_score
 * @param qscale   quantizer scale
 * @param overflow set non-zero if any coefficient exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient in scan order, or -1
 * NOTE(review): many lines (declarations of q, max, bias, qmul, survivor[],
 * score_tab[], run_tab[], level_tab[], several if/else frames and closing
 * braces) are missing from this extraction; comments describe visible code.
 */
4626 static int dct_quantize_trellis_c(MpegEncContext *s,
4627 DCTELEM *block, int n,
4628 int qscale, int *overflow){
4630 const uint8_t *scantable= s->intra_scantable.scantable;
4631 const uint8_t *perm_scantable= s->intra_scantable.permutated;
4633 unsigned int threshold1, threshold2;
4645 int coeff_count[64];
4646 int qmul, qadd, start_i, last_non_zero, i, dc;
4647 const int esc_length= s->ac_esc_length;
4649 uint8_t * last_length;
/* lambda in the same fixed-point scale as the squared-error distortion */
4650 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
/* forward DCT, then optional denoising of the raw coefficients */
4652 s->dsp.fdct (block);
4654 if(s->dct_error_sum)
4655 s->denoise_dct(s, block);
4657 qadd= ((qscale-1)|1)*8;
4668 /* For AIC we skip quant/dequant of INTRADC */
4673 /* note: block[0] is assumed to be positive */
4674 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: pick quant matrix, rounding bias and intra AC VLC tables */
4677 qmat = s->q_intra_matrix[qscale];
4678 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4679 bias= 1<<(QMAT_SHIFT-1);
4680 length = s->intra_ac_vlc_length;
4681 last_length= s->intra_ac_vlc_last_length;
/* inter path: inter quant matrix and inter AC VLC tables */
4685 qmat = s->q_inter_matrix[qscale];
4686 length = s->inter_ac_vlc_length;
4687 last_length= s->inter_ac_vlc_last_length;
4691 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4692 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that quantizes to non-zero */
4694 for(i=63; i>=start_i; i--) {
4695 const int j = scantable[i];
4696 int level = block[j] * qmat[j];
4698 if(((unsigned)(level+threshold1))>threshold2){
/* build up to two candidate quantized levels per surviving coefficient
 * (the quantized value and the next-smaller magnitude) */
4704 for(i=start_i; i<=last_non_zero; i++) {
4705 const int j = scantable[i];
4706 int level = block[j] * qmat[j];
4708 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4709 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4710 if(((unsigned)(level+threshold1))>threshold2){
4712 level= (bias + level)>>QMAT_SHIFT;
4714 coeff[1][i]= level-1;
4715 // coeff[2][k]= level-2;
4717 level= (bias - level)>>QMAT_SHIFT;
4718 coeff[0][i]= -level;
4719 coeff[1][i]= -level+1;
4720 // coeff[2][k]= -level+2;
4722 coeff_count[i]= FFMIN(level, 2);
4723 assert(coeff_count[i]);
/* below-threshold coefficient: only candidate is +/-1 (sign of level) */
4726 coeff[0][i]= (level>>31)|1;
4731 *overflow= s->max_qcoeff < max; //overflow might have happened
/* all-zero block after quantization: clear it and bail out */
4733 if(last_non_zero < start_i){
4734 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4735 return last_non_zero;
4738 score_tab[start_i]= 0;
4739 survivor[0]= start_i;
/* dynamic programming over scan positions; survivor[] holds the pruned set
 * of start positions still worth extending a run from */
4742 for(i=start_i; i<=last_non_zero; i++){
4744 const int dct_coeff= ABS(block[ scantable[i] ]);
4745 const int zero_distoration= dct_coeff*dct_coeff;
4746 int best_score=256*256*256*120;
4747 for(level_index=0; level_index < coeff_count[i]; level_index++){
4749 int level= coeff[level_index][i];
4750 const int alevel= ABS(level);
/* reconstruct the dequantized value the decoder would see, per codec */
4755 if(s->out_format == FMT_H263){
4756 unquant_coeff= alevel*qmul + qadd;
4758 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
4760 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
4761 unquant_coeff = (unquant_coeff - 1) | 1;
4763 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
4764 unquant_coeff = (unquant_coeff - 1) | 1;
/* distortion relative to coding this coefficient as zero */
4769 distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distoration;
/* level fits the non-escape VLC range: use the run/level length table */
4771 if((level&(~127)) == 0){
4772 for(j=survivor_count-1; j>=0; j--){
4773 int run= i - survivor[j];
4774 int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4775 score += score_tab[i-run];
4777 if(score < best_score){
4780 level_tab[i+1]= level-64;
/* H.263-style codecs use a distinct "last" VLC table for the final coeff */
4784 if(s->out_format == FMT_H263){
4785 for(j=survivor_count-1; j>=0; j--){
4786 int run= i - survivor[j];
4787 int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4788 score += score_tab[i-run];
4789 if(score < last_score){
4792 last_level= level-64;
/* escape-coded level: fixed escape length instead of the VLC table */
4798 distoration += esc_length*lambda;
4799 for(j=survivor_count-1; j>=0; j--){
4800 int run= i - survivor[j];
4801 int score= distoration + score_tab[i-run];
4803 if(score < best_score){
4806 level_tab[i+1]= level-64;
4810 if(s->out_format == FMT_H263){
4811 for(j=survivor_count-1; j>=0; j--){
4812 int run= i - survivor[j];
4813 int score= distoration + score_tab[i-run];
4814 if(score < last_score){
4817 last_level= level-64;
4825 score_tab[i+1]= best_score;
4827 //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
/* prune survivors that can no longer lead to a better path; the <=27 case
 * can prune exactly, longer runs need the lambda slack (see note above) */
4828 if(last_non_zero <= 27){
4829 for(; survivor_count; survivor_count--){
4830 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4834 for(; survivor_count; survivor_count--){
4835 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4840 survivor[ survivor_count++ ]= i+1;
/* non-H.263 codecs: choose the best truncation point after the DP pass */
4843 if(s->out_format != FMT_H263){
4844 last_score= 256*256*256*120;
4845 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4846 int score= score_tab[i];
4847 if(i) score += lambda*2; //FIXME exacter?
4849 if(score < last_score){
4852 last_level= level_tab[i];
4853 last_run= run_tab[i];
4858 s->coded_score[n] = last_score;
4861 last_non_zero= last_i - 1;
4862 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4864 if(last_non_zero < start_i)
4865 return last_non_zero;
/* special case: only the first coefficient survived — pick its level by
 * direct RD comparison against coding the block as empty */
4867 if(last_non_zero == 0 && start_i == 0){
4869 int best_score= dc * dc;
4871 for(i=0; i<coeff_count[0]; i++){
4872 int level= coeff[i][0];
4873 int alevel= ABS(level);
4874 int unquant_coeff, score, distortion;
4876 if(s->out_format == FMT_H263){
4877 unquant_coeff= (alevel*qmul + qadd)>>3;
4879 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
4880 unquant_coeff = (unquant_coeff - 1) | 1;
4882 unquant_coeff = (unquant_coeff + 4) >> 3;
4883 unquant_coeff<<= 3 + 3;
4885 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4887 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4888 else score= distortion + esc_length*lambda;
4890 if(score < best_score){
4892 best_level= level - 64;
4895 block[0]= best_level;
4896 s->coded_score[n] = best_score - dc*dc;
4897 if(best_level == 0) return -1;
4898 else return last_non_zero;
/* backtrack: write the chosen levels into block[] following run_tab,
 * using the IDCT permutation expected by the rest of the encoder */
4904 block[ perm_scantable[last_non_zero] ]= last_level;
4907 for(; i>start_i; i -= run_tab[i] + 1){
4908 block[ perm_scantable[i-1] ]= level_tab[i];
4911 return last_non_zero;
/*
 * Plain (non-trellis) quantization of one 8x8 block: forward DCT, optional
 * denoising, biased division by the quantization matrix, and a final
 * permutation of the non-zero coefficients to match the IDCT's scan order.
 * @return index of the last non-zero coefficient in scan order.
 * NOTE(review): lines are missing from this extraction (intra/inter branch
 * frames, the j = scantable[i] assignments, max tracking, closing braces);
 * comments describe only what is visible.
 */
4914 static int dct_quantize_c(MpegEncContext *s,
4915 DCTELEM *block, int n,
4916 int qscale, int *overflow)
4918 int i, j, level, last_non_zero, q, start_i;
4920 const uint8_t *scantable= s->intra_scantable.scantable;
4923 unsigned int threshold1, threshold2;
4925 s->dsp.fdct (block);
4927 if(s->dct_error_sum)
4928 s->denoise_dct(s, block);
4938 /* For AIC we skip quant/dequant of INTRADC */
4941 /* note: block[0] is assumed to be positive */
4942 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: intra matrix and intra rounding bias */
4945 qmat = s->q_intra_matrix[qscale];
4946 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* inter path: inter matrix and inter rounding bias */
4950 qmat = s->q_inter_matrix[qscale];
4951 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4953 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4954 threshold2= (threshold1<<1);
/* backwards scan to locate the last coefficient above the dead zone */
4955 for(i=63;i>=start_i;i--) {
4957 level = block[j] * qmat[j];
4959 if(((unsigned)(level+threshold1))>threshold2){
/* forward pass: quantize each surviving coefficient with rounding bias */
4966 for(i=start_i; i<=last_non_zero; i++) {
4968 level = block[j] * qmat[j];
4970 // if( bias+level >= (1<<QMAT_SHIFT)
4971 // || bias-level >= (1<<QMAT_SHIFT)){
4972 if(((unsigned)(level+threshold1))>threshold2){
4974 level= (bias + level)>>QMAT_SHIFT;
4977 level= (bias - level)>>QMAT_SHIFT;
4985 *overflow= s->max_qcoeff < max; //overflow might have happened
4987 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4988 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4989 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4991 return last_non_zero;
4994 #endif //CONFIG_ENCODERS
/*
 * MPEG-1 intra-block dequantization: DC is scaled by the luma/chroma DC
 * scale, AC coefficients by qscale * intra matrix with odd-value forcing
 * ((level-1)|1, the MPEG-1 mismatch-avoidance rule).
 * NOTE(review): the luma/chroma branch and the positive/negative level
 * branches are missing from this extraction; only visible lines commented.
 */
4996 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
4997 DCTELEM *block, int n, int qscale)
4999 int i, level, nCoeffs;
5000 const uint16_t *quant_matrix;
5002 nCoeffs= s->block_last_index[n];
/* DC coefficient: luma vs chroma scale (branch framing not visible here) */
5005 block[0] = block[0] * s->y_dc_scale;
5007 block[0] = block[0] * s->c_dc_scale;
5008 /* XXX: only mpeg1 */
5009 quant_matrix = s->intra_matrix;
5010 for(i=1;i<=nCoeffs;i++) {
5011 int j= s->intra_scantable.permutated[i];
/* negative-level branch: dequantize and force the result odd */
5016 level = (int)(level * qscale * quant_matrix[j]) >> 3;
5017 level = (level - 1) | 1;
/* positive-level branch: same formula */
5020 level = (int)(level * qscale * quant_matrix[j]) >> 3;
5021 level = (level - 1) | 1;
/*
 * MPEG-1 inter-block dequantization: every coefficient (including index 0)
 * uses the (2*level+1) * qscale * matrix >> 4 formula, again forced odd.
 * NOTE(review): sign branches and closing braces are missing from this
 * extraction; only visible lines commented.
 */
5028 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
5029 DCTELEM *block, int n, int qscale)
5031 int i, level, nCoeffs;
5032 const uint16_t *quant_matrix;
5034 nCoeffs= s->block_last_index[n];
5036 quant_matrix = s->inter_matrix;
5037 for(i=0; i<=nCoeffs; i++) {
/* intra scantable permutation is shared by intra and inter paths */
5038 int j= s->intra_scantable.permutated[i];
/* negative-level branch */
5043 level = (((level << 1) + 1) * qscale *
5044 ((int) (quant_matrix[j]))) >> 4;
5045 level = (level - 1) | 1;
/* positive-level branch */
5048 level = (((level << 1) + 1) * qscale *
5049 ((int) (quant_matrix[j]))) >> 4;
5050 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantization: like MPEG-1 intra but WITHOUT the
 * (level-1)|1 odd-forcing (MPEG-2 handles mismatch control differently),
 * and with alternate_scan forcing a full 64-coefficient pass.
 * NOTE(review): luma/chroma and sign branch frames are missing from this
 * extraction; only visible lines commented.
 */
5057 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
5058 DCTELEM *block, int n, int qscale)
5060 int i, level, nCoeffs;
5061 const uint16_t *quant_matrix;
/* alternate scan order means the last index is unreliable; do all 63 ACs */
5063 if(s->alternate_scan) nCoeffs= 63;
5064 else nCoeffs= s->block_last_index[n];
5067 block[0] = block[0] * s->y_dc_scale;
5069 block[0] = block[0] * s->c_dc_scale;
5070 quant_matrix = s->intra_matrix;
5071 for(i=1;i<=nCoeffs;i++) {
5072 int j= s->intra_scantable.permutated[i];
/* negative-level branch */
5077 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-level branch */
5080 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantization: (2*level+1) * qscale * matrix >> 4 with no
 * odd-forcing; alternate_scan forces a full 64-coefficient pass.
 * NOTE(review): sign branch frames and the MPEG-2 mismatch-control
 * accumulation are not visible in this extraction.
 */
5087 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
5088 DCTELEM *block, int n, int qscale)
5090 int i, level, nCoeffs;
5091 const uint16_t *quant_matrix;
5094 if(s->alternate_scan) nCoeffs= 63;
5095 else nCoeffs= s->block_last_index[n];
5097 quant_matrix = s->inter_matrix;
5098 for(i=0; i<=nCoeffs; i++) {
5099 int j= s->intra_scantable.permutated[i];
/* negative-level branch */
5104 level = (((level << 1) + 1) * qscale *
5105 ((int) (quant_matrix[j]))) >> 4;
/* positive-level branch */
5108 level = (((level << 1) + 1) * qscale *
5109 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263-family intra dequantization: DC scaled by luma/chroma DC scale,
 * AC coefficients reconstructed as level*qmul +/- qadd depending on sign
 * (qadd = (qscale-1)|1, always odd).
 * NOTE(review): the qmul assignment, luma/chroma branch frames and the
 * AIC path are missing from this extraction; only visible lines commented.
 */
5118 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
5119 DCTELEM *block, int n, int qscale)
5121 int i, level, qmul, qadd;
5124 assert(s->block_last_index[n]>=0);
5130 block[0] = block[0] * s->y_dc_scale;
5132 block[0] = block[0] * s->c_dc_scale;
5133 qadd = (qscale - 1) | 1;
/* raster_end maps the zigzag last-index to the last raster position */
5140 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
5142 for(i=1; i<=nCoeffs; i++) {
/* negative level: move away from zero by qadd after scaling */
5146 level = level * qmul - qadd;
/* positive level: symmetric case */
5148 level = level * qmul + qadd;
/*
 * H.263-family inter dequantization: identical level*qmul +/- qadd formula
 * as the intra variant, but starting at coefficient 0 (no separate DC
 * handling).
 * NOTE(review): the qmul assignment and sign branch frames are missing from
 * this extraction; only visible lines commented.
 */
5155 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
5156 DCTELEM *block, int n, int qscale)
5158 int i, level, qmul, qadd;
5161 assert(s->block_last_index[n]>=0);
5163 qadd = (qscale - 1) | 1;
5166 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
5168 for(i=0; i<=nCoeffs; i++) {
/* negative level */
5172 level = level * qmul - qadd;
/* positive level */
5174 level = level * qmul + qadd;
/*
 * Option table exposed by the MPEG-4/MSMPEG4/WMV encoders below; each entry
 * maps a user-visible option name to a field of AVCodecContext with its
 * valid range and default.
 * NOTE(review): option name strings (e.g. the misspelled
 * "rc_buf_aggresivity") are part of the public interface and must not be
 * renamed here.  Two apparent defects are flagged inline below.
 */
5181 static const AVOption mpeg4_options[] =
5183 AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
5184 AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
5185 "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
5186 bit_rate_tolerance, 4, 240000000, 8000),
5187 AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
5188 AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
5189 AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
5190 rc_eq, "tex^qComp,option1,options2", 0),
5191 AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
5192 rc_min_rate, 4, 24000000, 0),
5193 AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
5194 rc_max_rate, 4, 24000000, 0),
5195 AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
5196 rc_buffer_aggressivity, 4, 24000000, 0),
5197 AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
5198 rc_initial_cplx, 0., 9999999., 0),
5199 AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
5200 i_quant_factor, 0., 0., 0),
/* NOTE(review): the "i_quant_offset" option below is wired to the
 * i_quant_factor field — looks like a copy-paste bug; it should presumably
 * target i_quant_offset.  Left unchanged here; confirm and fix separately. */
5201 AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
5202 i_quant_factor, -999999., 999999., 0),
5203 AVOPTION_CODEC_INT("dct_algo", "dct alghorithm",
5204 dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
5205 AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
5206 lumi_masking, 0., 999999., 0),
5207 AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
5208 temporal_cplx_masking, 0., 999999., 0),
5209 AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
5210 spatial_cplx_masking, 0., 999999., 0),
5211 AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
5212 p_masking, 0., 999999., 0),
5213 AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
5214 dark_masking, 0., 999999., 0),
5215 AVOPTION_CODEC_INT("idct_algo", "idct alghorithm",
5216 idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
5218 AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
5220 AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
5223 AVOPTION_CODEC_INT("me_cmp", "ME compare function",
5224 me_cmp, 0, 24000000, 0),
5225 AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
5226 me_sub_cmp, 0, 24000000, 0),
5229 AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
5230 dia_size, 0, 24000000, 0),
5231 AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
5232 last_predictor_count, 0, 24000000, 0),
5234 AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
5235 pre_me, 0, 24000000, 0),
5236 AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
5237 me_pre_cmp, 0, 24000000, 0),
5239 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
5240 me_range, 0, 24000000, 0),
5241 AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape",
5242 pre_dia_size, 0, 24000000, 0),
5243 AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
5244 me_subpel_quality, 0, 24000000, 0),
/* NOTE(review): duplicate "me_range" entry — identical to the one above;
 * likely an accidental repeat that should be removed. */
5245 AVOPTION_CODEC_INT("me_range", "maximum ME search range",
5246 me_range, 0, 24000000, 0),
5247 AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
5248 flags, CODEC_FLAG_PSNR, 0),
5249 AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
5251 AVOPTION_SUB(avoptions_common),
5255 #ifdef CONFIG_ENCODERS
/*
 * AVCodec registration records for the encoders implemented in this file.
 * All share MpegEncContext as their private context; the MPEG-4 family
 * additionally exposes the mpeg4_options table.
 * NOTE(review): the initializer bodies (name, type, id, init/encode/close
 * callbacks) are missing from this extraction; only the visible fields are
 * shown.
 */
/* plain H.263 */
5257 AVCodec h263_encoder = {
5261 sizeof(MpegEncContext),
/* H.263+ (extended H.263) */
5267 AVCodec h263p_encoder = {
5271 sizeof(MpegEncContext),
/* Flash Video (Sorenson H.263 variant) */
5277 AVCodec flv_encoder = {
5281 sizeof(MpegEncContext),
/* RealVideo 1.0 */
5287 AVCodec rv10_encoder = {
5291 sizeof(MpegEncContext),
/* MPEG-4 part 2 */
5297 AVCodec mpeg4_encoder = {
5301 sizeof(MpegEncContext),
5305 .options = mpeg4_options,
/* Microsoft MPEG-4 v1/v2/v3 */
5308 AVCodec msmpeg4v1_encoder = {
5312 sizeof(MpegEncContext),
5316 .options = mpeg4_options,
5319 AVCodec msmpeg4v2_encoder = {
5323 sizeof(MpegEncContext),
5327 .options = mpeg4_options,
5330 AVCodec msmpeg4v3_encoder = {
5334 sizeof(MpegEncContext),
5338 .options = mpeg4_options,
/* Windows Media Video 7 */
5341 AVCodec wmv1_encoder = {
5345 sizeof(MpegEncContext),
5349 .options = mpeg4_options,
/* Motion JPEG */
5354 AVCodec mjpeg_encoder = {
5358 sizeof(MpegEncContext),
5364 #endif //CONFIG_ENCODERS