2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
25 #include "mpegvideo.h"
28 #include "fastmemcpy.h"
/* Forward declarations for the scalar C reference implementations.  The
   corresponding function-pointer fields in MpegEncContext are set to these
   in MPV_common_init() and may then be replaced by arch-optimized versions
   (see the MMX/AXP/mlib init calls there). */
31 static void encode_picture(MpegEncContext *s, int picture_number);
32 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
33 DCTELEM *block, int n, int qscale);
34 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
35 DCTELEM *block, int n, int qscale);
36 static void dct_unquantize_h263_c(MpegEncContext *s,
37 DCTELEM *block, int n, int qscale);
38 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w);
39 static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
/* Global hook so optimized code can override the edge drawer; defaults to the C version. */
41 void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w)= draw_edges_c;
42 static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h,
43 int src_x, int src_y, int w, int h);
47 /* enable all paranoid tests for rounding, overflows, etc... */
53 /* for jpeg fast DCT */
/* Scale factors for the AAN fast DCT (fdct_ifast), premultiplied by 1<<14.
   convert_matrix() divides these back out of the quantizer multipliers on
   the fdct_ifast path, since that DCT leaves its coefficients pre-scaled. */
56 static const unsigned short aanscales[64] = {
57 /* precomputed values scaled up by 14 bits */
58 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
59 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
60 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
61 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
62 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
63 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
64 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446,
65 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247
/* Rounding table for deriving the chroma motion vector in H.263-style codecs.
   NOTE(review): the call sites are not visible in this chunk -- presumably the
   index is the 4-bit fractional part of the summed luma MVs; confirm there. */
68 static UINT8 h263_chroma_roundtab[16] = {
69 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
/* Default motion-vector penalty / f_code tables; zero-filled (and the central
   +-16 range of fcode_tab set to 1) in MPV_encode_init.  Codecs may install
   their own tables over these defaults. */
72 static UINT16 default_mv_penalty[MAX_FCODE+1][MAX_MV*2+1];
73 static UINT8 default_fcode_tab[MAX_MV*2+1];
75 extern UINT8 zigzag_end[64];
77 /* default motion estimation method, used when the app passes me_method == 0 */
78 int motion_estimation_method = ME_EPZS;
/*
 * Build the per-qscale (1..31) quantizer multiplier tables from a quant matrix.
 *   qmat        : high-precision fixed-point multipliers (QMAT_SHIFT) for the C quantizer
 *   qmat16      : 16-bit multipliers (QMAT_SHIFT_MMX) for the MMX quantizer
 *   qmat16_bias : rounding bias matched to qmat16
 * The layout depends on which forward DCT is in use, because ff_jpeg_fdct_islow
 * and fdct_ifast produce differently scaled coefficients (the ifast path must
 * also divide out the aanscales factors).
 */
80 static void convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[64], uint16_t (*qmat16_bias)[64],
81 const UINT16 *quant_matrix, int bias)
85 for(qscale=1; qscale<32; qscale++){
87 if (s->fdct == ff_jpeg_fdct_islow) {
89 const int j= block_permute_op(i);
90 /* 16 <= qscale * quant_matrix[i] <= 7905 */
91 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
92 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
93 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
95 qmat[qscale][j] = (int)((UINT64_C(1) << QMAT_SHIFT) /
96 (qscale * quant_matrix[j]));
98 } else if (s->fdct == fdct_ifast) {
100 const int j= block_permute_op(i);
101 /* 16 <= qscale * quant_matrix[i] <= 7905 */
102 /* 19952 <= aanscales[i] * qscale * quant_matrix[i] <= 249205026 */
103 /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
104 /* 3444240 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
106 qmat[qscale][j] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
107 (aanscales[i] * qscale * quant_matrix[j]));
111 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
112 So 16 <= qscale * quant_matrix[i] <= 7905
113 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
114 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67
116 qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
117 qmat16[qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[block_permute_op(i)]);
/* keep the 16-bit multiplier away from 0 and 32768 (128*256), which would
   presumably break the 16-bit multiply in the MMX quantizer -- clamp to 32767 */
119 if(qmat16[qscale][i]==0 || qmat16[qscale][i]==128*256) qmat16[qscale][i]=128*256-1;
120 qmat16_bias[qscale][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][i]);
125 // move into common.c perhaps
/* Allocate zeroed memory into p via av_mallocz.  The failure handling of this
   macro (NULL check and bail-out of the enclosing function) is in lines not
   visible in this chunk -- NOTE(review): confirm the error path there. */
126 #define CHECKED_ALLOCZ(p, size)\
128 p= av_mallocz(size);\
135 /* init common structure for both encoder and decoder: DSP function pointers,
   picture buffers (last/next/aux), motion-vector and macroblock side-data
   tables.  On allocation failure the CHECKED_ALLOCZ error path (not visible
   in this chunk) presumably returns an error code -- confirm. */
136 int MPV_common_init(MpegEncContext *s)
/* install scalar C implementations first ... */
141 s->dct_unquantize_h263 = dct_unquantize_h263_c;
142 s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
143 s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
144 s->dct_quantize= dct_quantize_c;
146 if(s->avctx->dct_algo==FF_DCT_FASTINT)
147 s->fdct = fdct_ifast;
149 s->fdct = ff_jpeg_fdct_islow;
/* ... then let arch-specific init override them (the #ifdef guards around
   these calls are in elided lines) */
152 MPV_common_init_mmx(s);
155 MPV_common_init_axp(s);
158 MPV_common_init_mlib(s);
/* macroblock grid, rounded up to full 16x16 blocks */
161 s->mb_width = (s->width + 15) / 16;
162 s->mb_height = (s->height + 15) / 16;
164 /* set default edge pos, will be overridden in decode_header if needed */
165 s->h_edge_pos= s->mb_width*16;
166 s->v_edge_pos= s->mb_height*16;
168 /* convert fourcc to upper case, one byte at a time */
169 s->avctx->fourcc= toupper( s->avctx->fourcc &0xFF)
170 + (toupper((s->avctx->fourcc>>8 )&0xFF)<<8 )
171 + (toupper((s->avctx->fourcc>>16)&0xFF)<<16)
172 + (toupper((s->avctx->fourcc>>24)&0xFF)<<24);
174 s->mb_num = s->mb_width * s->mb_height;
/* no app-supplied direct-rendering buffers: allocate our own padded frames
   with an EDGE_WIDTH border (halved for the chroma planes) */
175 if(!(s->flags&CODEC_FLAG_DR1)){
176 s->linesize = s->mb_width * 16 + 2 * EDGE_WIDTH;
177 s->uvlinesize = s->mb_width * 8 + EDGE_WIDTH;
180 int w, h, shift, pict_start;
183 h = s->mb_height * 16 + 2 * EDGE_WIDTH;
184 shift = (i == 0) ? 0 : 1;
185 c_size = (s->linesize>>shift) * (h >> shift);
/* pict_start skips the top and left edge so picture[] points at the payload */
186 pict_start = (s->linesize>>shift) * (EDGE_WIDTH >> shift) + (EDGE_WIDTH >> shift);
188 CHECKED_ALLOCZ(pict, c_size)
189 s->last_picture_base[i] = pict;
190 s->last_picture[i] = pict + pict_start;
/* chroma planes start out mid-grey (128), luma stays zeroed */
191 if(i>0) memset(s->last_picture_base[i], 128, c_size);
193 CHECKED_ALLOCZ(pict, c_size)
194 s->next_picture_base[i] = pict;
195 s->next_picture[i] = pict + pict_start;
196 if(i>0) memset(s->next_picture_base[i], 128, c_size);
198 if (s->has_b_frames || s->codec_id==CODEC_ID_MPEG4) {
199 /* Note: the MPEG4 check is here because of buggy encoders which don't set the low_delay flag but
200 do low-delay encoding, so we can't always distinguish b-frame containing streams from low_delay streams */
201 CHECKED_ALLOCZ(pict, c_size)
202 s->aux_picture_base[i] = pict;
203 s->aux_picture[i] = pict + pict_start;
204 if(i>0) memset(s->aux_picture_base[i], 128, c_size);
207 s->ip_buffer_count= 2;
210 CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
/* MV tables are padded by one MB row/column on each side */
214 int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
216 CHECKED_ALLOCZ(s->mb_var , s->mb_num * sizeof(INT16))
217 CHECKED_ALLOCZ(s->mc_mb_var, s->mb_num * sizeof(INT16))
218 CHECKED_ALLOCZ(s->mb_mean , s->mb_num * sizeof(INT8))
220 /* Allocate MV tables (2 INT16 per entry: x and y component) */
221 CHECKED_ALLOCZ(s->p_mv_table , mv_table_size * 2 * sizeof(INT16))
222 CHECKED_ALLOCZ(s->b_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
223 CHECKED_ALLOCZ(s->b_back_mv_table , mv_table_size * 2 * sizeof(INT16))
224 CHECKED_ALLOCZ(s->b_bidir_forw_mv_table , mv_table_size * 2 * sizeof(INT16))
225 CHECKED_ALLOCZ(s->b_bidir_back_mv_table , mv_table_size * 2 * sizeof(INT16))
226 CHECKED_ALLOCZ(s->b_direct_forw_mv_table, mv_table_size * 2 * sizeof(INT16))
227 CHECKED_ALLOCZ(s->b_direct_back_mv_table, mv_table_size * 2 * sizeof(INT16))
228 CHECKED_ALLOCZ(s->b_direct_mv_table , mv_table_size * 2 * sizeof(INT16))
230 CHECKED_ALLOCZ(s->me_scratchpad, s->linesize*16*3*sizeof(uint8_t))
232 CHECKED_ALLOCZ(s->me_map , ME_MAP_SIZE*sizeof(uint32_t))
233 CHECKED_ALLOCZ(s->me_score_map, ME_MAP_SIZE*sizeof(uint16_t))
/* unpadded planes used by the encoder's B-frame reorder queue */
236 for(j=0; j<REORDER_BUFFER_SIZE; j++){
242 h = s->mb_height * 16;
243 shift = (i == 0) ? 0 : 1;
244 c_size = (w >> shift) * (h >> shift);
246 CHECKED_ALLOCZ(pict, c_size);
247 s->picture_buffer[j][i] = pict;
/* MPEG-4 data partitioning needs separate bit buffers for texture / part 2 */
252 if(s->codec_id==CODEC_ID_MPEG4){
253 CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
254 CHECKED_ALLOCZ( s->pb2_buffer, PB_BUFFER_SIZE);
257 if(s->msmpeg4_version){
258 CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
260 CHECKED_ALLOCZ(s->avctx->stats_out, 256);
263 if (s->out_format == FMT_H263 || s->encoding) {
265 /* Allocate MB type table */
266 CHECKED_ALLOCZ(s->mb_type , s->mb_num * sizeof(UINT8))
/* motion_val is stored at block (8x8) resolution, with 1-block padding */
269 size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
270 CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(INT16));
273 if(s->codec_id==CODEC_ID_MPEG4){
274 /* 4mv and interlaced direct mode decoding tables */
275 CHECKED_ALLOCZ(s->co_located_type_table, s->mb_num * sizeof(UINT8))
276 CHECKED_ALLOCZ(s->field_mv_table, s->mb_num*2*2 * sizeof(INT16))
277 CHECKED_ALLOCZ(s->field_select_table, s->mb_num*2* sizeof(INT8))
280 if (s->h263_pred || s->h263_plus) {
281 int y_size, c_size, i, size;
/* dc_val/ac_val hold one shared allocation: luma plane followed by Cb and Cr */
285 y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
286 c_size = (s->mb_width + 2) * (s->mb_height + 2);
287 size = y_size + 2 * c_size;
288 CHECKED_ALLOCZ(s->dc_val[0], size * sizeof(INT16));
289 s->dc_val[1] = s->dc_val[0] + y_size;
290 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the DC prediction reset value */
292 s->dc_val[0][i] = 1024;
295 CHECKED_ALLOCZ(s->ac_val[0], size * sizeof(INT16) * 16);
296 s->ac_val[1] = s->ac_val[0] + y_size;
297 s->ac_val[2] = s->ac_val[1] + c_size;
300 CHECKED_ALLOCZ(s->coded_block, y_size);
302 /* divx501 bitstream reorder buffer */
303 CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
305 /* cbp, ac_pred, pred_dir */
306 CHECKED_ALLOCZ(s->cbp_table , s->mb_num * sizeof(UINT8))
307 CHECKED_ALLOCZ(s->pred_dir_table, s->mb_num * sizeof(UINT8))
309 CHECKED_ALLOCZ(s->qscale_table , s->mb_num * sizeof(UINT8))
311 /* which mb is an intra block */
312 CHECKED_ALLOCZ(s->mbintra_table, s->mb_num);
313 memset(s->mbintra_table, 1, s->mb_num);
315 /* default structure is frame */
316 s->picture_structure = PICT_FRAME;
318 /* init macroblock skip table */
319 CHECKED_ALLOCZ(s->mbskip_table, s->mb_num);
321 s->block= s->blocks[0];
323 s->context_initialized = 1;
333 /* free everything allocated by MPV_common_init (shared by encoder and decoder);
   av_freep presumably also NULLs the pointer it frees -- confirm in the
   allocation helpers */
334 void MPV_common_end(MpegEncContext *s)
/* side-data tables and MV tables */
338 av_freep(&s->mb_type);
339 av_freep(&s->mb_var);
340 av_freep(&s->mc_mb_var);
341 av_freep(&s->mb_mean);
342 av_freep(&s->p_mv_table);
343 av_freep(&s->b_forw_mv_table);
344 av_freep(&s->b_back_mv_table);
345 av_freep(&s->b_bidir_forw_mv_table);
346 av_freep(&s->b_bidir_back_mv_table);
347 av_freep(&s->b_direct_forw_mv_table);
348 av_freep(&s->b_direct_back_mv_table);
349 av_freep(&s->b_direct_mv_table);
350 av_freep(&s->motion_val);
/* dc_val/ac_val are single allocations; only the [0] pointer owns memory */
351 av_freep(&s->dc_val[0]);
352 av_freep(&s->ac_val[0]);
353 av_freep(&s->coded_block);
354 av_freep(&s->mbintra_table);
355 av_freep(&s->cbp_table);
356 av_freep(&s->pred_dir_table);
357 av_freep(&s->qscale_table);
358 av_freep(&s->me_scratchpad);
359 av_freep(&s->me_map);
360 av_freep(&s->me_score_map);
362 av_freep(&s->mbskip_table);
363 av_freep(&s->bitstream_buffer);
364 av_freep(&s->tex_pb_buffer);
365 av_freep(&s->pb2_buffer);
366 av_freep(&s->edge_emu_buffer);
367 av_freep(&s->co_located_type_table);
368 av_freep(&s->field_mv_table);
369 av_freep(&s->field_select_table);
370 av_freep(&s->avctx->stats_out);
371 av_freep(&s->ac_stats);
/* picture planes: only owned (non-DR1) buffers are freed; the payload
   pointers are cleared in either case */
375 if(!(s->flags&CODEC_FLAG_DR1)){
376 av_freep(&s->last_picture_base[i]);
377 av_freep(&s->next_picture_base[i]);
378 av_freep(&s->aux_picture_base[i]);
380 s->last_picture_base[i]=
381 s->next_picture_base[i]=
382 s->aux_picture_base [i] = NULL;
385 s->aux_picture [i] = NULL;
387 for(j=0; j<REORDER_BUFFER_SIZE; j++){
388 av_freep(&s->picture_buffer[j][i]);
391 s->context_initialized = 0;
394 /* init video encoder: copy the user settings out of AVCodecContext, pick the
   output format per codec id, set up default tables and quant matrices, and
   call MPV_common_init / ff_rate_control_init */
395 int MPV_encode_init(AVCodecContext *avctx)
397 MpegEncContext *s = avctx->priv_data;
400 avctx->pix_fmt = PIX_FMT_YUV420P;
/* mirror the application-supplied parameters into the private context */
402 s->bit_rate = avctx->bit_rate;
403 s->bit_rate_tolerance = avctx->bit_rate_tolerance;
404 s->frame_rate = avctx->frame_rate;
405 s->width = avctx->width;
406 s->height = avctx->height;
407 if(avctx->gop_size > 600){
408 fprintf(stderr, "Warning keyframe interval too large! reducing it ...\n");
411 s->gop_size = avctx->gop_size;
412 s->rtp_mode = avctx->rtp_mode;
413 s->rtp_payload_size = avctx->rtp_payload_size;
414 if (avctx->rtp_callback)
415 s->rtp_callback = avctx->rtp_callback;
416 s->qmin= avctx->qmin;
417 s->qmax= avctx->qmax;
418 s->max_qdiff= avctx->max_qdiff;
419 s->qcompress= avctx->qcompress;
420 s->qblur= avctx->qblur;
422 s->aspect_ratio_info= avctx->aspect_ratio_info;
423 if (avctx->aspect_ratio_info == FF_ASPECT_EXTENDED)
425 s->aspected_width = avctx->aspected_width;
426 s->aspected_height = avctx->aspected_height;
428 s->flags= avctx->flags;
429 s->max_b_frames= avctx->max_b_frames;
430 s->b_frame_strategy= avctx->b_frame_strategy;
431 s->codec_id= avctx->codec->id;
432 s->luma_elim_threshold = avctx->luma_elim_threshold;
433 s->chroma_elim_threshold= avctx->chroma_elim_threshold;
434 s->strict_std_compliance= avctx->strict_std_compliance;
435 s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
436 s->mpeg_quant= avctx->mpeg_quant;
438 if (s->gop_size <= 1) {
446 if (avctx->me_method == 0)
447 /* For compatibility: fall back to the global default ME method */
448 s->me_method = motion_estimation_method;
450 s->me_method = avctx->me_method;
453 s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
/* adaptive quantization is enabled when any masking option is set */
455 s->adaptive_quant= ( s->avctx->lumi_masking
456 || s->avctx->dark_masking
457 || s->avctx->temporal_cplx_masking
458 || s->avctx->spatial_cplx_masking
459 || s->avctx->p_masking)
462 s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
/* per-codec setup: output format, delay, and codec quirks */
464 switch(avctx->codec->id) {
465 case CODEC_ID_MPEG1VIDEO:
466 s->out_format = FMT_MPEG1;
467 avctx->delay=0; //FIXME not sure, should check the spec
470 s->out_format = FMT_MJPEG;
471 s->intra_only = 1; /* force intra only for jpeg */
472 s->mjpeg_write_tables = 1; /* write all tables */
473 s->mjpeg_data_only_frames = 0; /* write all the needed headers */
474 s->mjpeg_vsample[0] = 2; /* set up default sampling factors */
475 s->mjpeg_vsample[1] = 1; /* the only currently supported values */
476 s->mjpeg_vsample[2] = 1;
477 s->mjpeg_hsample[0] = 2;
478 s->mjpeg_hsample[1] = 1;
479 s->mjpeg_hsample[2] = 1;
480 if (mjpeg_init(s) < 0)
485 if (h263_get_picture_format(s->width, s->height) == 7) {
486 printf("Input picture size isn't suitable for h263 codec! try h263+\n");
489 s->out_format = FMT_H263;
493 s->out_format = FMT_H263;
495 s->rtp_payload_size = 1200;
497 s->unrestricted_mv = 1;
500 /* These are just to be sure */
506 s->out_format = FMT_H263;
511 s->out_format = FMT_H263;
513 s->unrestricted_mv = 1;
514 s->has_b_frames= s->max_b_frames ? 1 : 0;
516 avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
518 case CODEC_ID_MSMPEG4V1:
519 s->out_format = FMT_H263;
522 s->unrestricted_mv = 1;
523 s->msmpeg4_version= 1;
526 case CODEC_ID_MSMPEG4V2:
527 s->out_format = FMT_H263;
530 s->unrestricted_mv = 1;
531 s->msmpeg4_version= 2;
534 case CODEC_ID_MSMPEG4V3:
535 s->out_format = FMT_H263;
538 s->unrestricted_mv = 1;
539 s->msmpeg4_version= 3;
543 s->out_format = FMT_H263;
546 s->unrestricted_mv = 1;
547 s->msmpeg4_version= 4;
551 s->out_format = FMT_H263;
554 s->unrestricted_mv = 1;
555 s->msmpeg4_version= 5;
562 { /* set up some safe defaults, some codecs might override them later */
567 memset(default_mv_penalty, 0, sizeof(UINT16)*(MAX_FCODE+1)*(2*MAX_MV+1));
568 memset(default_fcode_tab , 0, sizeof(UINT8)*(2*MAX_MV+1));
570 for(i=-16; i<16; i++){
571 default_fcode_tab[i + MAX_MV]= 1;
575 s->mv_penalty= default_mv_penalty;
576 s->fcode_tab= default_fcode_tab;
578 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
580 if (s->out_format == FMT_H263)
582 else if (s->out_format == FMT_MPEG1)
583 ff_mpeg1_encode_init(s);
584 if(s->msmpeg4_version)
585 ff_msmpeg4_encode_init(s);
587 /* don't use mv_penalty table for crap MV as it would be confused */
588 if (s->me_method < ME_EPZS) s->mv_penalty = default_mv_penalty;
593 if (MPV_common_init(s) < 0)
596 /* init default q matrix (which matrices are used depends on the codec) */
598 if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
599 s->intra_matrix[i] = ff_mpeg4_default_intra_matrix[i];
600 s->inter_matrix[i] = ff_mpeg4_default_non_intra_matrix[i];
601 }else if(s->out_format == FMT_H263){
603 s->inter_matrix[i] = ff_mpeg1_default_non_intra_matrix[i];
605 s->intra_matrix[i] = ff_mpeg1_default_intra_matrix[i];
606 s->inter_matrix[i] = ff_mpeg1_default_non_intra_matrix[i];
610 /* precompute matrix */
611 /* for mjpeg, we do include qscale in the matrix */
612 if (s->out_format != FMT_MJPEG) {
613 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias,
614 s->intra_matrix, s->intra_quant_bias);
615 convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16, s->q_inter_matrix16_bias,
616 s->inter_matrix, s->inter_quant_bias);
619 if(ff_rate_control_init(s) < 0)
622 s->picture_number = 0;
623 s->picture_in_gop_number = 0;
624 s->fake_picture_number = 0;
625 /* motion detector init */
/* tear down the encoder: rate control state, then (for MJPEG) the codec-specific
   resources; the common-context cleanup happens in lines not shown here */
632 int MPV_encode_end(AVCodecContext *avctx)
634 MpegEncContext *s = avctx->priv_data;
640 ff_rate_control_uninit(s);
643 if (s->out_format == FMT_MJPEG)
649 /* draw the edges of width 'w' of an image of size width, height
   by replicating the border pixels outward: top/bottom rows are copied,
   left/right columns are filled with the nearest edge pixel, and the four
   corners get the corner pixel value */
650 //FIXME check that this is ok for mpeg4 interlaced
651 static void draw_edges_c(UINT8 *buf, int wrap, int width, int height, int w)
653 UINT8 *ptr, *last_line;
656 last_line = buf + (height - 1) * wrap;
/* replicate the first and last row into the top/bottom border */
659 memcpy(buf - (i + 1) * wrap, buf, width);
660 memcpy(last_line + (i + 1) * wrap, last_line, width);
/* extend each row sideways with its first/last pixel */
664 for(i=0;i<height;i++) {
665 memset(ptr - w, ptr[0], w);
666 memset(ptr + width, ptr[width-1], w);
/* fill the four corner areas */
671 memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
672 memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
673 memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
674 memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
678 /* generic function for encode/decode called before a frame is coded/decoded:
   selects current/last/next picture pointers (rotating reference frames for
   non-B pictures), wires up direct-rendering buffers, and picks the right
   dct_unquantize implementation for the stream */
679 void MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
686 avctx->mbskip_table= s->mbskip_table;
/* direct rendering: the app supplies the frame buffers and strides */
688 if(avctx->flags&CODEC_FLAG_DR1){
689 avctx->get_buffer_callback(avctx, s->width, s->height, s->pict_type);
691 s->linesize = avctx->dr_stride;
692 s->uvlinesize= avctx->dr_uvstride;
693 s->ip_buffer_count= avctx->dr_ip_buffer_count;
695 avctx->dr_ip_buffer_count= s->ip_buffer_count;
/* B-frames are written to the aux picture and do not rotate the references */
697 if (s->pict_type == B_TYPE) {
699 if(avctx->flags&CODEC_FLAG_DR1)
700 s->aux_picture[i]= avctx->dr_buffer[i];
702 //FIXME the following should never be needed, the decoder should drop b frames if no reference is available
703 if(s->next_picture[i]==NULL)
704 s->next_picture[i]= s->aux_picture[i];
705 if(s->last_picture[i]==NULL)
706 s->last_picture[i]= s->next_picture[i];
708 s->current_picture[i] = s->aux_picture[i];
712 /* I/P frame: swap next and last, decode into the new "next" */
713 if(avctx->flags&CODEC_FLAG_DR1)
714 tmp= avctx->dr_buffer[i];
716 tmp = s->last_picture[i];
718 s->last_picture[i] = s->next_picture[i];
719 s->next_picture[i] = tmp;
720 s->current_picture[i] = tmp;
722 if(s->last_picture[i]==NULL)
723 s->last_picture[i]= s->next_picture[i];
725 s->last_dr_opaque= s->next_dr_opaque;
726 s->next_dr_opaque= avctx->dr_opaque_frame;
728 if(s->has_b_frames && s->last_dr_opaque && s->codec_id!=CODEC_ID_SVQ1)
729 avctx->dr_opaque_frame= s->last_dr_opaque;
731 avctx->dr_opaque_frame= s->next_dr_opaque;
734 /* set dequantizer, we can't do it during init as it might change for mpeg4
735 and we can't do it in the header decode as init isn't called for mpeg4 there yet */
736 if(s->out_format == FMT_H263){
738 s->dct_unquantize = s->dct_unquantize_mpeg2;
740 s->dct_unquantize = s->dct_unquantize_h263;
742 s->dct_unquantize = s->dct_unquantize_mpeg1;
745 /* generic function for encode/decode called after a frame has been coded/decoded:
   publishes frame type to the app, pads the reference frame edges for motion
   prediction, and tracks how many reference buffers are populated */
746 void MPV_frame_end(MpegEncContext *s)
748 s->avctx->key_frame = (s->pict_type == I_TYPE);
749 s->avctx->pict_type = s->pict_type;
751 /* draw edge for correct motion prediction if outside */
752 if (s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
753 draw_edges(s->current_picture[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
754 draw_edges(s->current_picture[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
755 draw_edges(s->current_picture[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
759 s->last_pict_type = s->pict_type;
/* a non-B picture becomes a reference; at most 2 (last + next) can exist */
760 if(s->pict_type!=B_TYPE){
761 s->last_non_b_pict_type= s->pict_type;
762 s->num_available_buffers++;
763 if(s->num_available_buffers>2) s->num_available_buffers= 2;
767 /* reorder input for encoding: shift the coded_order queue by one and insert
   the new picture at the slot dictated by its type (B frames are held back
   until the following non-B frame has been queued).  The picture data is
   either referenced in place or copied into a reorder buffer. */
768 void reorder_input(MpegEncContext *s, AVPicture *pict)
772 if(s->max_b_frames > FF_MAX_B_FRAMES) s->max_b_frames= FF_MAX_B_FRAMES;
774 // delay= s->max_b_frames+1; (or 0 if no b frames cuz decoder diff)
/* shift the queue; the freed tail slot is poisoned to catch stale reads */
776 for(j=0; j<REORDER_BUFFER_SIZE-1; j++){
777 s->coded_order[j]= s->coded_order[j+1];
779 s->coded_order[j].picture[0]= s->coded_order[j].picture[1]= s->coded_order[j].picture[2]= NULL; //catch uninitialized buffers
780 s->coded_order[j].pict_type=0;
782 switch(s->input_pict_type){
/* non-B picture: queued before the pending B frames that reference it */
787 index= s->max_b_frames - s->b_frames_since_non_b;
788 s->b_frames_since_non_b=0;
791 index= s->max_b_frames + 1;
792 s->b_frames_since_non_b++;
795 //printf("index:%d type:%d strides: %d %d\n", index, s->input_pict_type, pict->linesize[0], s->linesize);
/* fast path: strides match and the input stays valid, so just keep pointers */
796 if( (index==0 || (s->flags&CODEC_FLAG_INPUT_PRESERVED))
797 && pict->linesize[0] == s->linesize
798 && pict->linesize[1] == s->uvlinesize
799 && pict->linesize[2] == s->uvlinesize){
802 s->coded_order[index].picture[i]= pict->data[i];
/* slow path: copy plane by plane into our own buffers */
807 uint8_t *src = pict->data[i];
809 int src_wrap = pict->linesize[i];
810 int dest_wrap = s->linesize;
814 if(index==0) dest= s->last_picture[i]+16; //is current_picture indeed but the switch happens after reordering
815 else dest= s->picture_buffer[s->picture_buffer_index][i];
823 s->coded_order[index].picture[i]= dest;
825 memcpy(dest, src, w);
/* advance the ring of reorder buffers */
831 s->picture_buffer_index++;
832 if(s->picture_buffer_index >= REORDER_BUFFER_SIZE) s->picture_buffer_index=0;
835 s->coded_order[index].pict_type = s->input_pict_type;
836 s->coded_order[index].qscale = s->input_qscale;
837 s->coded_order[index].force_type= s->force_input_type;
838 s->coded_order[index].picture_in_gop_number= s->input_picture_in_gop_number;
839 s->coded_order[index].picture_number= s->input_picture_number;
/* the head of the queue is the next picture to actually encode */
842 s->new_picture[i]= s->coded_order[0].picture[i];
/*
 * Encode one input picture into buf.  Decides the picture type (I/P/B, or a
 * type forced by the app / two-pass rate control), reorders B frames via
 * reorder_input(), encodes the head of the reorder queue, publishes bit
 * statistics on avctx, and returns the number of bytes written.
 */
846 int MPV_encode_picture(AVCodecContext *avctx,
847 unsigned char *buf, int buf_size, void *data)
849 MpegEncContext *s = avctx->priv_data;
850 AVPicture *pict = data;
852 s->input_qscale = avctx->quality;
854 init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
/* picture type: app-forced, two-pass log, or GOP-position based */
856 if(avctx->flags&CODEC_FLAG_TYPE){
858 s->force_input_type= avctx->key_frame ? I_TYPE : P_TYPE;
859 }else if(s->flags&CODEC_FLAG_PASS2){
861 s->force_input_type= s->rc_context.entry[s->input_picture_number].new_pict_type;
863 s->force_input_type=0;
864 if (!s->intra_only) {
865 /* first picture of GOP is intra */
866 if (s->input_picture_in_gop_number % s->gop_size==0){
867 s->input_pict_type = I_TYPE;
868 }else if(s->max_b_frames==0){
869 s->input_pict_type = P_TYPE;
871 if(s->b_frames_since_non_b < s->max_b_frames) //FIXME more IQ
872 s->input_pict_type = B_TYPE;
874 s->input_pict_type = P_TYPE;
877 s->input_pict_type = I_TYPE;
881 if(s->input_pict_type==I_TYPE)
882 s->input_picture_in_gop_number=0;
884 reorder_input(s, pict);
/* emit the oldest queued picture, if the queue head is populated */
887 if(s->coded_order[0].picture[0]){
889 s->pict_type= s->coded_order[0].pict_type;
890 if (s->fixed_qscale) /* the ratecontrol needs the last qscale so we don't touch it for CBR */
891 s->qscale= s->coded_order[0].qscale;
892 s->force_type= s->coded_order[0].force_type;
893 s->picture_in_gop_number= s->coded_order[0].picture_in_gop_number;
894 s->picture_number= s->coded_order[0].picture_number;
896 MPV_frame_start(s, avctx);
898 encode_picture(s, s->picture_number);
/* export per-frame bit accounting to the app */
900 avctx->real_pict_num = s->picture_number;
901 avctx->header_bits = s->header_bits;
902 avctx->mv_bits = s->mv_bits;
903 avctx->misc_bits = s->misc_bits;
904 avctx->i_tex_bits = s->i_tex_bits;
905 avctx->p_tex_bits = s->p_tex_bits;
906 avctx->i_count = s->i_count;
907 avctx->p_count = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
908 avctx->skip_count = s->skip_count;
912 if (s->out_format == FMT_MJPEG)
913 mjpeg_picture_trailer(s);
916 avctx->quality = s->qscale;
918 if(s->flags&CODEC_FLAG_PASS1)
919 ff_write_pass1_stats(s);
923 s->input_picture_number++;
924 s->input_picture_in_gop_number++;
/* flush and count the produced bits */
926 flush_put_bits(&s->pb);
927 s->frame_bits = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
929 s->total_bits += s->frame_bits;
930 avctx->frame_bits = s->frame_bits;
931 //printf("fcode: %d, type: %d, head: %d, mv: %d, misc: %d, frame: %d, itex: %d, ptex: %d\n",
932 //s->f_code, avctx->key_frame, s->header_bits, s->mv_bits, s->misc_bits, s->frame_bits, s->i_tex_bits, s->p_tex_bits);
933 #if 0 //dump some stats to stats.txt for testing/debugging
934 if(s->max_b_frames==0)
937 if(!f) f= fopen("stats.txt", "wb");
938 get_psnr(pict->data, s->current_picture,
939 pict->linesize, s->linesize, avctx);
940 fprintf(f, "%7d, %7d, %2.4f\n", pbBufPtr(&s->pb) - s->pb.buf, s->qscale, avctx->psnr_y);
944 if (avctx->get_psnr) {
945 /* At this point pict->data should have the original frame */
946 /* an s->current_picture should have the coded/decoded frame */
947 get_psnr(pict->data, s->current_picture,
948 pict->linesize, s->linesize, avctx);
949 // printf("%f\n", avctx->psnr_y);
/* return the number of bytes written to buf */
951 return pbBufPtr(&s->pb) - s->pb.buf;
/*
 * One-warp-point global motion compensation (MPEG-4 GMC/sprite) for one
 * macroblock: luma is rendered as two 8-pixel-wide gmc1() calls, chroma as
 * one call per plane at half resolution.  Uses the edge emulation buffer
 * when the source area crosses the picture edge and CODEC_FLAG_EMU_EDGE
 * is set (chroma emulation appears unconditional in the visible lines --
 * NOTE(review): the guard may be in elided lines, confirm).
 */
954 static inline void gmc1_motion(MpegEncContext *s,
955 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
957 UINT8 **ref_picture, int src_offset,
961 int offset, src_x, src_y, linesize, uvlinesize;
962 int motion_x, motion_y;
965 if(s->real_sprite_warping_points>1) printf("more than 1 warp point isn't supported\n");
/* luma: integer part selects the source block, low bits the sub-pel phase */
966 motion_x= s->sprite_offset[0][0];
967 motion_y= s->sprite_offset[0][1];
968 src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
969 src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
970 motion_x<<=(3-s->sprite_warping_accuracy);
971 motion_y<<=(3-s->sprite_warping_accuracy);
/* clamp the source position into [-16, width/height] */
972 src_x = clip(src_x, -16, s->width);
973 if (src_x == s->width)
975 src_y = clip(src_y, -16, s->height);
976 if (src_y == s->height)
979 linesize = s->linesize;
980 uvlinesize = s->uvlinesize;
981 ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
984 if(s->flags&CODEC_FLAG_EMU_EDGE){
985 if(src_x<0 || src_y<0 || src_x + (motion_x&15) + 16 > s->h_edge_pos
986 || src_y + (motion_y&15) + h > s->v_edge_pos){
987 emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
988 ptr= s->edge_emu_buffer;
/* render luma in two 8-wide halves */
992 gmc1(dest_y , ptr , linesize, h, motion_x&15, motion_y&15, s->no_rounding);
993 gmc1(dest_y+8, ptr+8, linesize, h, motion_x&15, motion_y&15, s->no_rounding);
/* chroma: same computation at half resolution, warp point [1] */
995 motion_x= s->sprite_offset[1][0];
996 motion_y= s->sprite_offset[1][1];
997 src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
998 src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
999 motion_x<<=(3-s->sprite_warping_accuracy);
1000 motion_y<<=(3-s->sprite_warping_accuracy);
1001 src_x = clip(src_x, -8, s->width>>1);
1002 if (src_x == s->width>>1)
1004 src_y = clip(src_y, -8, s->height>>1);
1005 if (src_y == s->height>>1)
1008 offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1009 ptr = ref_picture[1] + offset;
1011 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1012 ptr= s->edge_emu_buffer;
1014 gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, motion_y&15, s->no_rounding);
1016 ptr = ref_picture[2] + offset;
1018 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1019 ptr= s->edge_emu_buffer;
1021 gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, h>>1, motion_x&15, motion_y&15, s->no_rounding);
/*
 * Copy a block_w x block_h area at (src_x, src_y) of a w x h picture into
 * s->edge_emu_buffer, replicating border pixels for the parts that fall
 * outside the picture.  Used by the motion compensation routines when
 * CODEC_FLAG_EMU_EDGE disables the padded reference frames.
 * Note: the emu buffer is addressed with the same 'linesize' as the source.
 */
1026 static void emulated_edge_mc(MpegEncContext *s, UINT8 *src, int linesize, int block_w, int block_h,
1027 int src_x, int src_y, int w, int h){
1029 int start_y, start_x, end_y, end_x;
1030 UINT8 *buf= s->edge_emu_buffer;
/* if the block is completely outside, snap src to the nearest valid row/column */
1033 src+= (h-1-src_y)*linesize;
1035 }else if(src_y<=-block_h){
1036 src+= (1-block_h-src_y)*linesize;
1042 }else if(src_x<=-block_w){
1043 src+= (1-block_w-src_x);
/* clip the in-picture sub-rectangle of the block */
1047 start_y= MAX(0, -src_y);
1048 start_x= MAX(0, -src_x);
1049 end_y= MIN(block_h, h-src_y);
1050 end_x= MIN(block_w, w-src_x);
1052 // copy existing part
1053 for(y=start_y; y<end_y; y++){
1054 for(x=start_x; x<end_x; x++){
1055 buf[x + y*linesize]= src[x + y*linesize];
/* replicate the first valid row upward */
1060 for(y=0; y<start_y; y++){
1061 for(x=start_x; x<end_x; x++){
1062 buf[x + y*linesize]= buf[x + start_y*linesize];
/* replicate the last valid row downward */
1067 for(y=end_y; y<block_h; y++){
1068 for(x=start_x; x<end_x; x++){
1069 buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
/* finally replicate the left and right columns across the full block height */
1073 for(y=0; y<block_h; y++){
1075 for(x=0; x<start_x; x++){
1076 buf[x + y*linesize]= buf[start_x + y*linesize];
1080 for(x=end_x; x<block_w; x++){
1081 buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1087 /* apply one mpeg motion vector to the three components: half-pel luma MC on a
   16-wide block of height h, then quarter->half derived chroma MC at half
   resolution.  field_based halves the effective height and doubles the
   strides.  Falls back to the edge emulation buffer when the source area
   crosses the picture edge (chroma emulation guard may be in elided lines). */
1088 static inline void mpeg_motion(MpegEncContext *s,
1089 UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
1091 UINT8 **ref_picture, int src_offset,
1092 int field_based, op_pixels_func (*pix_op)[4],
1093 int motion_x, int motion_y, int h)
1096 int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1099 if(s->quarter_sample)
/* dxy indexes the 4 half-pel interpolation variants (y bit 1, x bit 0) */
1105 dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1106 src_x = s->mb_x * 16 + (motion_x >> 1);
1107 src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1109 /* WARNING: do not forget half pels */
1110 height = s->height >> field_based;
1111 v_edge_pos = s->v_edge_pos >> field_based;
1112 src_x = clip(src_x, -16, s->width);
1113 if (src_x == s->width)
1115 src_y = clip(src_y, -16, height);
1116 if (src_y == height)
1118 linesize = s->linesize << field_based;
1119 uvlinesize = s->uvlinesize << field_based;
1120 ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1121 dest_y += dest_offset;
1123 if(s->flags&CODEC_FLAG_EMU_EDGE){
1124 if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 16 > s->h_edge_pos
1125 || src_y + (motion_y&1) + h > v_edge_pos){
1126 emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, v_edge_pos);
1127 ptr= s->edge_emu_buffer;
1131 pix_op[0][dxy](dest_y, ptr, linesize, h);
1133 if(s->flags&CODEC_FLAG_GRAY) return;
/* chroma MV derivation: H.263 rounds nonzero sub-pel components differently */
1135 if (s->out_format == FMT_H263) {
1137 if ((motion_x & 3) != 0)
1139 if ((motion_y & 3) != 0)
1146 dxy = ((my & 1) << 1) | (mx & 1);
1151 src_x = s->mb_x * 8 + mx;
1152 src_y = s->mb_y * (8 >> field_based) + my;
1153 src_x = clip(src_x, -8, s->width >> 1);
1154 if (src_x == (s->width >> 1))
1156 src_y = clip(src_y, -8, height >> 1);
1157 if (src_y == (height >> 1))
1159 offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1160 ptr = ref_picture[1] + offset;
1162 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
1163 ptr= s->edge_emu_buffer;
1165 pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1167 ptr = ref_picture[2] + offset;
1169 emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
1170 ptr= s->edge_emu_buffer;
1172 pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/*
 * Quarter-pel motion compensation for one macroblock.
 * Luma (16xh) uses the quarter-sample interpolators in qpix_op; chroma
 * (8 x h/2) is compensated with half-pel pix_op from a down-rounded
 * vector.  Counterpart of mpeg_motion() for quarter_sample streams.
 */
static inline void qpel_motion(MpegEncContext *s,
                               UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
                               UINT8 **ref_picture, int src_offset,
                               int field_based, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
    int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;

    /* the low 2 bits of each vector component select one of the 16
       quarter-pel interpolation functions */
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    height = s->height >> field_based;
    v_edge_pos = s->v_edge_pos >> field_based;
    /* clamp the source origin; -16 still permits a fully off-picture MB */
    src_x = clip(src_x, -16, s->width);
    if (src_x == s->width)
    src_y = clip(src_y, -16, height);
    if (src_y == height)
    /* field-based prediction addresses every second line -> doubled strides */
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;
    ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
    dest_y += dest_offset;
//printf("%d %d %d\n", src_x, src_y, dxy);

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        /* reference area extends beyond the padded picture: rebuild a
           17 x (h+1) patch (qpel filter support) in edge_emu_buffer */
        if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 16 > s->h_edge_pos
           || src_y + (motion_y&3) + h > v_edge_pos){
            emulated_edge_mc(s, ptr, linesize, 17, h+1, src_x, src_y, s->h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
    qpix_op[0][dxy](dest_y, ptr, linesize);
    //damn interlaced mode
    //FIXME boundary mirroring is not exactly correct here
    /* interlaced path: compensate the two 8-wide halves separately */
    qpix_op[1][dxy](dest_y , ptr , linesize);
    qpix_op[1][dxy](dest_y+8, ptr+8, linesize);

    if(s->flags&CODEC_FLAG_GRAY) return;   /* luma-only decoding: skip chroma */

    }else if(s->divx_version){
        /* divx-compatible rounding when halving the luma vector for chroma */
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    /* chroma uses half-pel precision: bit 0 of mx/my selects the subpel case */
    dxy= (mx&1) | ((my&1)<<1);
    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * (8 >> field_based) + my;
    src_x = clip(src_x, -8, s->width >> 1);
    if (src_x == (s->width >> 1))
    src_y = clip(src_y, -8, height >> 1);
    if (src_y == (height >> 1))
    offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
    ptr = ref_picture[1] + offset;
    /* Cb with edge emulation (9 x (h/2 + 1) covers half-pel support) */
    emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
    ptr= s->edge_emu_buffer;
    pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
    /* Cr: same source offset as Cb */
    ptr = ref_picture[2] + offset;
    emulated_edge_mc(s, ptr, uvlinesize, 9, (h>>1)+1, src_x, src_y, s->h_edge_pos>>1, v_edge_pos>>1);
    ptr= s->edge_emu_buffer;
    pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
/*
 * Dispatch motion compensation for one macroblock according to
 * s->mv_type (16x16 / 8x8 / field), in direction dir (0 = forward,
 * 1 = backward), reading from ref_picture[0..2] (Y, Cb, Cr) and writing
 * to dest_y/dest_cb/dest_cr with the supplied pixel operations.
 */
static inline void MPV_motion(MpegEncContext *s,
                              UINT8 *dest_y, UINT8 *dest_cb, UINT8 *dest_cr,
                              int dir, UINT8 **ref_picture,
                              op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
    int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;

    switch(s->mv_type) {
    /* --- 16x16: one vector for the whole MB; GMC, qpel or half-pel --- */
        gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
    }else if(s->quarter_sample){
        qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
                    s->mv[dir][0][0], s->mv[dir][0][1], 16);
        mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                    s->mv[dir][0][0], s->mv[dir][0][1], 16);

    /* --- 8x8: four luma vectors, one per 8x8 block i --- */
    if(s->quarter_sample){
        motion_x = s->mv[dir][i][0];
        motion_y = s->mv[dir][i][1];

        dxy = ((motion_y & 3) << 2) | (motion_x & 3);
        src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
        src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;

        /* WARNING: do no forget half pels */
        src_x = clip(src_x, -16, s->width);
        if (src_x == s->width)
        src_y = clip(src_y, -16, s->height);
        if (src_y == s->height)

        ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
        if(s->flags&CODEC_FLAG_EMU_EDGE){
            /* 9x9 patch covers the 8x8 block plus quarter-pel filter support */
            if(src_x<0 || src_y<0 || src_x + (motion_x&3) + 8 > s->h_edge_pos
               || src_y + (motion_y&3) + 8 > s->v_edge_pos){
                emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
                ptr= s->edge_emu_buffer;
        dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
        qpix_op[1][dxy](dest, ptr, s->linesize);
        /* accumulate half-sample-precision sums for the chroma vector below */
        mx += s->mv[dir][i][0]/2;
        my += s->mv[dir][i][1]/2;

    /* same 8x8 loop, half-pel precision */
        motion_x = s->mv[dir][i][0];
        motion_y = s->mv[dir][i][1];

        dxy = ((motion_y & 1) << 1) | (motion_x & 1);
        src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
        src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;

        /* WARNING: do no forget half pels */
        src_x = clip(src_x, -16, s->width);
        if (src_x == s->width)
        src_y = clip(src_y, -16, s->height);
        if (src_y == s->height)

        ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
        if(s->flags&CODEC_FLAG_EMU_EDGE){
            if(src_x<0 || src_y<0 || src_x + (motion_x&1) + 8 > s->h_edge_pos
               || src_y + (motion_y&1) + 8 > s->v_edge_pos){
                emulated_edge_mc(s, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
                ptr= s->edge_emu_buffer;
        dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
        pix_op[1][dxy](dest, ptr, s->linesize, 8);
        mx += s->mv[dir][i][0];
        my += s->mv[dir][i][1];

    if(s->flags&CODEC_FLAG_GRAY) break;   /* luma-only: no chroma MC */
    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = (h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
    mx = -(h263_chroma_roundtab[mx & 0xf] + ((mx >> 3) & ~1));
    my = (h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
    my = -(h263_chroma_roundtab[my & 0xf] + ((my >> 3) & ~1));
    dxy = ((my & 1) << 1) | (mx & 1);

    src_x = mb_x * 8 + mx;
    src_y = mb_y * 8 + my;
    src_x = clip(src_x, -8, s->width/2);
    if (src_x == s->width/2)
    src_y = clip(src_y, -8, s->height/2);
    if (src_y == s->height/2)

    offset = (src_y * (s->uvlinesize)) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        /* note: half-pel flags recovered from dxy here */
        if(src_x<0 || src_y<0 || src_x + (dxy &1) + 8 > s->h_edge_pos>>1
           || src_y + (dxy>>1) + 8 > s->v_edge_pos>>1){
            emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
    pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    emulated_edge_mc(s, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
    ptr= s->edge_emu_buffer;
    pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);

    /* --- field prediction: two vectors, one per field, h = 8 each --- */
    if (s->picture_structure == PICT_FRAME) {
        if(s->quarter_sample){
            /* top field: src_offset selects the reference field line */
            qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, s->field_select[dir][0] ? s->linesize : 0,
                        s->mv[dir][0][0], s->mv[dir][0][1], 8);
            /* bottom field: destination shifted one line down */
            qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
                        ref_picture, s->field_select[dir][1] ? s->linesize : 0,
                        s->mv[dir][1][0], s->mv[dir][1][1], 8);
        mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                    ref_picture, s->field_select[dir][0] ? s->linesize : 0,
                    s->mv[dir][0][0], s->mv[dir][0][1], 8);
        mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
                    ref_picture, s->field_select[dir][1] ? s->linesize : 0,
                    s->mv[dir][1][0], s->mv[dir][1][1], 8);
/* put block[] to dest[]: dequantize block i with the current qscale,
   then inverse-transform and STORE the result into dest (intra path,
   previous contents of dest are overwritten) */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, UINT8 *dest, int line_size)
    s->dct_unquantize(s, block, i, s->qscale);
    ff_idct_put (dest, line_size, block);
/* add block[] to dest[]: inverse-transform block i and ADD the residual
   onto the prediction already in dest.  No dequantization here -- the
   coefficients are used as-is.  Skipped when the block is empty
   (block_last_index < 0). */
static inline void add_dct(MpegEncContext *s,
                           DCTELEM *block, int i, UINT8 *dest, int line_size)
    if (s->block_last_index[i] >= 0) {
        ff_idct_add (dest, line_size, block);
/* Like add_dct(), but dequantizes block i with the current qscale first.
   Skipped entirely for empty blocks (block_last_index < 0). */
static inline void add_dequant_dct(MpegEncContext *s,
                                   DCTELEM *block, int i, UINT8 *dest, int line_size)
    if (s->block_last_index[i] >= 0) {
        s->dct_unquantize(s, block, i, s->qscale);

        ff_idct_add (dest, line_size, block);
 * cleans dc, ac, coded_block for the current non intra MB
void ff_clean_intra_table_entries(MpegEncContext *s)
    int wrap = s->block_wrap[0];
    int xy = s->block_index[0];

    /* reset the four luma DC predictors to the neutral value 1024 */
    s->dc_val[0][xy + 1 ] =
    s->dc_val[0][xy + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* clear AC prediction state for two luma rows */
    memset(s->ac_val[0][xy ], 0, 32 * sizeof(INT16));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(INT16));
    if (s->msmpeg4_version>=3) {
        /* msmpeg4 v3+ also tracks per-block coded flags */
        s->coded_block[xy ] =
        s->coded_block[xy + 1 ] =
        s->coded_block[xy + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    /* chroma: one predictor per plane at the MB position */
    wrap = s->block_wrap[4];
    xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(INT16));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(INT16));

    /* mark this MB as no longer intra */
    s->mbintra_table[s->mb_x + s->mb_y*s->mb_width]= 0;
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   It updates the prediction state (DC/AC predictors, motion predictors),
   then reconstructs the macroblock into current_picture: motion
   compensation followed by adding (inter) or storing (intra) the IDCT of
   block[0..5].

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
    const int mb_xy = s->mb_y * s->mb_width + s->mb_x;

#ifdef FF_POSTPROCESS
    /* Obsolete. Exists for compatibility with mplayer only. */
    quant_store[mb_y][mb_x]=s->qscale;
    //printf("[%02d][%02d] %d\n",mb_x,mb_y,s->qscale);
    /* even more obsolete, exists for mplayer xp only */
    if(s->avctx->quant_store) s->avctx->quant_store[mb_y*s->avctx->qstride+mb_x] = s->qscale;
    s->qscale_table[mb_xy]= s->qscale;

    /* update DC predictors for P macroblocks */
        if (s->h263_pred || s->h263_aic) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
            /* reset predictors according to the DC precision */
            s->last_dc[2] = 128 << s->intra_dc_precision;
    else if (s->h263_pred || s->h263_aic)
        s->mbintra_table[mb_xy]=1;

    /* update motion predictor, not for B-frames as they need the motion_val from the last P/S-Frame */
    if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE) { //FIXME move into h263.c if possible, format specific stuff shouldnt be here
        const int wrap = s->block_wrap[0];
        const int xy = s->block_index[0];
        const int mb_index= s->mb_x + s->mb_y*s->mb_width;
        if(s->mv_type == MV_TYPE_8X8){
            /* remember the co-located MB type for direct-mode B-frames */
            s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_4MV;
            int motion_x, motion_y;
                if(s->co_located_type_table)
                    s->co_located_type_table[mb_index]= 0;
            } else if (s->mv_type == MV_TYPE_16X16) {
                motion_x = s->mv[0][0][0];
                motion_y = s->mv[0][0][1];
                if(s->co_located_type_table)
                    s->co_located_type_table[mb_index]= 0;
            } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
                /* average the two field vectors (with rounding toward odd) */
                motion_x = s->mv[0][0][0] + s->mv[0][1][0];
                motion_y = s->mv[0][0][1] + s->mv[0][1][1];
                motion_x = (motion_x>>1) | (motion_x&1);
                    s->field_mv_table[mb_index][i][0]= s->mv[0][i][0];
                    s->field_mv_table[mb_index][i][1]= s->mv[0][i][1];
                    s->field_select_table[mb_index][i]= s->field_select[0][i];
                s->co_located_type_table[mb_index]= CO_LOCATED_TYPE_FIELDMV;
            /* no update if 8X8 because it has been done during parsing */
            /* store the (possibly averaged) vector in all four block slots */
            s->motion_val[xy][0] = motion_x;
            s->motion_val[xy][1] = motion_y;
            s->motion_val[xy + 1][0] = motion_x;
            s->motion_val[xy + 1][1] = motion_y;
            s->motion_val[xy + wrap][0] = motion_x;
            s->motion_val[xy + wrap][1] = motion_y;
            s->motion_val[xy + 1 + wrap][0] = motion_x;
            s->motion_val[xy + 1 + wrap][1] = motion_y;

    /* reconstruction: skipped while encoding intra-only or B frames */
    if (!(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) {
        UINT8 *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];

        /* avoid copy if macroblock skipped in last frame too
           dont touch it for B-frames as they need the skip info from the next p-frame */
        if (s->pict_type != B_TYPE) {
            UINT8 *mbskip_ptr = &s->mbskip_table[mb_xy];
                (*mbskip_ptr) ++; /* indicate that this time we skiped it */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;

                /* if previous was skipped too, then nothing to do !
                   skip only during decoding as we might trash the buffers during encoding a bit */
                if (*mbskip_ptr >= s->ip_buffer_count && !s->encoding)
                *mbskip_ptr = 0; /* not skipped */

        dest_y = s->current_picture [0] + (mb_y * 16* s->linesize ) + mb_x * 16;
        dest_cb = s->current_picture[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
        dest_cr = s->current_picture[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;

        if (s->interlaced_dct) {
            /* interlaced DCT: blocks 2/3 hold the odd field lines */
            dct_linesize = s->linesize * 2;
            dct_offset = s->linesize;
            dct_linesize = s->linesize;
            dct_offset = s->linesize * 8;

        /* motion handling */
        /* decoding or more than one mb_type (MC was allready done otherwise) */
        if((!s->encoding) || (s->mb_type[mb_xy]&(s->mb_type[mb_xy]-1))){
            if ((!s->no_rounding) || s->pict_type==B_TYPE){
                op_pix = put_pixels_tab;
                op_qpix= put_qpel_pixels_tab;
                op_pix = put_no_rnd_pixels_tab;
                op_qpix= put_no_rnd_qpel_pixels_tab;

            if (s->mv_dir & MV_DIR_FORWARD) {
                MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix);
                /* backward prediction averages onto the forward result */
                op_pix = avg_pixels_tab;
                op_qpix= avg_qpel_pixels_tab;
            if (s->mv_dir & MV_DIR_BACKWARD) {
                MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix);

        /* skip dequant / idct if we are really late ;) */
        if(s->hurry_up>1) goto the_end;

        /* add dct residue */
        if(s->encoding || !(s->mpeg2 || s->h263_msmpeg4 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
            /* these formats still carry quantized coefficients here */
            add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
            add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);

            if(!(s->flags&CODEC_FLAG_GRAY)){
                add_dequant_dct(s, block[4], 4, dest_cb, s->uvlinesize);
                add_dequant_dct(s, block[5], 5, dest_cr, s->uvlinesize);
            /* coefficients were already dequantized during parsing */
            add_dct(s, block[0], 0, dest_y, dct_linesize);
            add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
            add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
            add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);

            if(!(s->flags&CODEC_FLAG_GRAY)){
                add_dct(s, block[4], 4, dest_cb, s->uvlinesize);
                add_dct(s, block[5], 5, dest_cr, s->uvlinesize);
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y, dct_linesize);
            put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
            put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
            put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);

            if(!(s->flags&CODEC_FLAG_GRAY)){
                put_dct(s, block[4], 4, dest_cb, s->uvlinesize);
                put_dct(s, block[5], 5, dest_cr, s->uvlinesize);
    emms_c(); //FIXME remove
/*
 * Heuristic coefficient elimination: if the block contains only a few
 * small coefficients (weighted score below threshold), zero the whole
 * block -- the bits saved outweigh the tiny quality loss.  A negative
 * incoming threshold means the DC coefficient must be preserved
 * (skip_dc), in which n block_last_index is reduced to 0 or -1 below.
 */
static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
    static const char tab[64]=
    DCTELEM *block= s->block[n];
    const int last_index= s->block_last_index[n];
        threshold= -threshold;

    /* are all which we could set to zero are allready zero? */
    if(last_index<=skip_dc - 1) return;

    /* accumulate a position/magnitude-weighted score over the block */
    for(i=0; i<=last_index; i++){
        const int j = zigzag_direct[i];
        const int level = ABS(block[j]);
            if(skip_dc && i==0) continue;
    if(score >= threshold) return;
    /* score is small enough: clear every eliminable coefficient */
    for(i=skip_dc; i<=last_index; i++){
        const int j = zigzag_direct[i];
    if(block[0]) s->block_last_index[n]= 0;
    else s->block_last_index[n]= -1;
1744 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
1747 const int maxlevel= s->max_qcoeff;
1748 const int minlevel= s->min_qcoeff;
1750 for(i=0;i<=last_index; i++){
1751 const int j = zigzag_direct[i];
1752 int level = block[j];
1754 if (level>maxlevel) level=maxlevel;
1755 else if(level<minlevel) level=minlevel;
/*
 * Rescale the coefficients of block n from quantizer oldq to newq by
 * rounded division, then shrink block_last_index to the last non-zero
 * coefficient (some may have been rounded to zero).
 */
static inline void requantize_coeffs(MpegEncContext *s, DCTELEM block[64], int oldq, int newq, int n)
    //FIXME requantize, note (mpeg1/h263/h263p-aic dont need it,...)
    /* rescale each coefficient: level * oldq / newq, rounded */
    for(;i<=s->block_last_index[n]; i++){
        const int j = zigzag_direct[i];
        int level = block[j];

        block[j]= ROUNDED_DIV(level*oldq, newq);
    /* trim trailing zeros produced by the rounding above */
    for(i=s->block_last_index[n]; i>=0; i--){
        const int j = zigzag_direct[i]; //FIXME other scantabs
    s->block_last_index[n]= i;
/*
 * Overflow recovery for adaptive quantization: find the smallest
 * quantizer newq > qscale for which every coefficient of the MB fits in
 * [min_qcoeff, max_qcoeff], then requantize and clip all six blocks and
 * record the quantizer change in s->dquant.
 */
static inline void auto_requantize_coeffs(MpegEncContext *s, DCTELEM block[6][64])
    const int maxlevel= s->max_qcoeff;
    const int minlevel= s->min_qcoeff;
    int largest=0, smallest=0;

    assert(s->adaptive_quant);

    /* intra blocks keep their DC untouched, so start scanning at 1 */
    if(s->mb_intra) i=1;
    /* find the extreme coefficient values over all blocks */
    for(;i<=s->block_last_index[n]; i++){
        const int j = zigzag_direct[i]; //FIXME other scantabs
        int level = block[n][j];
        if(largest < level) largest = level;
        if(smallest > level) smallest= level;

    /* smallest quantizer that maps both extremes into the legal range */
    for(newq=s->qscale+1; newq<32; newq++){
        if( ROUNDED_DIV(smallest*s->qscale, newq) >= minlevel
           && ROUNDED_DIV(largest *s->qscale, newq) <= maxlevel)

    if(s->out_format==FMT_H263){
        /* h263 like formats cannot change qscale by more than 2 easiely */
        if(s->avctx->qmin + 2 < newq)
            newq= s->avctx->qmin + 2;

        requantize_coeffs(s, block[n], s->qscale, newq, n);
        clip_coeffs(s, block[n], s->block_last_index[n]);

    s->dquant+= newq - s->qscale;
/* Sum of absolute differences between vertically adjacent pixels of a
   16x8 area -- used to score how "progressive" a block is for the
   interlaced-DCT decision.  SAD flavour (see SQ variant below). */
static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
        /* unrolled by 4 across the 16-pixel row */
        for(x=0; x<16; x+=4){
            score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride])
                   +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
/* Same vertical-activity measure as pix_vcmp16x8(), but applied to the
   residual s1 - s2 (source minus prediction); SAD flavour. */
static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
        for(x=0; x<16; x++){
            score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
/* square of a -- NOTE: evaluates its argument twice, only pass
   side-effect-free expressions */
#define SQ(a) ((a)*(a))
/* Sum of squared differences between vertically adjacent pixels of a
   16x8 area (SSE flavour of the interlace-decision metric above). */
static int pix_vcmp16x8(UINT8 *s, int stride){ //FIXME move to dsputil & optimize
        /* unrolled by 4 across the 16-pixel row */
        for(x=0; x<16; x+=4){
            score+= SQ(s[x ] - s[x +stride]) + SQ(s[x+1] - s[x+1+stride])
                   +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
/* SSE flavour of the residual vertical-activity measure: operates on
   s1 - s2 (source minus prediction). */
static int pix_diff_vcmp16x8(UINT8 *s1, UINT8*s2, int stride){ //FIXME move to dsputil & optimize
        for(x=0; x<16; x++){
            score+= SQ(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
/*
 * Encode one macroblock: apply adaptive quantization, fetch (intra) or
 * motion-compensate and difference (inter) the pixel data into
 * s->block[0..5], decide interlaced vs progressive DCT, quantize, run
 * coefficient elimination, and finally emit the bitstream for the
 * current output format.  motion_x/motion_y are the MB's motion vector
 * components passed through to the format-specific encoders.
 */
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
    const int mb_x= s->mb_x;
    const int mb_y= s->mb_y;
    int dct_offset = s->linesize*8; //default for progressive frames

    for(i=0; i<6; i++) skip_dct[i]=0;

    if(s->adaptive_quant){
        s->dquant= s->qscale_table[mb_x + mb_y*s->mb_width] - s->qscale;

        if(s->out_format==FMT_H263){
            /* H.263-style syntax limits per-MB quantizer change to +-2 */
            if (s->dquant> 2) s->dquant= 2;
            else if(s->dquant<-2) s->dquant=-2;

        if(s->codec_id==CODEC_ID_MPEG4){
                assert(s->dquant==0 || s->mv_type!=MV_TYPE_8X8);

                if(s->mv_dir&MV_DIRECT)

        s->qscale+= s->dquant;
        s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
        s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];

        /* --- intra: copy source pixels straight into the DCT blocks --- */
        wrap_y = s->linesize;
        ptr = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16;

        /* MB partially outside the picture: pad it in edge_emu_buffer */
        if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
            emulated_edge_mc(s, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
            ptr= s->edge_emu_buffer;

        if(s->flags&CODEC_FLAG_INTERLACED_DCT){
            int progressive_score, interlaced_score;

            /* compare vertical activity frame-wise vs field-wise */
            progressive_score= pix_vcmp16x8(ptr, wrap_y ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
            interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y , wrap_y*2);

            if(progressive_score > interlaced_score + 100){
                s->interlaced_dct=1;
            s->interlaced_dct=0;

        get_pixels(s->block[0], ptr , wrap_y);
        get_pixels(s->block[1], ptr + 8, wrap_y);
        get_pixels(s->block[2], ptr + dct_offset , wrap_y);
        get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);

        if(s->flags&CODEC_FLAG_GRAY){
            int wrap_c = s->uvlinesize;
            ptr = s->new_picture[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
                emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
                ptr= s->edge_emu_buffer;
            get_pixels(s->block[4], ptr, wrap_c);

            ptr = s->new_picture[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
                emulated_edge_mc(s, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
                ptr= s->edge_emu_buffer;
            get_pixels(s->block[5], ptr, wrap_c);

        /* --- inter: motion-compensate, then difference against source --- */
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        UINT8 *dest_y, *dest_cb, *dest_cr;
        UINT8 *ptr_y, *ptr_cb, *ptr_cr;

        dest_y = s->current_picture[0] + (mb_y * 16 * s->linesize ) + mb_x * 16;
        dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
        dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
        wrap_y = s->linesize;
        wrap_c = s->uvlinesize;
        ptr_y = s->new_picture[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
        ptr_cb = s->new_picture[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
        ptr_cr = s->new_picture[2] + (mb_y * 8 * wrap_c) + mb_x * 8;

        if ((!s->no_rounding) || s->pict_type==B_TYPE){
            op_pix = put_pixels_tab;
            op_qpix= put_qpel_pixels_tab;
            op_pix = put_no_rnd_pixels_tab;
            op_qpix= put_no_rnd_qpel_pixels_tab;

        if (s->mv_dir & MV_DIR_FORWARD) {
            MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture, op_pix, op_qpix);
            /* a following backward pass averages onto the forward result */
            op_pix = avg_pixels_tab;
            op_qpix= avg_qpel_pixels_tab;
        if (s->mv_dir & MV_DIR_BACKWARD) {
            MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture, op_pix, op_qpix);

        if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
            emulated_edge_mc(s, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
            ptr_y= s->edge_emu_buffer;

        if(s->flags&CODEC_FLAG_INTERLACED_DCT){
            int progressive_score, interlaced_score;

            /* same field/frame decision as intra, but on the residual */
            progressive_score= pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y )
                             + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y );
            interlaced_score = pix_diff_vcmp16x8(ptr_y , dest_y , wrap_y*2)
                             + pix_diff_vcmp16x8(ptr_y + wrap_y , dest_y + wrap_y , wrap_y*2);

            if(progressive_score > interlaced_score + 600){
                s->interlaced_dct=1;
            s->interlaced_dct=0;

        diff_pixels(s->block[0], ptr_y , dest_y , wrap_y);
        diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y);
        diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);

        if(s->flags&CODEC_FLAG_GRAY){
                emulated_edge_mc(s, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
                ptr_cb= s->edge_emu_buffer;
            diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
                emulated_edge_mc(s, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
                ptr_cr= s->edge_emu_buffer;
            diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);

        /* pre quantization */
        /* low-variance MB: mark blocks whose residual SAD is tiny so the
           DCT/quantization can be skipped for them entirely */
        if(s->mc_mb_var[s->mb_width*mb_y+ mb_x]<2*s->qscale*s->qscale){
            if(pix_abs8x8(ptr_y , dest_y , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
            if(pix_abs8x8(ptr_y + 8, dest_y + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
            if(pix_abs8x8(ptr_y +dct_offset , dest_y +dct_offset , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
            if(pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
            if(pix_abs8x8(ptr_cb , dest_cb , wrap_y) < 20*s->qscale) skip_dct[4]= 1;
            if(pix_abs8x8(ptr_cr , dest_cr , wrap_y) < 20*s->qscale) skip_dct[5]= 1;

            if(skip_dct[i]) num++;

        if(s->mb_x==0 && s->mb_y==0){
                printf("%6d %1d\n", stat[i], i);

        adap_parm = ((s->avg_mb_var << 1) + s->mb_var[s->mb_width*mb_y+mb_x] + 1.0) /
                    ((s->mb_var[s->mb_width*mb_y+mb_x] << 1) + s->avg_mb_var + 1.0);

        printf("\ntype=%c qscale=%2d adap=%0.2f dquant=%4.2f var=%4d avgvar=%4d",
                (s->mb_type[s->mb_width*mb_y+mb_x] > 0) ? 'I' : 'P',
                s->qscale, adap_parm, s->qscale*adap_parm,
                s->mb_var[s->mb_width*mb_y+mb_x], s->avg_mb_var);

    /* DCT & quantize */
    if(s->out_format==FMT_MJPEG){
        /* mjpeg: quantizer is baked into the matrix, fixed qscale of 8 */
            s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
            if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
                s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
            // FIXME we could decide to change to quantizer instead of clipping
            // JS: I don't think that would be a good idea it could lower quality instead
            //     of improve it. Just INTRADC clipping deserves changes in quantizer
                if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
                s->block_last_index[i]= -1;
        if(s->luma_elim_threshold && !s->mb_intra)
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
        if(s->chroma_elim_threshold && !s->mb_intra)
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

    if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
        /* grayscale intra: emit flat chroma blocks (DC = 128) */
        s->block_last_index[4]=
        s->block_last_index[5]= 0;
        s->block[5][0]= 128;

    /* huffman encode */
    switch(s->out_format) {
        mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        if (s->h263_msmpeg4)
            msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        else if(s->h263_pred)
            mpeg4_encode_mb(s, s->block, motion_x, motion_y);
            h263_encode_mb(s, s->block, motion_x, motion_y);
        mjpeg_encode_mb(s, s->block);
2145 void ff_copy_bits(PutBitContext *pb, UINT8 *src, int length)
2147 int bytes= length>>4;
2148 int bits= length&15;
2151 if(length==0) return;
2153 for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
2154 put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
/*
 * Snapshot the encoder state that encoding one MB may modify, copying it
 * from s into d.  Used by encode_mb_hq() before trying a candidate MB
 * type, so the state can later be restored / compared.
 */
static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
    /* motion vector predictors */
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?

    /* mpeg1 */
    d->mb_incr= s->mb_incr;
        d->last_dc[i]= s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_skiped= s->mb_skiped;
/*
 * Counterpart of copy_context_before_encode(): after a candidate MB
 * encoding turned out to be the best so far, copy the resulting encoder
 * state (predictors, statistics, MB decision fields, bit writers and
 * block indexes) from s into d so it can be committed later.
 */
static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
    /* motion vectors and their predictors */
    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop?

    /* mpeg1 */
    d->mb_incr= s->mb_incr;
        d->last_dc[i]= s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    /* the decisions this candidate encoding made */
    d->mb_intra= s->mb_intra;
    d->mb_skiped= s->mb_skiped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    if(s->data_partitioning){
        d->tex_pb= s->tex_pb;
        d->block_last_index[i]= s->block_last_index[i];
/*
 * Trial-encode the current MB as candidate `type` into one of two
 * scratch bit buffers (selected by *next_block), count the bits it
 * produced, and if it beats *dmin record the result in `best`.
 * `backup` holds the pre-MB encoder state restored before each try.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                                int *dmin, int *next_block, int motion_x, int motion_y)
    /* restore the state saved before the first candidate of this MB */
    copy_context_before_encode(s, backup, type);

    /* redirect output into the scratch buffers for this candidate */
    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];

    encode_mb(s, motion_x, motion_y);

    bits_count= get_bit_count(&s->pb);
    if(s->data_partitioning){
        bits_count+= get_bit_count(&s->pb2);
        bits_count+= get_bit_count(&s->tex_pb);

    /* fewer bits than the best candidate so far: keep this one */
    if(bits_count<*dmin){
        copy_context_after_encode(best, s, type);
2247 static void encode_picture(MpegEncContext *s, int picture_number)
2249 int mb_x, mb_y, last_gob, pdif = 0;
2252 MpegEncContext best_s, backup_s;
2253 UINT8 bit_buf[2][3000];
2254 UINT8 bit_buf2[2][3000];
2255 UINT8 bit_buf_tex[2][3000];
2256 PutBitContext pb[2], pb2[2], tex_pb[2];
2259 init_put_bits(&pb [i], bit_buf [i], 3000, NULL, NULL);
2260 init_put_bits(&pb2 [i], bit_buf2 [i], 3000, NULL, NULL);
2261 init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL);
2264 s->picture_number = picture_number;
2269 s->block_wrap[3]= s->mb_width*2 + 2;
2271 s->block_wrap[5]= s->mb_width + 2;
2273 /* Reset the average MB variance */
2275 s->mc_mb_var_sum = 0;
2277 /* we need to initialize some time vars before we can encode b-frames */
2278 if (s->h263_pred && !s->h263_msmpeg4)
2279 ff_set_mpeg4_time(s, s->picture_number);
2281 s->scene_change_score=0;
2283 s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration
2285 /* Estimate motion for every MB */
2286 if(s->pict_type != I_TYPE){
2287 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2288 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2289 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2290 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2291 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2292 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2295 s->block_index[0]+=2;
2296 s->block_index[1]+=2;
2297 s->block_index[2]+=2;
2298 s->block_index[3]+=2;
2300 /* compute motion vector & mb_type and store in context */
2301 if(s->pict_type==B_TYPE)
2302 ff_estimate_b_frame_motion(s, mb_x, mb_y);
2304 ff_estimate_p_frame_motion(s, mb_x, mb_y);
2305 // s->mb_type[mb_y*s->mb_width + mb_x]=MB_TYPE_INTER;
2309 }else /* if(s->pict_type == I_TYPE) */{
2311 //FIXME do we need to zero them?
2312 memset(s->motion_val[0], 0, sizeof(INT16)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
2313 memset(s->p_mv_table , 0, sizeof(INT16)*(s->mb_width+2)*(s->mb_height+2)*2);
2314 memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2316 if(!s->fixed_qscale){
2317 /* finding spatial complexity for I-frame rate control */
2318 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2319 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2322 uint8_t *pix = s->new_picture[0] + (yy * s->linesize) + xx;
2324 int sum = pix_sum(pix, s->linesize);
2327 varc = (pix_norm1(pix, s->linesize) - sum*sum + 500 + 128)>>8;
2329 s->mb_var [s->mb_width * mb_y + mb_x] = varc;
2330 s->mb_mean[s->mb_width * mb_y + mb_x] = (sum+7)>>4;
2331 s->mb_var_sum += varc;
2336 if(s->scene_change_score > 0 && s->pict_type == P_TYPE){
2337 s->pict_type= I_TYPE;
2338 memset(s->mb_type , MB_TYPE_INTRA, sizeof(UINT8)*s->mb_width*s->mb_height);
2339 if(s->max_b_frames==0){
2340 s->input_pict_type= I_TYPE;
2341 s->input_picture_in_gop_number=0;
2343 //printf("Scene change detected, encoding as I Frame %d %d\n", s->mb_var_sum, s->mc_mb_var_sum);
2346 if(s->pict_type==P_TYPE || s->pict_type==S_TYPE)
2347 s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
2348 ff_fix_long_p_mvs(s);
2349 if(s->pict_type==B_TYPE){
2350 s->f_code= ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
2351 s->b_code= ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
2353 ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
2354 ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
2355 ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
2356 ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
2359 if (s->fixed_qscale)
2360 s->frame_qscale = s->avctx->quality;
2362 s->frame_qscale = ff_rate_estimate_qscale(s);
2364 if(s->adaptive_quant){
2365 switch(s->codec_id){
2366 case CODEC_ID_MPEG4:
2367 ff_clean_mpeg4_qscales(s);
2370 case CODEC_ID_H263P:
2371 ff_clean_h263_qscales(s);
2375 s->qscale= s->qscale_table[0];
2377 s->qscale= (int)(s->frame_qscale + 0.5);
2379 if (s->out_format == FMT_MJPEG) {
2380 /* for mjpeg, we do include qscale in the matrix */
2381 s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
2383 s->intra_matrix[i] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
2384 convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
2385 s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias);
2388 s->last_bits= get_bit_count(&s->pb);
2389 switch(s->out_format) {
2391 mjpeg_picture_header(s);
2394 if (s->h263_msmpeg4)
2395 msmpeg4_encode_picture_header(s, picture_number);
2396 else if (s->h263_pred)
2397 mpeg4_encode_picture_header(s, picture_number);
2398 else if (s->h263_rv10)
2399 rv10_encode_picture_header(s, picture_number);
2401 h263_encode_picture_header(s, picture_number);
2404 mpeg1_encode_picture_header(s, picture_number);
2407 bits= get_bit_count(&s->pb);
2408 s->header_bits= bits - s->last_bits;
2419 /* init last dc values */
2420 /* note: quant matrix value (8) is implied here */
2421 s->last_dc[0] = 128;
2422 s->last_dc[1] = 128;
2423 s->last_dc[2] = 128;
2425 s->last_mv[0][0][0] = 0;
2426 s->last_mv[0][0][1] = 0;
2428 /* Get the GOB height based on picture height */
2429 if (s->out_format == FMT_H263 && !s->h263_pred && !s->h263_msmpeg4) {
2430 if (s->height <= 400)
2432 else if (s->height <= 800)
2436 }else if(s->codec_id==CODEC_ID_MPEG4){
2440 if(s->codec_id==CODEC_ID_MPEG4 && s->data_partitioning && s->pict_type!=B_TYPE)
2441 ff_mpeg4_init_partitions(s);
2445 for(mb_y=0; mb_y < s->mb_height; mb_y++) {
2446 /* Put GOB header based on RTP MTU for formats which support it per line (H263*)*/
2447 /* TODO: Put all this stuff in a separate generic function */
2450 s->ptr_lastgob = s->pb.buf;
2451 s->ptr_last_mb_line = s->pb.buf;
2452 } else if (s->out_format == FMT_H263 && !s->h263_pred && !s->h263_msmpeg4 && !(mb_y % s->gob_index)) {
2453 // MN: we could move the space check from h263 -> here, as it is not h263-specific
2454 last_gob = h263_encode_gob_header(s, mb_y);
2456 s->first_slice_line = 1;
2458 /*MN: we reset it here instead of at the end of each line because mpeg4 can have
2459 slice lines starting & ending in the middle*/
2460 s->first_slice_line = 0;
2465 s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2466 s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2468 s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
2469 s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
2470 s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
2471 s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
2472 s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2);
2473 s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2);
2474 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2475 const int mb_type= s->mb_type[mb_y * s->mb_width + mb_x];
2476 const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1;
2482 s->block_index[0]+=2;
2483 s->block_index[1]+=2;
2484 s->block_index[2]+=2;
2485 s->block_index[3]+=2;
2486 s->block_index[4]++;
2487 s->block_index[5]++;
2489 /* write gob / video packet header for formats which support it at any MB (MPEG4) */
2490 if(s->rtp_mode && s->mb_y>0 && s->codec_id==CODEC_ID_MPEG4){
2491 int pdif= pbBufPtr(&s->pb) - s->ptr_lastgob;
2493 //the *2 is there so we stay below the requested size
2494 if(pdif + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size){
2495 if(s->codec_id==CODEC_ID_MPEG4){
2496 if(s->data_partitioning && s->pict_type!=B_TYPE){
2497 ff_mpeg4_merge_partitions(s);
2498 ff_mpeg4_init_partitions(s);
2500 ff_mpeg4_encode_video_packet_header(s);
2502 if(s->flags&CODEC_FLAG_PASS1){
2503 int bits= get_bit_count(&s->pb);
2504 s->misc_bits+= bits - s->last_bits;
2507 ff_mpeg4_clean_buffers(s);
2509 s->ptr_lastgob = pbBufPtr(&s->pb);
2510 s->first_slice_line=1;
2511 s->resync_mb_x=mb_x;
2512 s->resync_mb_y=mb_y;
2515 if( (s->resync_mb_x == s->mb_x)
2516 && s->resync_mb_y+1 == s->mb_y){
2517 s->first_slice_line=0;
2521 if(mb_type & (mb_type-1)){ // more than 1 MB type possible
2523 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2525 copy_context_before_encode(&backup_s, s, -1);
2527 best_s.data_partitioning= s->data_partitioning;
2528 if(s->data_partitioning){
2529 backup_s.pb2= s->pb2;
2530 backup_s.tex_pb= s->tex_pb;
2533 if(mb_type&MB_TYPE_INTER){
2534 s->mv_dir = MV_DIR_FORWARD;
2535 s->mv_type = MV_TYPE_16X16;
2537 s->mv[0][0][0] = s->p_mv_table[xy][0];
2538 s->mv[0][0][1] = s->p_mv_table[xy][1];
2539 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb,
2540 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2542 if(mb_type&MB_TYPE_INTER4V){
2543 s->mv_dir = MV_DIR_FORWARD;
2544 s->mv_type = MV_TYPE_8X8;
2547 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
2548 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
2550 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb,
2551 &dmin, &next_block, 0, 0);
2553 if(mb_type&MB_TYPE_FORWARD){
2554 s->mv_dir = MV_DIR_FORWARD;
2555 s->mv_type = MV_TYPE_16X16;
2557 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2558 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2559 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb,
2560 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2562 if(mb_type&MB_TYPE_BACKWARD){
2563 s->mv_dir = MV_DIR_BACKWARD;
2564 s->mv_type = MV_TYPE_16X16;
2566 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2567 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2568 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2569 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2571 if(mb_type&MB_TYPE_BIDIR){
2572 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2573 s->mv_type = MV_TYPE_16X16;
2575 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2576 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2577 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2578 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2579 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb,
2580 &dmin, &next_block, 0, 0);
2582 if(mb_type&MB_TYPE_DIRECT){
2583 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2584 s->mv_type = MV_TYPE_16X16; //FIXME
2586 s->mv[0][0][0] = s->b_direct_forw_mv_table[xy][0];
2587 s->mv[0][0][1] = s->b_direct_forw_mv_table[xy][1];
2588 s->mv[1][0][0] = s->b_direct_back_mv_table[xy][0];
2589 s->mv[1][0][1] = s->b_direct_back_mv_table[xy][1];
2590 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb,
2591 &dmin, &next_block, s->b_direct_mv_table[xy][0], s->b_direct_mv_table[xy][1]);
2593 if(mb_type&MB_TYPE_INTRA){
2594 s->mv_dir = MV_DIR_FORWARD;
2595 s->mv_type = MV_TYPE_16X16;
2599 encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb,
2600 &dmin, &next_block, 0, 0);
2601 /* force cleaning of ac/dc pred stuff if needed ... */
2602 if(s->h263_pred || s->h263_aic)
2603 s->mbintra_table[mb_x + mb_y*s->mb_width]=1;
2605 copy_context_after_encode(s, &best_s, -1);
2607 pb_bits_count= get_bit_count(&s->pb);
2608 flush_put_bits(&s->pb);
2609 ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2612 if(s->data_partitioning){
2613 pb2_bits_count= get_bit_count(&s->pb2);
2614 flush_put_bits(&s->pb2);
2615 ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2616 s->pb2= backup_s.pb2;
2618 tex_pb_bits_count= get_bit_count(&s->tex_pb);
2619 flush_put_bits(&s->tex_pb);
2620 ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2621 s->tex_pb= backup_s.tex_pb;
2623 s->last_bits= get_bit_count(&s->pb);
2625 int motion_x, motion_y;
2626 s->mv_type=MV_TYPE_16X16;
2627 // only one MB-Type possible
2630 s->mv_dir = MV_DIR_FORWARD;
2632 motion_x= s->mv[0][0][0] = 0;
2633 motion_y= s->mv[0][0][1] = 0;
2636 s->mv_dir = MV_DIR_FORWARD;
2638 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2639 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2641 case MB_TYPE_INTER4V:
2642 s->mv_dir = MV_DIR_FORWARD;
2643 s->mv_type = MV_TYPE_8X8;
2646 s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
2647 s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
2649 motion_x= motion_y= 0;
2651 case MB_TYPE_DIRECT:
2652 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2654 motion_x=s->b_direct_mv_table[xy][0];
2655 motion_y=s->b_direct_mv_table[xy][1];
2656 s->mv[0][0][0] = s->b_direct_forw_mv_table[xy][0];
2657 s->mv[0][0][1] = s->b_direct_forw_mv_table[xy][1];
2658 s->mv[1][0][0] = s->b_direct_back_mv_table[xy][0];
2659 s->mv[1][0][1] = s->b_direct_back_mv_table[xy][1];
2662 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2666 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2667 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2668 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2669 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2671 case MB_TYPE_BACKWARD:
2672 s->mv_dir = MV_DIR_BACKWARD;
2674 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2675 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2677 case MB_TYPE_FORWARD:
2678 s->mv_dir = MV_DIR_FORWARD;
2680 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2681 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2682 // printf(" %d %d ", motion_x, motion_y);
2685 motion_x=motion_y=0; //gcc warning fix
2686 printf("illegal MB type\n");
2688 encode_mb(s, motion_x, motion_y);
2690 /* clean the MV table in IPS frames for direct mode in B frames */
2691 if(s->mb_intra /* && I,P,S_TYPE */){
2692 s->p_mv_table[xy][0]=0;
2693 s->p_mv_table[xy][1]=0;
2696 MPV_decode_mb(s, s->block);
2697 //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb));
2701 /* Obtain average GOB size for RTP */
2704 s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line;
2705 else if (!(mb_y % s->gob_index)) {
2706 s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1;
2707 s->ptr_last_mb_line = pbBufPtr(&s->pb);
2709 //fprintf(stderr, "\nMB line: %d\tSize: %u\tAvg. Size: %u", s->mb_y,
2710 // (s->pb.buf_ptr - s->ptr_last_mb_line), s->mb_line_avgsize);
2711 if(s->codec_id!=CODEC_ID_MPEG4) s->first_slice_line = 0; //FIXME clean
2716 if(s->codec_id==CODEC_ID_MPEG4 && s->data_partitioning && s->pict_type!=B_TYPE)
2717 ff_mpeg4_merge_partitions(s);
2719 if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
2720 msmpeg4_encode_ext_header(s);
2722 if(s->codec_id==CODEC_ID_MPEG4)
2723 ff_mpeg4_stuffing(&s->pb);
2725 //if (s->gob_number)
2726 // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
2728 /* Send the last GOB if RTP */
2730 flush_put_bits(&s->pb);
2731 pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
2732 /* Call the RTP callback to send the last GOB */
2733 if (s->rtp_callback)
2734 s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
2735 s->ptr_lastgob = pbBufPtr(&s->pb);
2736 //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
/*
 * Reference C implementation of forward quantization for one 8x8 DCT block.
 * Quantizes the coefficients of block n at the given qscale, returns the
 * index of the last non-zero coefficient (zigzag order), and reports via
 * *overflow whether any quantized level exceeded s->max_qcoeff.
 * NOTE(review): interior lines are missing from this excerpt (embedded
 * source line numbers jump); the declarations of qmat/bias/max, the main
 * coefficient loop header, and several braces are not visible here.
 */
2740 static int dct_quantize_c(MpegEncContext *s,
2741 DCTELEM *block, int n,
2742 int qscale, int *overflow)
2744 int i, j, level, last_non_zero, q;
2748 unsigned int threshold1, threshold2;
2752 #ifndef ARCH_ALPHA /* Alpha uses unpermuted matrix */
2753 /* we need this permutation so that we correct the IDCT
2754 permutation. will be moved into DCT code */
2755 block_permute(block);
/* Intra path: DC (block[0]) is quantized separately with a plain
 * rounded division by q; the AC coefficients then use the intra
 * matrix and bias selected below. */
2766 /* For AIC we skip quant/dequant of INTRADC */
2769 /* note: block[0] is assumed to be positive */
2770 block[0] = (block[0] + (q >> 1)) / q;
2773 qmat = s->q_intra_matrix[qscale];
2774 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* Inter path: inter matrix and inter bias instead. */
2778 qmat = s->q_inter_matrix[qscale];
2779 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* Dead-zone thresholds: coefficients whose scaled magnitude stays
 * within [-threshold1, threshold1] quantize to zero.  threshold2 is
 * 2*threshold1 so a single unsigned comparison below covers both
 * the positive and the negative side at once. */
2781 threshold1= (1<<QMAT_SHIFT) - bias - 1;
2782 threshold2= (threshold1<<1);
/* Per-coefficient loop (header not visible in this excerpt):
 * map scan position i to raster index j, scale by the quant matrix,
 * then keep only levels outside the dead zone. */
2785 j = zigzag_direct[i];
2787 level = level * qmat[j];
2789 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
2790 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
2791 if(((unsigned)(level+threshold1))>threshold2){
/* Positive level: round with +bias then shift down. */
2793 level= (bias + level)>>QMAT_SHIFT;
/* Negative level: same rounding, sign handled by the caller branch
 * (not visible here). */
2796 level= (bias - level)>>QMAT_SHIFT;
2805 *overflow= s->max_qcoeff < max; //overflow might have happened
2807 return last_non_zero;
/*
 * MPEG-1 inverse quantization (reference C version) for one 8x8 block.
 * Rescales block n in place by qscale and the intra/inter quant matrix;
 * the "(level - 1) | 1" step forces odd levels (MPEG-1 oddification /
 * mismatch control).
 * NOTE(review): interior lines are missing from this excerpt — the
 * intra/inter branch, sign handling, and clamping code are truncated.
 */
2810 static void dct_unquantize_mpeg1_c(MpegEncContext *s,
2811 DCTELEM *block, int n, int qscale)
2813 int i, level, nCoeffs;
2814 const UINT16 *quant_matrix;
/* With alternate scan the whole 64-coefficient block is processed,
 * otherwise only up to the last coded coefficient. */
2816 if(s->alternate_scan) nCoeffs= 64;
2817 else nCoeffs= s->block_last_index[n]+1;
/* Intra DC uses the luma/chroma DC scale instead of the matrix
 * (branch selecting y vs. c scale is not visible here). */
2821 block[0] = block[0] * s->y_dc_scale;
2823 block[0] = block[0] * s->c_dc_scale;
2824 /* XXX: only mpeg1 */
2825 quant_matrix = s->intra_matrix;
/* Intra AC coefficients: level * qscale * matrix / 8, then make odd. */
2826 for(i=1;i<nCoeffs;i++) {
2827 int j= zigzag_direct[i];
2832 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2833 level = (level - 1) | 1;
2836 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2837 level = (level - 1) | 1;
/* Paranoid range check: MPEG-1 coefficients must fit in [-2048, 2047]. */
2840 if (level < -2048 || level > 2047)
2841 fprintf(stderr, "unquant error %d %d\n", i, level);
/* Inter blocks: (2*level + 1) * qscale * matrix / 16, then make odd. */
2848 quant_matrix = s->inter_matrix;
2849 for(;i<nCoeffs;i++) {
2850 int j= zigzag_direct[i];
2855 level = (((level << 1) + 1) * qscale *
2856 ((int) (quant_matrix[j]))) >> 4;
2857 level = (level - 1) | 1;
2860 level = (((level << 1) + 1) * qscale *
2861 ((int) (quant_matrix[j]))) >> 4;
2862 level = (level - 1) | 1;
2865 if (level < -2048 || level > 2047)
2866 fprintf(stderr, "unquant error %d %d\n", i, level);
/*
 * MPEG-2 inverse quantization (reference C version) for one 8x8 block.
 * Same structure as the MPEG-1 variant, but without the per-coefficient
 * "(level - 1) | 1" oddification (MPEG-2 uses a different mismatch
 * control, not visible in this excerpt).
 * NOTE(review): interior lines are missing — branch bodies, sign
 * handling and closing braces are truncated.
 */
2874 static void dct_unquantize_mpeg2_c(MpegEncContext *s,
2875 DCTELEM *block, int n, int qscale)
2877 int i, level, nCoeffs;
2878 const UINT16 *quant_matrix;
/* Process all 64 coefficients for alternate scan, else only the coded ones. */
2880 if(s->alternate_scan) nCoeffs= 64;
2881 else nCoeffs= s->block_last_index[n]+1;
/* Intra DC scaled by the luma/chroma DC scale (selection branch not visible). */
2885 block[0] = block[0] * s->y_dc_scale;
2887 block[0] = block[0] * s->c_dc_scale;
2888 quant_matrix = s->intra_matrix;
/* Intra AC: level * qscale * matrix / 8. */
2889 for(i=1;i<nCoeffs;i++) {
2890 int j= zigzag_direct[i];
2895 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2898 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Paranoid range check: levels must fit in [-2048, 2047]. */
2901 if (level < -2048 || level > 2047)
2902 fprintf(stderr, "unquant error %d %d\n", i, level);
/* Inter blocks: (2*level + 1) * qscale * matrix / 16. */
2910 quant_matrix = s->inter_matrix;
2911 for(;i<nCoeffs;i++) {
2912 int j= zigzag_direct[i];
2917 level = (((level << 1) + 1) * qscale *
2918 ((int) (quant_matrix[j]))) >> 4;
2921 level = (((level << 1) + 1) * qscale *
2922 ((int) (quant_matrix[j]))) >> 4;
2925 if (level < -2048 || level > 2047)
2926 fprintf(stderr, "unquant error %d %d\n", i, level);
/*
 * H.263-style inverse quantization for one 8x8 block:
 * level' = level * (2*qscale) +/- ((qscale - 1) | 1), sign-dependent.
 * NOTE(review): interior lines are missing from this excerpt — the
 * branch choosing nCoeffs, the AIC special case body, and the sign
 * branches are truncated.
 */
2937 static void dct_unquantize_h263_c(MpegEncContext *s,
2938 DCTELEM *block, int n, int qscale)
2940 int i, level, qmul, qadd;
/* Intra DC scaled by the luma/chroma DC scale (selection branch not visible). */
2946 block[0] = block[0] * s->y_dc_scale;
2948 block[0] = block[0] * s->c_dc_scale;
2951 nCoeffs= 64; //does not always use zigzag table
2954 nCoeffs= zigzag_end[ s->block_last_index[n] ];
/* Multiplier is 2*qscale; the rounding offset is forced odd. */
2957 qmul = s->qscale << 1;
2958 if (s->h263_aic && s->mb_intra)
2961 qadd = (s->qscale - 1) | 1;
/* Per-coefficient rescale; negative levels get -qadd, positive +qadd. */
2963 for(;i<nCoeffs;i++) {
2967 level = level * qmul - qadd;
2969 level = level * qmul + qadd;
/* Paranoid range check: levels must fit in [-2048, 2047]. */
2972 if (level < -2048 || level > 2047)
2973 fprintf(stderr, "unquant error %d %d\n", i, level);
/*
 * Error-concealment helper: flatten a macroblock to its predicted DC
 * values, discarding all AC detail.  Each of the four 8x8 luma blocks
 * and the two chroma blocks of MB (mb_x, mb_y) is filled with its
 * stored DC value / 8.
 * NOTE(review): the loop headers over i/x/y are missing from this
 * excerpt (embedded source line numbers jump).
 */
2980 static void remove_ac(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
2982 int dc, dcb, dcr, y, i;
/* DC of luma block i (i&1 = horizontal, i>>1 = vertical position). */
2984 dc= s->dc_val[0][mb_x*2+1 + (i&1) + (mb_y*2+1 + (i>>1))*(s->mb_width*2+2)];
2988 dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
/* Chroma DC values for this MB. */
2992 dcb = s->dc_val[1][mb_x+1 + (mb_y+1)*(s->mb_width+2)];
2993 dcr= s->dc_val[2][mb_x+1 + (mb_y+1)*(s->mb_width+2)];
2997 dest_cb[x + y*(s->uvlinesize)]= dcb/8;
2998 dest_cr[x + y*(s->uvlinesize)]= dcr/8;
3004 * will conceal past errors, and also drop b frames if needed
/*
 * Conceal decoding errors in the macroblocks between the current
 * position and the last resync marker, walking backwards.  Depending
 * on s->decoding_error, intra MBs either get their AC coefficients
 * removed (DECODING_AC_LOST) or are replaced by a zero-motion copy
 * from the last picture; inter MBs are re-rendered with their (or
 * zero) motion vectors.
 * NOTE(review): interior lines are missing from this excerpt — the
 * mb_x/mb_y initialization, several branch bodies, and the concealment
 * threshold adjustments around 3034-3041 are truncated.
 */
3007 void ff_conceal_past_errors(MpegEncContext *s, int unknown_pos)
3012 int i, intra_count=0, inter_count=0;
/* Maximum backwards distance (in MBs) over which to conceal; the
 * msmpeg4 branch currently uses the same value. */
3013 int intra_conceal= s->msmpeg4_version ? 50 : 50; //FIXME finetune
3014 int inter_conceal= s->msmpeg4_version ? 50 : 50;
/* Clamp the starting MB position to the picture. */
3017 if(mb_x>=s->mb_width) mb_x= s->mb_width -1;
3018 if(mb_y>=s->mb_height) mb_y= s->mb_height-1;
/* If the error position is unknown, classify the error type from the
 * stream features. */
3020 if(s->decoding_error==0 && unknown_pos){
3021 if(s->data_partitioning && s->pict_type!=B_TYPE)
3022 s->decoding_error= DECODING_AC_LOST;
3024 s->decoding_error= DECODING_DESYNC;
/* A desync in a reference frame damages the next P frame too. */
3027 if(s->decoding_error==DECODING_DESYNC && s->pict_type!=B_TYPE) s->next_p_frame_damaged=1;
/* Count intra vs. inter MBs up to the error position. */
3029 for(i=mb_x + mb_y*s->mb_width; i>=0; i--){
3030 if(s->mbintra_table[i]) intra_count++;
3034 if(s->decoding_error==DECODING_AC_LOST){
3037 }else if(s->decoding_error==DECODING_ACDC_LOST){
/* Unknown position in a mostly-inter region: conceal everything. */
3042 if(unknown_pos && (intra_count<inter_count)){
3043 intra_conceal= inter_conceal= s->mb_num;
3044 // printf("%d %d\n",intra_count, inter_count);
3047 fprintf(stderr, "concealing errors\n");
3049 /* for all MBs from the current one back until the last resync marker */
3050 for(; mb_y>=0 && mb_y>=s->resync_mb_y; mb_y--){
3051 for(; mb_x>=0; mb_x--){
3052 uint8_t *dest_y = s->current_picture[0] + (mb_y * 16* s->linesize ) + mb_x * 16;
3053 uint8_t *dest_cb = s->current_picture[1] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
3054 uint8_t *dest_cr = s->current_picture[2] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8;
3055 int mb_x_backup= s->mb_x; //FIXME pass xy to mpeg_motion
3056 int mb_y_backup= s->mb_y;
/* Intra MB within concealment range: strip AC or copy from the
 * previous picture with zero motion. */
3059 if(s->mbintra_table[mb_y*s->mb_width + mb_x] && mb_dist<intra_conceal){
3060 if(s->decoding_error==DECODING_AC_LOST){
3061 remove_ac(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
3062 // printf("remove ac to %d %d\n", mb_x, mb_y);
3064 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
3065 s->last_picture, 0, 0, put_pixels_tab,
3066 0/*mx*/, 0/*my*/, 16);
/* Inter MB within range: re-render, reusing the stored motion
 * vector unless we are fully desynced. */
3069 else if(!s->mbintra_table[mb_y*s->mb_width + mb_x] && mb_dist<inter_conceal){
3073 if(s->decoding_error!=DECODING_DESYNC){
3074 int xy= mb_x*2+1 + (mb_y*2+1)*(s->mb_width*2+2);
3075 mx= s->motion_val[ xy ][0];
3076 my= s->motion_val[ xy ][1];
3079 mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
3080 s->last_picture, 0, 0, put_pixels_tab,
/* Restore the MB position clobbered for mpeg_motion. */
3083 s->mb_x= mb_x_backup;
3084 s->mb_y= mb_y_backup;
/* Stop once the resync marker MB is reached; skipped MBs do not
 * increase the concealment distance. */
3086 if(mb_x== s->resync_mb_x && mb_y== s->resync_mb_y) return;
3087 if(!s->mbskip_table[mb_x + mb_y*s->mb_width]) mb_dist++;
/*
 * Encoder registration structs.  Each AVCodec entry describes one
 * encoder built on MpegEncContext (MPEG-1, H.263, H.263+, RV10, MJPEG,
 * MPEG-4, MSMPEG4 v1-v3, WMV1).
 * NOTE(review): most initializer fields (name string, type, the
 * init/encode/close function pointers) and the closing "};" of each
 * struct are missing from this excerpt — embedded source line numbers
 * jump between entries.
 */
3093 AVCodec mpeg1video_encoder = {
3096 CODEC_ID_MPEG1VIDEO,
3097 sizeof(MpegEncContext),
3103 AVCodec h263_encoder = {
3107 sizeof(MpegEncContext),
3113 AVCodec h263p_encoder = {
3117 sizeof(MpegEncContext),
3123 AVCodec rv10_encoder = {
3127 sizeof(MpegEncContext),
3133 AVCodec mjpeg_encoder = {
3137 sizeof(MpegEncContext),
3143 AVCodec mpeg4_encoder = {
3147 sizeof(MpegEncContext),
3153 AVCodec msmpeg4v1_encoder = {
3157 sizeof(MpegEncContext),
3163 AVCodec msmpeg4v2_encoder = {
3167 sizeof(MpegEncContext),
3173 AVCodec msmpeg4v3_encoder = {
3177 sizeof(MpegEncContext),
3183 AVCodec wmv1_encoder = {
3187 sizeof(MpegEncContext),
3193 AVCodec wmv2_encoder = {
3197 sizeof(MpegEncContext),