2 * H263/MPEG4 backend for encoder and decoder
3 * Copyright (c) 2000,2001 Fabrice Bellard
5 * Copyright (c) 2001 Juan J. Sierralta P
6 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
33 #include "mpegvideo.h"
39 #include "mpeg4video.h"
/* Static backing storage for run/level (RL) VLC tables shared by the H.263
 * encoder and decoder; dimensions are derived from MAX_RUN/MAX_LEVEL.
 * NOTE(review): the meaning of the two leading [2][2] indices is not visible
 * in this extract -- confirm against the RL-table initialisation code. */
42 uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
/* Propagate the current macroblock's motion information into the per-MB
 * arrays of s->current_picture (mbskip_table, motion_val, ref_index and,
 * when encoding, mb_type) so that later frames and the loop filter can
 * read it back.
 * NOTE(review): this extract is missing lines (inline numbering jumps);
 * comments below describe only the code that is visible. */
45 void ff_h263_update_motion_val(MpegEncContext * s){
46 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
47 //FIXME a lot of that is only needed for !low_delay
48 const int wrap = s->b8_stride;
49 const int xy = s->block_index[0];
/* Record whether this MB was coded as skipped. */
51 s->current_picture.mbskip_table[mb_xy] = s->mb_skipped;
53 if(s->mv_type != MV_TYPE_8X8){
54 int motion_x, motion_y;
/* NOTE(review): a branch (presumably the intra case setting motion to 0)
 * appears to be missing from the extract between here and the 16x16 case. */
58 } else if (s->mv_type == MV_TYPE_16X16) {
59 motion_x = s->mv[0][0][0];
60 motion_y = s->mv[0][0][1];
61 } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
/* Combine the two field MVs into one frame MV; the `| (motion_x&1)` keeps
 * the halved value odd when the raw sum was odd (round-toward-odd). */
63 motion_x = s->mv[0][0][0] + s->mv[0][1][0];
64 motion_y = s->mv[0][0][1] + s->mv[0][1][1];
65 motion_x = (motion_x>>1) | (motion_x&1);
/* Store each field's MV and field-select flags.
 * NOTE(review): the enclosing for(i = 0; i < 2; ...) loop is not visible
 * in this extract; i is presumably the field index -- confirm. */
67 s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
68 s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
70 s->current_picture.ref_index[0][4*mb_xy ] =
71 s->current_picture.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
72 s->current_picture.ref_index[0][4*mb_xy + 2] =
73 s->current_picture.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
/* Replicate the single MB-level MV into all four 8x8 slots of motion_val. */
76 /* no update if 8X8 because it has been done during parsing */
77 s->current_picture.motion_val[0][xy][0] = motion_x;
78 s->current_picture.motion_val[0][xy][1] = motion_y;
79 s->current_picture.motion_val[0][xy + 1][0] = motion_x;
80 s->current_picture.motion_val[0][xy + 1][1] = motion_y;
81 s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
82 s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
83 s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
84 s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
/* Encoder-only: also record the macroblock type. */
87 if(s->encoding){ //FIXME encoding MUST be cleaned up
88 if (s->mv_type == MV_TYPE_8X8)
89 s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
/* NOTE(review): the `else if`/`else` conditions guarding the next two
 * assignments (intra vs. 16x16 inter) are missing from this extract. */
91 s->current_picture.mb_type[mb_xy] = MB_TYPE_INTRA;
93 s->current_picture.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
/* Predict the DC coefficient of block n of the current macroblock from the
 * left neighbour (a) and the top neighbour (c); the value 1024 serves as
 * the "no valid predictor" sentinel.  Also writes the address of this
 * block's own DC slot into *dc_val_ptr so the caller can store the
 * reconstructed DC there.
 * NOTE(review): lines are missing from this extract (the luma/chroma `if`,
 * the single-neighbour prediction cases and the return statement);
 * comments below cover only what is shown. */
97 int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
99 int x, y, wrap, a, c, pred_dc;
102 /* find prediction */
/* (x, y) in 8x8-block units; n&1 and (n&2)>>1 locate the luma block
 * inside the macroblock. */
104 x = 2 * s->mb_x + (n & 1);
105 y = 2 * s->mb_y + ((n & 2) >> 1);
/* Luma DC plane; chroma blocks (n >= 4) select their own plane below. */
107 dc_val = s->dc_val[0];
112 dc_val = s->dc_val[n - 4 + 1];
/* Left (a) and top (c) neighbour DC values. */
117 a = dc_val[(x - 1) + (y) * wrap];
118 c = dc_val[(x) + (y - 1) * wrap];
120 /* No prediction outside GOB boundary */
/* On the first slice line / at a resync boundary the neighbour lies in a
 * different GOB, so it is replaced by the neutral value 1024. */
121 if(s->first_slice_line && n!=3){
123 if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
125 /* just DC prediction */
126 if (a != 1024 && c != 1024)
127 pred_dc = (a + c) >> 1;
133 /* we assume pred is positive */
/* Hand the caller the slot where the reconstructed DC must be stored. */
134 *dc_val_ptr = &dc_val[x + y * wrap];
/* H.263 (Annex J style) in-loop deblocking for the current macroblock:
 * applies the DSP vertical/horizontal loop filters across the MB-internal
 * edges and the edges shared with the top and left neighbours.  Edges
 * bordering SKIP macroblocks reuse/zero the qp so filtering is disabled
 * or softened there.
 * NOTE(review): this extract is missing lines (the qp_c setup, the outer
 * gating conditions and several if/else arms); comments describe only the
 * visible calls. */
138 void ff_h263_loop_filter(MpegEncContext * s){
140 const int linesize = s->linesize;
141 const int uvlinesize= s->uvlinesize;
142 const int xy = s->mb_y * s->mb_stride + s->mb_x;
143 uint8_t *dest_y = s->dest[0];
144 uint8_t *dest_cb= s->dest[1];
145 uint8_t *dest_cr= s->dest[2];
147 // if(s->pict_type==AV_PICTURE_TYPE_B && !s->readable) return;
/* Internal horizontal edge (between the top and bottom 8x8 luma rows).
 * NOTE(review): qp_c's definition is not visible in this extract. */
153 if (!IS_SKIP(s->current_picture.mb_type[xy])) {
155 s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
156 s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
/* Edges shared with the macroblock above (and above-left). */
161 int qp_dt, qp_tt, qp_tc;
/* If the top neighbour is skipped its qp is not used directly;
 * NOTE(review): the branch assigning qp_tt for that case is missing. */
163 if (IS_SKIP(s->current_picture.mb_type[xy - s->mb_stride]))
166 qp_tt = s->current_picture.qscale_table[xy - s->mb_stride];
/* Top edge of this MB, filtered with qp_tc (and its chroma mapping). */
174 const int chroma_qp= s->chroma_qscale_table[qp_tc];
175 s->dsp.h263_v_loop_filter(dest_y , linesize, qp_tc);
176 s->dsp.h263_v_loop_filter(dest_y+8, linesize, qp_tc);
178 s->dsp.h263_v_loop_filter(dest_cb , uvlinesize, chroma_qp);
179 s->dsp.h263_v_loop_filter(dest_cr , uvlinesize, chroma_qp);
/* Vertical edge inside the row above (uses the top neighbour's qp_tt). */
183 s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
/* qp_dt: qp used for the edge towards the above-left (diagonal) MB. */
186 if (qp_tt || IS_SKIP(s->current_picture.mb_type[xy - 1 - s->mb_stride]))
189 qp_dt = s->current_picture.qscale_table[xy - 1 - s->mb_stride];
192 const int chroma_qp= s->chroma_qscale_table[qp_dt];
193 s->dsp.h263_h_loop_filter(dest_y -8*linesize , linesize, qp_dt);
194 s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp);
195 s->dsp.h263_h_loop_filter(dest_cr-8*uvlinesize, uvlinesize, chroma_qp);
/* Internal vertical edge; the extra call on the last MB row covers the
 * bottom half that no MB below will ever filter. */
201 s->dsp.h263_h_loop_filter(dest_y +8, linesize, qp_c);
202 if(s->mb_y + 1 == s->mb_height)
203 s->dsp.h263_h_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
/* qp_lc: qp used for the edge shared with the left neighbour.
 * NOTE(review): the branch taken when this condition is true is missing. */
208 if (qp_c || IS_SKIP(s->current_picture.mb_type[xy - 1]))
211 qp_lc = s->current_picture.qscale_table[xy - 1];
/* Left edge of this MB; again the last row also filters its bottom half
 * and the chroma left edges. */
214 s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
215 if(s->mb_y + 1 == s->mb_height){
216 const int chroma_qp= s->chroma_qscale_table[qp_lc];
217 s->dsp.h263_h_loop_filter(dest_y +8* linesize, linesize, qp_lc);
218 s->dsp.h263_h_loop_filter(dest_cb , uvlinesize, chroma_qp);
219 s->dsp.h263_h_loop_filter(dest_cr , uvlinesize, chroma_qp);
/* AC/DC prediction for H.263 advanced intra coding (Annex I): predicts the
 * DC coefficient of block n from the left/top neighbours and, depending on
 * s->h263_aic_dir, adds the stored AC coefficients of the left column or
 * the top row into `block`.  Finally the DC/AC tables are updated with the
 * current block's coefficients for future predictions.
 * NOTE(review): this extract is missing lines (the luma/chroma `if`, the
 * AIC branch bodies, the for-loop headers around the += / table-update
 * statements, and the ac_val1 assignment); comments cover visible code. */
225 void ff_h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
227 int x, y, wrap, a, c, pred_dc, scale, i;
228 int16_t *dc_val, *ac_val, *ac_val1;
230 /* find prediction */
/* (x, y) in 8x8-block units inside the picture. */
232 x = 2 * s->mb_x + (n & 1);
233 y = 2 * s->mb_y + (n>> 1);
/* Luma planes/scale; chroma blocks (n >= 4) override these below. */
235 dc_val = s->dc_val[0];
236 ac_val = s->ac_val[0][0];
237 scale = s->y_dc_scale;
242 dc_val = s->dc_val[n - 4 + 1];
243 ac_val = s->ac_val[n - 4 + 1][0];
244 scale = s->c_dc_scale;
/* Each block stores 16 AC values (8 top row + 8 left column). */
247 ac_val += ((y) * wrap + (x)) * 16;
/* Left (a) and top (c) neighbour DC values; 1024 = "no predictor". */
253 a = dc_val[(x - 1) + (y) * wrap];
254 c = dc_val[(x) + (y - 1) * wrap];
256 /* No prediction outside GOB boundary */
257 if(s->first_slice_line && n!=3){
259 if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
/* AIC direction flag selects which neighbour's ACs are added. */
264 if (s->h263_aic_dir) {
265 /* left prediction */
/* Add the left neighbour's stored column (i<<3 walks the first column
 * of the permuted scan). NOTE(review): loop header not visible. */
269 block[s->dsp.idct_permutation[i<<3]] += ac_val[i];
/* Top prediction: add the top neighbour's stored row (ac_val[8..15]). */
278 block[s->dsp.idct_permutation[i ]] += ac_val[i + 8];
284 /* just DC prediction */
285 if (a != 1024 && c != 1024)
286 pred_dc = (a + c) >> 1;
293 /* we assume pred is positive */
/* Reconstruct the DC: de-quantised level plus prediction. */
294 block[0]=block[0]*scale + pred_dc;
301 /* Update AC/DC tables */
302 dc_val[(x) + (y) * wrap] = block[0];
/* Save this block's first column and first row for future prediction.
 * NOTE(review): ac_val1's initialisation and the loop headers are missing
 * from this extract. */
306 ac_val1[i ] = block[s->dsp.idct_permutation[i<<3]];
309 ac_val1[8 + i] = block[s->dsp.idct_permutation[i ]];
/* Motion-vector prediction: computes the median (mid_pred) predictor
 * (*px, *py) for 8x8 block `block` of the current MB from the left (A),
 * top (B) and top-right (C) neighbour MVs, with special cases for the
 * first slice line and resync (slice-start) boundaries, and returns a
 * pointer to this block's own slot in motion_val.
 * NOTE(review): this extract is missing lines (the px/py parameters, the
 * assignment of A, several branch bodies and the final return); comments
 * describe only the visible code. */
312 int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
316 int16_t *A, *B, *C, (*mot_val)[2];
/* Horizontal offset (in 8x8 units) from this block to its C (top-right)
 * neighbour, per block index 0..3; -1 for block 3 (top-LEFT is used). */
317 static const int off[4]= {2, 1, 1, -1};
/* Slot of the current block in the picture-wide MV array. */
320 mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
323 /* special case for first (slice) line */
324 if (s->first_slice_line && block<3) {
325 // we can't just change some MVs to simulate that as we need them for the B frames (and ME)
326 // and if we ever support non rectangular objects than we need to do a few ifs here anyway :(
327 if(block==0){ //most common case
328 if(s->mb_x == s->resync_mb_x){ //rare
330 }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
/* Only A and C are usable here; B is replaced by 0 in the median. */
331 C = mot_val[off[block] - wrap];
336 *px = mid_pred(A[0], 0, C[0]);
337 *py = mid_pred(A[1], 0, C[1]);
/* Same A/0/C median for the block==1/2 first-line case. */
344 if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
345 C = mot_val[off[block] - wrap];
346 *px = mid_pred(A[0], 0, C[0]);
347 *py = mid_pred(A[1], 0, C[1]);
/* General case: full A/B/C median prediction. */
353 B = mot_val[ - wrap];
354 C = mot_val[off[block] - wrap];
355 if(s->mb_x == s->resync_mb_x) //rare
358 *px = mid_pred(A[0], B[0], C[0]);
359 *py = mid_pred(A[1], B[1], C[1]);
362 B = mot_val[ - wrap];
363 C = mot_val[off[block] - wrap];
364 *px = mid_pred(A[0], B[0], C[0]);
365 *py = mid_pred(A[1], B[1], C[1]);
372 * Get the GOB height based on picture height.
374 int ff_h263_get_gob_height(MpegEncContext *s){
375 if (s->height <= 400)
377 else if (s->height <= 800)