/*
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "dsputil.h"
#include "vp8dsp.h"

// TODO: Maybe add dequant
static void vp8_luma_dc_wht_c(DCTELEM block[4][4][16], DCTELEM dc[16])
{
    int i, t0, t1, t2, t3;

    for (i = 0; i < 4; i++) {
        t0 = dc[0*4+i] + dc[3*4+i];
        t1 = dc[1*4+i] + dc[2*4+i];
        t2 = dc[1*4+i] - dc[2*4+i];
        t3 = dc[0*4+i] - dc[3*4+i];

        dc[0*4+i] = t0 + t1;
        dc[1*4+i] = t3 + t2;
        dc[2*4+i] = t0 - t1;
        dc[3*4+i] = t3 - t2;
    }

    for (i = 0; i < 4; i++) {
        t0 = dc[i*4+0] + dc[i*4+3] + 3; // rounding
        t1 = dc[i*4+1] + dc[i*4+2];
        t2 = dc[i*4+1] - dc[i*4+2];
        t3 = dc[i*4+0] - dc[i*4+3] + 3; // rounding

        *block[i][0] = (t0 + t1) >> 3;
        *block[i][1] = (t3 + t2) >> 3;
        *block[i][2] = (t0 - t1) >> 3;
        *block[i][3] = (t3 - t2) >> 3;
    }
}
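
// Fixed-point multipliers for the inverse transform:
// 20091/65536 ~= sqrt(2)*cos(pi/8) - 1 and 35468/65536 ~= sqrt(2)*sin(pi/8),
// so MUL_20091(a) ~= a*sqrt(2)*cos(pi/8) and MUL_35468(a) ~= a*sqrt(2)*sin(pi/8).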
#define MUL_20091(a) ((((a)*20091) >> 16) + (a))
#define MUL_35468(a) (((a)*35468) >> 16)

static void vp8_idct_add_c(uint8_t *dst, DCTELEM block[16], int stride)
{
    int i, t0, t1, t2, t3;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    DCTELEM tmp[16];

    for (i = 0; i < 4; i++) {
        t0 = block[0*4+i] + block[2*4+i];
        t1 = block[0*4+i] - block[2*4+i];
        t2 = MUL_35468(block[1*4+i]) - MUL_20091(block[3*4+i]);
        t3 = MUL_20091(block[1*4+i]) + MUL_35468(block[3*4+i]);

        tmp[i*4+0] = t0 + t3;
        tmp[i*4+1] = t1 + t2;
        tmp[i*4+2] = t1 - t2;
        tmp[i*4+3] = t0 - t3;
    }

    for (i = 0; i < 4; i++) {
        t0 = tmp[0*4+i] + tmp[2*4+i];
        t1 = tmp[0*4+i] - tmp[2*4+i];
        t2 = MUL_35468(tmp[1*4+i]) - MUL_20091(tmp[3*4+i]);
        t3 = MUL_20091(tmp[1*4+i]) + MUL_35468(tmp[3*4+i]);

        dst[0] = cm[dst[0] + ((t0 + t3 + 4) >> 3)];
        dst[1] = cm[dst[1] + ((t1 + t2 + 4) >> 3)];
        dst[2] = cm[dst[2] + ((t1 - t2 + 4) >> 3)];
        dst[3] = cm[dst[3] + ((t0 - t3 + 4) >> 3)];
        dst += stride;
    }
}
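
// DC-only blocks: the reconstructed DC value (block[0] + 4) >> 3 is added to
// all 16 pixels of the 4x4 block; biasing the clip-table pointer by dc folds
// the addition into the clamping lookup.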
static void vp8_idct_dc_add_c(uint8_t *dst, DCTELEM block[16], int stride)
{
    int i, dc = (block[0] + 4) >> 3;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc;

    for (i = 0; i < 4; i++) {
        dst[0] = cm[dst[0]];
        dst[1] = cm[dst[1]];
        dst[2] = cm[dst[2]];
        dst[3] = cm[dst[3]];
        dst += stride;
    }
}

static void vp8_idct_dc_add4_c(uint8_t *dst, DCTELEM block[4][16], int stride)
{
    int i, j;

    for (j = 0; j < 4; j++) {
        uint8_t *pix = dst+j*4;
        int dc = (block[j][0] + 4) >> 3;
        uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc;

        for (i = 0; i < 4; i++) {
            pix[0] = cm[pix[0]];
            pix[1] = cm[pix[1]];
            pix[2] = cm[pix[2]];
            pix[3] = cm[pix[3]];
            pix += stride;
        }
    }
}

// because I like only having two parameters to pass functions...
#define LOAD_PIXELS\
    int av_unused p3 = p[-4*stride];\
    int av_unused p2 = p[-3*stride];\
    int av_unused p1 = p[-2*stride];\
    int av_unused p0 = p[-1*stride];\
    int av_unused q0 = p[ 0*stride];\
    int av_unused q1 = p[ 1*stride];\
    int av_unused q2 = p[ 2*stride];\
    int av_unused q3 = p[ 3*stride];

#define clip_int8(n) (cm[n+0x80]-0x80)
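
// Shared filter kernel used by the simple filter and by the normal loop
// filters (edge and inner). With is4tap set, p1 - q1 enters the filter value
// and only p0/q0 are adjusted; with is4tap == 0 (inner edges without high
// edge variance) half of f1 is additionally applied to p1 and q1.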
static av_always_inline void filter_common(uint8_t *p, int stride, int is4tap)
{
    LOAD_PIXELS
    int a, f1, f2;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    a = 3*(q0 - p0);
    if (is4tap)
        a += clip_int8(p1 - q1);
    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a+4, 127) >> 3;
    f2 = FFMIN(a+3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1*stride] = cm[p0 + f2];
    p[ 0*stride] = cm[q0 - f1];

    // only used for _inner on blocks without high edge variance
    if (!is4tap) {
        a = (f1+1) >> 1;
        p[-2*stride] = cm[p1 + a];
        p[ 1*stride] = cm[q1 - a];
    }
}
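
// simple_limit() is the edge-strength test of the simple loop filter:
// 2*|p0-q0| + |p1-q1|/2 <= flim. normal_limit() reuses it with the edge
// limit E before checking the interior differences against I.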
static av_always_inline int simple_limit(uint8_t *p, int stride, int flim)
{
    LOAD_PIXELS
    return 2*FFABS(p0-q0) + (FFABS(p1-q1) >> 1) <= flim;
}

/**
 * E - limit at the macroblock edge
 * I - limit for interior difference
 */
static av_always_inline int normal_limit(uint8_t *p, int stride, int E, int I)
{
    LOAD_PIXELS
    return simple_limit(p, stride, E)
        && FFABS(p3-p2) <= I && FFABS(p2-p1) <= I && FFABS(p1-p0) <= I
        && FFABS(q3-q2) <= I && FFABS(q2-q1) <= I && FFABS(q1-q0) <= I;
}

// high edge variance
static av_always_inline int hev(uint8_t *p, int stride, int thresh)
{
    LOAD_PIXELS
    return FFABS(p1-p0) > thresh || FFABS(q1-q0) > thresh;
}

static av_always_inline void filter_mbedge(uint8_t *p, int stride)
{
    int a0, a1, a2, w;
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    LOAD_PIXELS

    w = clip_int8(p1-q1);
    w = clip_int8(w + 3*(q0-p0));

    a0 = (27*w + 63) >> 7;
    a1 = (18*w + 63) >> 7;
    a2 = ( 9*w + 63) >> 7;

    p[-3*stride] = cm[p2 + a2];
    p[-2*stride] = cm[p1 + a1];
    p[-1*stride] = cm[p0 + a0];
    p[ 0*stride] = cm[q0 - a0];
    p[ 1*stride] = cm[q1 - a1];
    p[ 2*stride] = cm[q2 - a2];
}
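
// LOOP_FILTER generates both the macroblock-edge filter and the inner-edge
// ("_inner") filter for one direction: stridea steps along the edge (one
// pixel or one row at a time) and strideb steps across it, so the same body
// yields the vertical and horizontal variants.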
#define LOOP_FILTER(dir, size, stridea, strideb, maybe_inline) \
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, int stride,\
                                                    int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            if (hev(dst+i*stridea, strideb, hev_thresh))\
                filter_common(dst+i*stridea, strideb, 1);\
            else\
                filter_mbedge(dst+i*stridea, strideb);\
        }\
}\
\
static maybe_inline void vp8_ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, int stride,\
                                                          int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            int hv = hev(dst+i*stridea, strideb, hev_thresh);\
            if (hv)\
                filter_common(dst+i*stridea, strideb, 1);\
            else\
                filter_common(dst+i*stridea, strideb, 0);\
        }\
}

LOOP_FILTER(v, 16, 1, stride,)
LOOP_FILTER(h, 16, stride, 1,)

#define UV_LOOP_FILTER(dir, stridea, strideb) \
LOOP_FILTER(dir, 8, stridea, strideb, av_always_inline) \
static void vp8_ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, int stride,\
                                             int fE, int fI, int hev_thresh)\
{\
    vp8_ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh);\
    vp8_ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh);\
}\
static void vp8_ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, int stride,\
                                                   int fE, int fI, int hev_thresh)\
{\
    vp8_ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh);\
    vp8_ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh);\
}

UV_LOOP_FILTER(v, 1, stride)
UV_LOOP_FILTER(h, stride, 1)

static void vp8_v_loop_filter_simple_c(uint8_t *dst, int stride, int flim)
{
    int i;

    for (i = 0; i < 16; i++)
        if (simple_limit(dst+i, stride, flim))
            filter_common(dst+i, stride, 1);
}

static void vp8_h_loop_filter_simple_c(uint8_t *dst, int stride, int flim)
{
    int i;

    for (i = 0; i < 16; i++)
        if (simple_limit(dst+i*stride, 1, flim))
            filter_common(dst+i*stride, 1, 1);
}
static const uint8_t subpel_filters[7][6] = {
    { 0,   6, 123,  12,   1,   0 },
    { 2,  11, 108,  36,   8,   1 },
    { 0,   9,  93,  50,   6,   0 },
    { 3,  16,  77,  77,  16,   3 },
    { 0,   6,  50,  93,   9,   0 },
    { 1,   8,  36, 108,  11,   2 },
    { 0,   1,  12, 123,   6,   0 },
};

#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int x, int y) { \
    int i; \
    for (i = 0; i < h; i++, dst+= dststride, src+= srcstride) { \
        memcpy(dst, src, WIDTH); \
    } \
}

PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)
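
// The FILTER_* macros evaluate one output pixel at column x. The taps sum to
// 128, so the result is rounded with +64, shifted right by 7 and clamped to
// 8 bits through cm; both x and cm must be in scope at the expansion site.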
#define FILTER_6TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + F[0]*src[x-2*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + F[5]*src[x+3*stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + 64) >> 7]

#define VP8_EPEL_H(SIZE, FILTER, FILTERNAME) \
static void put_vp8_epel ## SIZE ## _ ## FILTERNAME ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}

#define VP8_EPEL_V(SIZE, FILTER, FILTERNAME) \
static void put_vp8_epel ## SIZE ## _ ## FILTERNAME ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my-1]; \
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}
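
// The HV variants below run the horizontal filter into a temporary buffer
// first and then filter vertically from it: h+5 rows are produced, starting
// two rows above the block, to cover the 6-tap vertical pass; tmp_array is
// sized for block heights up to 2*SIZE.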
#define VP8_EPEL_HV(SIZE, FILTERX, FILTERY, FILTERNAME) \
static void put_vp8_epel ## SIZE ## _ ## FILTERNAME ## _c(uint8_t *dst, int dststride, uint8_t *src, int srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+5)*SIZE]; \
    uint8_t *tmp = tmp_array; \
    src -= 2*srcstride; \
\
    for (y = 0; y < h+5; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTERX(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
\
    tmp = tmp_array + 2*SIZE; \
    filter = subpel_filters[my-1]; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTERY(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}

VP8_EPEL_H(16, FILTER_4TAP, h4)
VP8_EPEL_H(8, FILTER_4TAP, h4)
VP8_EPEL_H(4, FILTER_4TAP, h4)
VP8_EPEL_H(16, FILTER_6TAP, h6)
VP8_EPEL_H(8, FILTER_6TAP, h6)
VP8_EPEL_H(4, FILTER_6TAP, h6)
VP8_EPEL_V(16, FILTER_4TAP, v4)
VP8_EPEL_V(8, FILTER_4TAP, v4)
VP8_EPEL_V(4, FILTER_4TAP, v4)
VP8_EPEL_V(16, FILTER_6TAP, v6)
VP8_EPEL_V(8, FILTER_6TAP, v6)
VP8_EPEL_V(4, FILTER_6TAP, v6)
VP8_EPEL_HV(16, FILTER_4TAP, FILTER_4TAP, h4v4)
VP8_EPEL_HV(8, FILTER_4TAP, FILTER_4TAP, h4v4)
VP8_EPEL_HV(4, FILTER_4TAP, FILTER_4TAP, h4v4)
VP8_EPEL_HV(16, FILTER_4TAP, FILTER_6TAP, h4v6)
VP8_EPEL_HV(8, FILTER_4TAP, FILTER_6TAP, h4v6)
VP8_EPEL_HV(4, FILTER_4TAP, FILTER_6TAP, h4v6)
VP8_EPEL_HV(16, FILTER_6TAP, FILTER_4TAP, h6v4)
VP8_EPEL_HV(8, FILTER_6TAP, FILTER_4TAP, h6v4)
VP8_EPEL_HV(4, FILTER_6TAP, FILTER_4TAP, h6v4)
VP8_EPEL_HV(16, FILTER_6TAP, FILTER_6TAP, h6v6)
VP8_EPEL_HV(8, FILTER_6TAP, FILTER_6TAP, h6v6)
VP8_EPEL_HV(4, FILTER_6TAP, FILTER_6TAP, h6v6)
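
// Bilinear subpel interpolation: mx and my are in 1/8-pel units, so the two
// taps are weighted (8-mx):mx horizontally and (8-my):my vertically, with +4
// rounding and a shift by 3.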
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        dst += stride; \
        src += stride; \
    } \
} \
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
{ \
    int c = 8-my, d = my; \
    int x, y; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*src[x] + d*src[x+stride] + 4) >> 3; \
        dst += stride; \
        src += stride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, int stride, uint8_t *src, int s2, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int c = 8-my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+1)*SIZE]; \
    uint8_t *tmp = tmp_array; \
    for (y = 0; y < h+1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        tmp += SIZE; \
        src += stride; \
    } \
    tmp = tmp_array; \
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*tmp[x] + d*tmp[x+SIZE] + 4) >> 3; \
        dst += stride; \
        tmp += SIZE; \
    } \
}

VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)
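
// The motion-compensation tables are indexed [size][vfilter][hfilter]:
// size 0/1/2 selects the 16/8/4-pixel-wide functions, and a filter index of
// 0/1/2 means no filtering, 4-tap or 6-tap in that direction. The bilinear
// table uses the same layout, with the single bilinear kernel filling both
// the 4-tap and 6-tap slots.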
#define VP8_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c

av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
{
    dsp->vp8_luma_dc_wht  = vp8_luma_dc_wht_c;
    dsp->vp8_idct_add     = vp8_idct_add_c;
    dsp->vp8_idct_dc_add  = vp8_idct_dc_add_c;
    dsp->vp8_idct_dc_add4 = vp8_idct_dc_add4_c;

    dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c;
    dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
    dsp->vp8_v_loop_filter8uv = vp8_v_loop_filter8uv_c;
    dsp->vp8_h_loop_filter8uv = vp8_h_loop_filter8uv_c;

    dsp->vp8_v_loop_filter16y_inner = vp8_v_loop_filter16_inner_c;
    dsp->vp8_h_loop_filter16y_inner = vp8_h_loop_filter16_inner_c;
    dsp->vp8_v_loop_filter8uv_inner = vp8_v_loop_filter8uv_inner_c;
    dsp->vp8_h_loop_filter8uv_inner = vp8_h_loop_filter8uv_inner_c;

    dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c;
    dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c;

    VP8_MC_FUNC(0, 16);
    VP8_MC_FUNC(1, 8);
    VP8_MC_FUNC(2, 4);

    VP8_BILINEAR_MC_FUNC(0, 16);
    VP8_BILINEAR_MC_FUNC(1, 8);
    VP8_BILINEAR_MC_FUNC(2, 4);

    if (HAVE_MMX)
        ff_vp8dsp_init_x86(dsp);
    if (HAVE_ALTIVEC)
        ff_vp8dsp_init_altivec(dsp);
}