/*
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 * Copyright (C) 2014 Peter Ross
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VP8 compatible video decoder
 */

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "mathops.h"
#include "vp8dsp.h"

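/* Generates helpers that apply the DC-only IDCT+add to four 4x4 sub-blocks
 * at once: a 2x2 arrangement covering an 8x8 chroma area (add4uv) and four
 * horizontally adjacent blocks of a luma row (add4y). */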
#define MK_IDCT_DC_ADD4_C(name) \
static void name ## _idct_dc_add4uv_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)\
{\
    name ## _idct_dc_add_c(dst+stride*0+0, block[0], stride);\
    name ## _idct_dc_add_c(dst+stride*0+4, block[1], stride);\
    name ## _idct_dc_add_c(dst+stride*4+0, block[2], stride);\
    name ## _idct_dc_add_c(dst+stride*4+4, block[3], stride);\
}\
\
static void name ## _idct_dc_add4y_c(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)\
{\
    name ## _idct_dc_add_c(dst+ 0, block[0], stride);\
    name ## _idct_dc_add_c(dst+ 4, block[1], stride);\
    name ## _idct_dc_add_c(dst+ 8, block[2], stride);\
    name ## _idct_dc_add_c(dst+12, block[3], stride);\
}

#if CONFIG_VP7_DECODER
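/* Unlike VP8's Walsh-Hadamard transform, VP7 runs its luma DC coefficients
 * through a DCT-style butterfly. The fixed-point constants below are the
 * basis values scaled by 2^15: 23170 ~= cos(pi/4)*2^15,
 * 30274 ~= cos(pi/8)*2^15, 12540 ~= sin(pi/8)*2^15. */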
static void vp7_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        a1 = (dc[i*4+0] + dc[i*4+2]) * 23170;
        b1 = (dc[i*4+0] - dc[i*4+2]) * 23170;
        c1 = dc[i*4+1] * 12540 - dc[i*4+3] * 30274;
        d1 = dc[i*4+1] * 30274 + dc[i*4+3] * 12540;
        tmp[i*4+0] = (a1 + d1) >> 14;
        tmp[i*4+3] = (a1 - d1) >> 14;
        tmp[i*4+1] = (b1 + c1) >> 14;
        tmp[i*4+2] = (b1 - c1) >> 14;
    }

    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        AV_ZERO64(dc + i*4); // clear the consumed DC coefficients
        block[0][i][0] = (a1 + d1 + 0x20000) >> 18;
        block[3][i][0] = (a1 - d1 + 0x20000) >> 18;
        block[1][i][0] = (b1 + c1 + 0x20000) >> 18;
        block[2][i][0] = (b1 - c1 + 0x20000) >> 18;
    }
}

static void vp7_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (23170 * (23170 * dc[0] >> 14) + 0x20000) >> 18;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}

static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, a1, b1, c1, d1;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        a1 = (block[i*4+0] + block[i*4+2]) * 23170;
        b1 = (block[i*4+0] - block[i*4+2]) * 23170;
        c1 = block[i*4+1] * 12540 - block[i*4+3] * 30274;
        d1 = block[i*4+1] * 30274 + block[i*4+3] * 12540;
        AV_ZERO64(block + i*4); // clear the consumed coefficients
        tmp[i*4+0] = (a1 + d1) >> 14;
        tmp[i*4+3] = (a1 - d1) >> 14;
        tmp[i*4+1] = (b1 + c1) >> 14;
        tmp[i*4+2] = (b1 - c1) >> 14;
    }

    for (i = 0; i < 4; i++) {
        a1 = (tmp[i + 0] + tmp[i + 8]) * 23170;
        b1 = (tmp[i + 0] - tmp[i + 8]) * 23170;
        c1 = tmp[i + 4] * 12540 - tmp[i + 12] * 30274;
        d1 = tmp[i + 4] * 30274 + tmp[i + 12] * 12540;
        dst[0*stride+i] = av_clip_uint8(dst[0*stride+i] + ((a1 + d1 + 0x20000) >> 18));
        dst[3*stride+i] = av_clip_uint8(dst[3*stride+i] + ((a1 - d1 + 0x20000) >> 18));
        dst[1*stride+i] = av_clip_uint8(dst[1*stride+i] + ((b1 + c1 + 0x20000) >> 18));
        dst[2*stride+i] = av_clip_uint8(dst[2*stride+i] + ((b1 - c1 + 0x20000) >> 18));
    }
}

static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (23170 * (23170 * block[0] >> 14) + 0x20000) >> 18;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp7)
#endif /* CONFIG_VP7_DECODER */

// TODO: Maybe add dequant
#if CONFIG_VP8_DECODER
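/* VP8 replaces VP7's DCT of the luma DC coefficients with a Walsh-Hadamard
 * transform: additions and subtractions only, with a final rounded >> 3. */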
static void vp8_luma_dc_wht_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, t0, t1, t2, t3;

    for (i = 0; i < 4; i++) {
        t0 = dc[0*4+i] + dc[3*4+i];
        t1 = dc[1*4+i] + dc[2*4+i];
        t2 = dc[1*4+i] - dc[2*4+i];
        t3 = dc[0*4+i] - dc[3*4+i];

        dc[0*4+i] = t0 + t1;
        dc[1*4+i] = t3 + t2;
        dc[2*4+i] = t0 - t1;
        dc[3*4+i] = t3 - t2;
    }

    for (i = 0; i < 4; i++) {
        t0 = dc[i*4+0] + dc[i*4+3] + 3; // rounding
        t1 = dc[i*4+1] + dc[i*4+2];
        t2 = dc[i*4+1] - dc[i*4+2];
        t3 = dc[i*4+0] - dc[i*4+3] + 3; // rounding
        AV_ZERO64(dc + i*4); // clear the consumed DC coefficients

        block[i][0][0] = (t0 + t1) >> 3;
        block[i][1][0] = (t3 + t2) >> 3;
        block[i][2][0] = (t0 - t1) >> 3;
        block[i][3][0] = (t3 - t2) >> 3;
    }
}

static void vp8_luma_dc_wht_dc_c(int16_t block[4][4][16], int16_t dc[16])
{
    int i, val = (dc[0] + 3) >> 3;
    dc[0] = 0;

    for (i = 0; i < 4; i++) {
        block[i][0][0] = val;
        block[i][1][0] = val;
        block[i][2][0] = val;
        block[i][3][0] = val;
    }
}

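/* VP8 IDCT fixed-point multipliers: 20091/65536 ~= sqrt(2)*cos(pi/8) - 1
 * (the trailing "+ (a)" in MUL_20091 restores the integer part), and
 * 35468/65536 ~= sqrt(2)*sin(pi/8). */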
#define MUL_20091(a) ((((a)*20091) >> 16) + (a))
#define MUL_35468(a) (((a)*35468) >> 16)

static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, t0, t1, t2, t3;
    int16_t tmp[16];

    for (i = 0; i < 4; i++) {
        t0 = block[0*4+i] + block[2*4+i];
        t1 = block[0*4+i] - block[2*4+i];
        t2 = MUL_35468(block[1*4+i]) - MUL_20091(block[3*4+i]);
        t3 = MUL_20091(block[1*4+i]) + MUL_35468(block[3*4+i]);
        block[0*4+i] = 0;
        block[1*4+i] = 0;
        block[2*4+i] = 0;
        block[3*4+i] = 0;

        tmp[i*4+0] = t0 + t3;
        tmp[i*4+1] = t1 + t2;
        tmp[i*4+2] = t1 - t2;
        tmp[i*4+3] = t0 - t3;
    }

    for (i = 0; i < 4; i++) {
        t0 = tmp[0*4+i] + tmp[2*4+i];
        t1 = tmp[0*4+i] - tmp[2*4+i];
        t2 = MUL_35468(tmp[1*4+i]) - MUL_20091(tmp[3*4+i]);
        t3 = MUL_20091(tmp[1*4+i]) + MUL_35468(tmp[3*4+i]);

        dst[0] = av_clip_uint8(dst[0] + ((t0 + t3 + 4) >> 3));
        dst[1] = av_clip_uint8(dst[1] + ((t1 + t2 + 4) >> 3));
        dst[2] = av_clip_uint8(dst[2] + ((t1 - t2 + 4) >> 3));
        dst[3] = av_clip_uint8(dst[3] + ((t0 - t3 + 4) >> 3));
        dst += stride;
    }
}

static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
{
    int i, dc = (block[0] + 4) >> 3;
    block[0] = 0;

    for (i = 0; i < 4; i++) {
        dst[0] = av_clip_uint8(dst[0] + dc);
        dst[1] = av_clip_uint8(dst[1] + dc);
        dst[2] = av_clip_uint8(dst[2] + dc);
        dst[3] = av_clip_uint8(dst[3] + dc);
        dst += stride;
    }
}

MK_IDCT_DC_ADD4_C(vp8)
#endif /* CONFIG_VP8_DECODER */

// because I like only having two parameters to pass functions...
#define LOAD_PIXELS\
    int av_unused p3 = p[-4*stride];\
    int av_unused p2 = p[-3*stride];\
    int av_unused p1 = p[-2*stride];\
    int av_unused p0 = p[-1*stride];\
    int av_unused q0 = p[ 0*stride];\
    int av_unused q1 = p[ 1*stride];\
    int av_unused q2 = p[ 2*stride];\
    int av_unused q3 = p[ 3*stride];

#define clip_int8(n) (cm[n+0x80]-0x80)

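/* Common loop-filter core. 'a' is the filter value derived from 3*(q0-p0)
 * (plus p1-q1 in 4-tap mode), clipped to int8; f1 and f2 are the adjustments
 * applied to q0 and p0. The 2-tap mode (is4tap=0) additionally nudges p1/q1
 * and is only reached from the _inner filters on low-variance edges. */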
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap, int vpn)
{
    LOAD_PIXELS
    int a, f1, f2;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    a = 3*(q0 - p0);

    if (is4tap)
        a += clip_int8(p1 - q1);

    a = clip_int8(a);

    // We deviate from the spec here with c(a+3) >> 3
    // since that's what libvpx does.
    f1 = FFMIN(a+4, 127) >> 3;

    if (vpn == 7)
        f2 = f1 - ((a & 7) == 4);
    else
        f2 = FFMIN(a+3, 127) >> 3;

    // Despite what the spec says, we do need to clamp here to
    // be bitexact with libvpx.
    p[-1*stride] = cm[p0 + f2];
    p[ 0*stride] = cm[q0 - f1];

    // only used for _inner on blocks without high edge variance
    if (!is4tap) {
        a = (f1 + 1) >> 1;
        p[-2*stride] = cm[p1 + a];
        p[ 1*stride] = cm[q1 - a];
    }
}

static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
    LOAD_PIXELS
    return FFABS(p0-q0) <= flim;
}

static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride, int flim)
{
    LOAD_PIXELS
    return 2*FFABS(p0-q0) + (FFABS(p1-q1) >> 1) <= flim;
}

/**
 * E - limit at the macroblock edge
 * I - limit for interior difference
 */
#define NORMAL_LIMIT(vpn) \
static av_always_inline int vp ## vpn ## _normal_limit(uint8_t *p, ptrdiff_t stride, int E, int I)\
{\
    LOAD_PIXELS\
    return vp ## vpn ## _simple_limit(p, stride, E)\
        && FFABS(p3-p2) <= I && FFABS(p2-p1) <= I && FFABS(p1-p0) <= I\
        && FFABS(q3-q2) <= I && FFABS(q2-q1) <= I && FFABS(q1-q0) <= I;\
}

NORMAL_LIMIT(7)
NORMAL_LIMIT(8)

// high edge variance
static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
{
    LOAD_PIXELS
    return FFABS(p1-p0) > thresh || FFABS(q1-q0) > thresh;
}

static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
{
    int a0, a1, a2, w;
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    LOAD_PIXELS

    w = clip_int8(p1-q1);
    w = clip_int8(w + 3*(q0-p0));

    a0 = (27*w + 63) >> 7;
    a1 = (18*w + 63) >> 7;
    a2 = ( 9*w + 63) >> 7;

    p[-3*stride] = cm[p2 + a2];
    p[-2*stride] = cm[p1 + a1];
    p[-1*stride] = cm[p0 + a0];
    p[ 0*stride] = cm[q0 - a0];
    p[ 1*stride] = cm[q1 - a1];
    p[ 2*stride] = cm[q2 - a2];
}

#define LOOP_FILTER(vpn, dir, size, stridea, strideb, maybe_inline) \
static maybe_inline void vp ## vpn ## _ ## dir ## _loop_filter ## size ## _c(uint8_t *dst, ptrdiff_t stride,\
                                     int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (vp ## vpn ## _normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            if (hev(dst+i*stridea, strideb, hev_thresh))\
                filter_common(dst+i*stridea, strideb, 1, vpn);\
            else\
                filter_mbedge(dst+i*stridea, strideb);\
        }\
}\
\
static maybe_inline void vp ## vpn ## _ ## dir ## _loop_filter ## size ## _inner_c(uint8_t *dst, ptrdiff_t stride,\
                                     int flim_E, int flim_I, int hev_thresh)\
{\
    int i;\
\
    for (i = 0; i < size; i++)\
        if (vp ## vpn ## _normal_limit(dst+i*stridea, strideb, flim_E, flim_I)) {\
            int hv = hev(dst+i*stridea, strideb, hev_thresh);\
            if (hv)\
                filter_common(dst+i*stridea, strideb, 1, vpn);\
            else\
                filter_common(dst+i*stridea, strideb, 0, vpn);\
        }\
}

#define UV_LOOP_FILTER(vpn, dir, stridea, strideb) \
LOOP_FILTER(vpn, dir, 8, stridea, strideb, av_always_inline) \
static void vp ## vpn ## _ ## dir ## _loop_filter8uv_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
                                      int fE, int fI, int hev_thresh)\
{\
    vp ## vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh);\
    vp ## vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh);\
}\
static void vp ## vpn ## _ ## dir ## _loop_filter8uv_inner_c(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride,\
                                      int fE, int fI, int hev_thresh)\
{\
    vp ## vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, hev_thresh);\
    vp ## vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, hev_thresh);\
}

#define LOOP_FILTER_SIMPLE(vpn) \
static void vp ## vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)\
{\
    int i;\
\
    for (i = 0; i < 16; i++)\
        if (vp ## vpn ## _simple_limit(dst+i, stride, flim))\
            filter_common(dst+i, stride, 1, vpn);\
}\
\
static void vp ## vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, int flim)\
{\
    int i;\
\
    for (i = 0; i < 16; i++)\
        if (vp ## vpn ## _simple_limit(dst+i*stride, 1, flim))\
            filter_common(dst+i*stride, 1, 1, vpn);\
}

#if CONFIG_VP7_DECODER
LOOP_FILTER(7, v, 16, 1, stride,)
LOOP_FILTER(7, h, 16, stride, 1,)
UV_LOOP_FILTER(7, v, 1, stride)
UV_LOOP_FILTER(7, h, stride, 1)
LOOP_FILTER_SIMPLE(7)
#endif /* CONFIG_VP7_DECODER */

#if CONFIG_VP8_DECODER
LOOP_FILTER(8, v, 16, 1, stride,)
LOOP_FILTER(8, h, 16, stride, 1,)
UV_LOOP_FILTER(8, v, 1, stride)
UV_LOOP_FILTER(8, h, stride, 1)
LOOP_FILTER_SIMPLE(8)
#endif /* CONFIG_VP8_DECODER */

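/* Subpel filter coefficients for the seven fractional positions, indexed by
 * mx-1 (or my-1). Rows whose outer taps are zero are used through the 4-tap
 * kernel; the tap signs are applied inside FILTER_4TAP/FILTER_6TAP, so the
 * table stores plain magnitudes. */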
static const uint8_t subpel_filters[7][6] = {
    { 0,  6, 123,  12,  1, 0 },
    { 2, 11, 108,  36,  8, 1 },
    { 0,  9,  93,  50,  6, 0 },
    { 3, 16,  77,  77, 16, 3 },
    { 0,  6,  50,  93,  9, 0 },
    { 1,  8,  36, 108, 11, 2 },
    { 0,  1,  12, 123,  6, 0 },
};

#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ##_c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int x, int y) { \
    int i; \
    for (i = 0; i < h; i++, dst+= dststride, src+= srcstride) { \
        memcpy(dst, src, WIDTH); \
    } \
}

PUT_PIXELS(16)
PUT_PIXELS(8)
PUT_PIXELS(4)

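/* The kernels place the filter taps at src[x-2]..src[x+3] (6-tap) or
 * src[x-1]..src[x+2] (4-tap); the negative signs on F[1] and F[4] are applied
 * here. The result is rounded (+64, >> 7) and clipped to 8 bits via the crop
 * table. */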
#define FILTER_6TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + F[0]*src[x-2*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + F[5]*src[x+3*stride] + 64) >> 7]

#define FILTER_4TAP(src, F, stride) \
    cm[(F[2]*src[x+0*stride] - F[1]*src[x-1*stride] + \
        F[3]*src[x+1*stride] - F[4]*src[x+2*stride] + 64) >> 7]

#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, 1); \
        dst += dststride; \
        src += srcstride; \
    } \
}

#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[my-1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## TAPS ## TAP(src, filter, srcstride); \
        dst += dststride; \
        src += srcstride; \
    } \
}

#define VP8_EPEL_HV(SIZE, HTAPS, VTAPS) \
static void put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, ptrdiff_t dststride, uint8_t *src, ptrdiff_t srcstride, int h, int mx, int my) \
{ \
    const uint8_t *filter = subpel_filters[mx-1]; \
    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+VTAPS-1)*SIZE]; \
    uint8_t *tmp = tmp_array; \
    src -= (2-(VTAPS==4))*srcstride; \
\
    for (y = 0; y < h+VTAPS-1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = FILTER_ ## HTAPS ## TAP(src, filter, 1); \
        tmp += SIZE; \
        src += srcstride; \
    } \
\
    tmp = tmp_array + (2-(VTAPS==4))*SIZE; \
    filter = subpel_filters[my-1]; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = FILTER_ ## VTAPS ## TAP(tmp, filter, SIZE); \
        dst += dststride; \
        tmp += SIZE; \
    } \
}

VP8_EPEL_H(16, 4)
VP8_EPEL_H(8,  4)
VP8_EPEL_H(4,  4)
VP8_EPEL_H(16, 6)
VP8_EPEL_H(8,  6)
VP8_EPEL_H(4,  6)
VP8_EPEL_V(16, 4)
VP8_EPEL_V(8,  4)
VP8_EPEL_V(4,  4)
VP8_EPEL_V(16, 6)
VP8_EPEL_V(8,  6)
VP8_EPEL_V(4,  6)
VP8_EPEL_HV(16, 4, 4)
VP8_EPEL_HV(8,  4, 4)
VP8_EPEL_HV(4,  4, 4)
VP8_EPEL_HV(16, 4, 6)
VP8_EPEL_HV(8,  4, 6)
VP8_EPEL_HV(4,  4, 6)
VP8_EPEL_HV(16, 6, 4)
VP8_EPEL_HV(8,  6, 4)
VP8_EPEL_HV(4,  6, 4)
VP8_EPEL_HV(16, 6, 6)
VP8_EPEL_HV(8,  6, 6)
VP8_EPEL_HV(4,  6, 6)

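/* Bilinear motion compensation: mx/my are 3-bit fractions (0..7), so each
 * output pixel is an (8-f, f) weighted average rounded with +4 >> 3. The hv
 * variant filters horizontally into a temporary buffer (h+1 rows), then
 * vertically. */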
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    int c = 8-my, d = my; \
    int x, y; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*src[x] + d*src[x+sstride] + 4) >> 3; \
        dst += dstride; \
        src += sstride; \
    } \
} \
\
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
    int a = 8-mx, b = mx; \
    int c = 8-my, d = my; \
    int x, y; \
    uint8_t tmp_array[(2*SIZE+1)*SIZE]; \
    uint8_t *tmp = tmp_array; \
\
    for (y = 0; y < h+1; y++) { \
        for (x = 0; x < SIZE; x++) \
            tmp[x] = (a*src[x] + b*src[x+1] + 4) >> 3; \
        tmp += SIZE; \
        src += sstride; \
    } \
\
    tmp = tmp_array; \
\
    for (y = 0; y < h; y++) { \
        for (x = 0; x < SIZE; x++) \
            dst[x] = (c*tmp[x] + d*tmp[x+SIZE] + 4) >> 3; \
        dst += dstride; \
        tmp += SIZE; \
    } \
}

VP8_BILINEAR(16)
VP8_BILINEAR(8)
VP8_BILINEAR(4)

#define VP8_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_epel_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][1] = put_vp8_epel ## SIZE ## _h4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][0][2] = put_vp8_epel ## SIZE ## _h6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][0] = put_vp8_epel ## SIZE ## _v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][1] = put_vp8_epel ## SIZE ## _h4v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][1][2] = put_vp8_epel ## SIZE ## _h6v4_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][0] = put_vp8_epel ## SIZE ## _v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][1] = put_vp8_epel ## SIZE ## _h4v6_c; \
    dsp->put_vp8_epel_pixels_tab[IDX][2][2] = put_vp8_epel ## SIZE ## _h6v6_c

#define VP8_BILINEAR_MC_FUNC(IDX, SIZE) \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = put_vp8_pixels ## SIZE ## _c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][1] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][0][2] = put_vp8_bilinear ## SIZE ## _h_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][1][2] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][0] = put_vp8_bilinear ## SIZE ## _v_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][1] = put_vp8_bilinear ## SIZE ## _hv_c; \
    dsp->put_vp8_bilinear_pixels_tab[IDX][2][2] = put_vp8_bilinear ## SIZE ## _hv_c

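/* Fill the shared dispatch table. When both decoders are enabled, the vp7
 * flag picks between the vp7_* and vp8_* variants at init time; the
 * arch-specific init functions may then override individual entries. */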
av_cold void ff_vp8dsp_init(VP8DSPContext *dsp, int vp7)
{
#if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
#define VPX(f) vp7 ? vp7_ ## f : vp8_ ## f
#elif CONFIG_VP7_DECODER
#define VPX(f) vp7_ ## f
#else // CONFIG_VP8_DECODER
#define VPX(f) vp8_ ## f
#endif

    dsp->vp8_luma_dc_wht    = VPX(luma_dc_wht_c);
    dsp->vp8_luma_dc_wht_dc = VPX(luma_dc_wht_dc_c);
    dsp->vp8_idct_add       = VPX(idct_add_c);
    dsp->vp8_idct_dc_add    = VPX(idct_dc_add_c);
    dsp->vp8_idct_dc_add4y  = VPX(idct_dc_add4y_c);
    dsp->vp8_idct_dc_add4uv = VPX(idct_dc_add4uv_c);

    dsp->vp8_v_loop_filter16y = VPX(v_loop_filter16_c);
    dsp->vp8_h_loop_filter16y = VPX(h_loop_filter16_c);
    dsp->vp8_v_loop_filter8uv = VPX(v_loop_filter8uv_c);
    dsp->vp8_h_loop_filter8uv = VPX(h_loop_filter8uv_c);

    dsp->vp8_v_loop_filter16y_inner = VPX(v_loop_filter16_inner_c);
    dsp->vp8_h_loop_filter16y_inner = VPX(h_loop_filter16_inner_c);
    dsp->vp8_v_loop_filter8uv_inner = VPX(v_loop_filter8uv_inner_c);
    dsp->vp8_h_loop_filter8uv_inner = VPX(h_loop_filter8uv_inner_c);

    dsp->vp8_v_loop_filter_simple = VPX(v_loop_filter_simple_c);
    dsp->vp8_h_loop_filter_simple = VPX(h_loop_filter_simple_c);

    VP8_MC_FUNC(0, 16);
    VP8_MC_FUNC(1, 8);
    VP8_MC_FUNC(2, 4);

    VP8_BILINEAR_MC_FUNC(0, 16);
    VP8_BILINEAR_MC_FUNC(1, 8);
    VP8_BILINEAR_MC_FUNC(2, 4);

    if (ARCH_ARM)
        ff_vp8dsp_init_arm(dsp, vp7);
    if (ARCH_PPC)
        ff_vp8dsp_init_ppc(dsp);
    if (ARCH_X86)
        ff_vp8dsp_init_x86(dsp, vp7);
}