2 * This file is part of FFmpeg.
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "libavcodec/vp8dsp.h"
22 void ff_vp8_luma_dc_wht_dc_armv6(DCTELEM block[4][4][16], DCTELEM dc[16]);
24 #define idct_funcs(opt) \
25 void ff_vp8_luma_dc_wht_ ## opt(DCTELEM block[4][4][16], DCTELEM dc[16]); \
26 void ff_vp8_idct_add_ ## opt(uint8_t *dst, DCTELEM block[16], int stride); \
27 void ff_vp8_idct_dc_add_ ## opt(uint8_t *dst, DCTELEM block[16], int stride); \
28 void ff_vp8_idct_dc_add4y_ ## opt(uint8_t *dst, DCTELEM block[4][16], int stride); \
29 void ff_vp8_idct_dc_add4uv_ ## opt(uint8_t *dst, DCTELEM block[4][16], int stride)
34 void ff_vp8_v_loop_filter16_neon(uint8_t *dst, int stride,
35 int flim_E, int flim_I, int hev_thresh);
36 void ff_vp8_h_loop_filter16_neon(uint8_t *dst, int stride,
37 int flim_E, int flim_I, int hev_thresh);
38 void ff_vp8_v_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, int stride,
39 int flim_E, int flim_I, int hev_thresh);
40 void ff_vp8_h_loop_filter8uv_neon(uint8_t *dstU, uint8_t *dstV, int stride,
41 int flim_E, int flim_I, int hev_thresh);
43 void ff_vp8_v_loop_filter16_inner_neon(uint8_t *dst, int stride,
44 int flim_E, int flim_I, int hev_thresh);
45 void ff_vp8_h_loop_filter16_inner_neon(uint8_t *dst, int stride,
46 int flim_E, int flim_I, int hev_thresh);
47 void ff_vp8_v_loop_filter8uv_inner_neon(uint8_t *dstU, uint8_t *dstV,
48 int stride, int flim_E, int flim_I,
50 void ff_vp8_h_loop_filter8uv_inner_neon(uint8_t *dstU, uint8_t *dstV,
51 int stride, int flim_E, int flim_I,
54 void ff_vp8_v_loop_filter_inner_armv6(uint8_t *dst, int stride,
55 int flim_E, int flim_I,
56 int hev_thresh, int count);
57 void ff_vp8_h_loop_filter_inner_armv6(uint8_t *dst, int stride,
58 int flim_E, int flim_I,
59 int hev_thresh, int count);
60 void ff_vp8_v_loop_filter_armv6(uint8_t *dst, int stride,
61 int flim_E, int flim_I,
62 int hev_thresh, int count);
63 void ff_vp8_h_loop_filter_armv6(uint8_t *dst, int stride,
64 int flim_E, int flim_I,
65 int hev_thresh, int count);
67 static void ff_vp8_v_loop_filter16_armv6(uint8_t *dst, int stride,
68 int flim_E, int flim_I, int hev_thresh)
70 ff_vp8_v_loop_filter_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
73 static void ff_vp8_h_loop_filter16_armv6(uint8_t *dst, int stride,
74 int flim_E, int flim_I, int hev_thresh)
76 ff_vp8_h_loop_filter_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
79 static void ff_vp8_v_loop_filter8uv_armv6(uint8_t *dstU, uint8_t *dstV, int stride,
80 int flim_E, int flim_I, int hev_thresh)
82 ff_vp8_v_loop_filter_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
83 ff_vp8_v_loop_filter_armv6(dstV, stride, flim_E, flim_I, hev_thresh, 2);
86 static void ff_vp8_h_loop_filter8uv_armv6(uint8_t *dstU, uint8_t *dstV, int stride,
87 int flim_E, int flim_I, int hev_thresh)
89 ff_vp8_h_loop_filter_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
90 ff_vp8_h_loop_filter_armv6(dstV, stride, flim_E, flim_I, hev_thresh, 2);
93 static void ff_vp8_v_loop_filter16_inner_armv6(uint8_t *dst, int stride,
94 int flim_E, int flim_I, int hev_thresh)
96 ff_vp8_v_loop_filter_inner_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
99 static void ff_vp8_h_loop_filter16_inner_armv6(uint8_t *dst, int stride,
100 int flim_E, int flim_I, int hev_thresh)
102 ff_vp8_h_loop_filter_inner_armv6(dst, stride, flim_E, flim_I, hev_thresh, 4);
105 static void ff_vp8_v_loop_filter8uv_inner_armv6(uint8_t *dstU, uint8_t *dstV,
106 int stride, int flim_E, int flim_I,
109 ff_vp8_v_loop_filter_inner_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
110 ff_vp8_v_loop_filter_inner_armv6(dstV, stride, flim_E, flim_I, hev_thresh, 2);
113 static void ff_vp8_h_loop_filter8uv_inner_armv6(uint8_t *dstU, uint8_t *dstV,
114 int stride, int flim_E, int flim_I,
117 ff_vp8_h_loop_filter_inner_armv6(dstU, stride, flim_E, flim_I, hev_thresh, 2);
118 ff_vp8_h_loop_filter_inner_armv6(dstV, stride, flim_E, flim_I, hev_thresh, 2);
121 #define simple_lf_funcs(opt) \
122 void ff_vp8_v_loop_filter16_simple_ ## opt(uint8_t *dst, int stride, int flim); \
123 void ff_vp8_h_loop_filter16_simple_ ## opt(uint8_t *dst, int stride, int flim)
125 simple_lf_funcs(neon);
126 simple_lf_funcs(armv6);
/* Prototype one motion-compensation function for a given optimisation
 * flavour.  NOTE(review): the tail of this macro (the h/mx/my parameter
 * line) is not visible in this chunk and was reconstructed from the
 * VP8_EPEL_HV callers below — confirm against upstream. */
#define VP8_MC_OPT(n, opt)                                                    \
void ff_put_vp8_##n##_##opt(uint8_t *dst, int dststride,                      \
                            uint8_t *src, int srcstride,                      \
                            int h, int mx, int my)
/* Declare the eight subpel filter combinations (4/6-tap, H and/or V)
 * for one block width w via VP8_MC (presumably the NEON shorthand for
 * VP8_MC_OPT, defined in an elided portion of this file). */
#define VP8_EPEL(w)               \
VP8_MC(epel ## w ## _h4);         \
VP8_MC(epel ## w ## _h6);         \
VP8_MC(epel ## w ## _h4v4);       \
VP8_MC(epel ## w ## _h6v4);       \
VP8_MC(epel ## w ## _v4);         \
VP8_MC(epel ## w ## _v6);         \
VP8_MC(epel ## w ## _h4v6);       \
VP8_MC(epel ## w ## _h6v6)
/* Plain copy routines; only ARMv6 versions are visible in this chunk. */
VP8_MC_OPT(pixels16, armv6);
VP8_MC_OPT(pixels8,  armv6);
VP8_MC_OPT(pixels4,  armv6);
/* Prototype a width-generic ARMv6 MC routine: unlike VP8_MC_OPT these
 * take an explicit width w and a single combined subpel offset mxy. */
#define VP8_V6_MC(n)                                                          \
void ff_put_vp8_##n##_armv6(uint8_t *dst, int dststride, uint8_t *src,        \
                            int srcstride, int w, int h, int mxy)
/* Build a combined H+V subpel function from the separable ARMv6 H and V
 * passes.  The H pass filters TAPNUMY-1 extra rows into a stack buffer
 * so the V pass has its top/bottom context; tmpptr points at the first
 * "real" output row inside that buffer.
 * NOTE(review): the braces and the tail of the second call were
 * reconstructed (7-arg VP8_V6_MC signature) — confirm upstream. */
#define VP8_EPEL_HV(SIZE, TAPNUMX, TAPNUMY, NAME, HNAME, VNAME, MAXHEIGHT)    \
static void ff_put_vp8_##NAME##SIZE##_##HNAME##VNAME##_armv6(                 \
    uint8_t *dst, int dststride, uint8_t *src,                                \
    int srcstride, int h, int mx, int my)                                     \
{                                                                             \
    DECLARE_ALIGNED(4, uint8_t, tmp)[SIZE * (MAXHEIGHT + TAPNUMY - 1)];       \
    uint8_t *tmpptr = tmp + SIZE * (TAPNUMY / 2 - 1);                         \
    src -= srcstride * (TAPNUMY / 2 - 1);                                     \
    ff_put_vp8_ ## NAME ## _ ## HNAME ## _armv6(tmp, SIZE, src, srcstride,    \
                                                SIZE, h + TAPNUMY - 1, mx);   \
    ff_put_vp8_ ## NAME ## _ ## VNAME ## _armv6(dst, dststride, tmpptr, SIZE, \
                                                SIZE, h, my);                 \
}
190 VP8_EPEL_HV(16, 6, 6, epel, h6, v6, 16);
191 VP8_EPEL_HV(16, 2, 2, bilin, h, v, 16);
192 VP8_EPEL_HV(8, 6, 6, epel, h6, v6, 16);
193 VP8_EPEL_HV(8, 4, 6, epel, h4, v6, 16);
194 VP8_EPEL_HV(8, 6, 4, epel, h6, v4, 16);
195 VP8_EPEL_HV(8, 4, 4, epel, h4, v4, 16);
196 VP8_EPEL_HV(8, 2, 2, bilin, h, v, 16);
197 VP8_EPEL_HV(4, 6, 6, epel, h6, v6, 8);
198 VP8_EPEL_HV(4, 4, 6, epel, h4, v6, 8);
199 VP8_EPEL_HV(4, 6, 4, epel, h6, v4, 8);
200 VP8_EPEL_HV(4, 4, 4, epel, h4, v4, 8);
201 VP8_EPEL_HV(4, 2, 2, bilin, h, v, 8);
203 extern void put_vp8_epel4_v6_c(uint8_t *dst, int d, uint8_t *src, int s, int h, int mx, int my);
/* Adapt a width-generic one-dimensional ARMv6 filter to the fixed-width
 * dsp-table signature.  For a pure H or V filter only one of mx/my is
 * nonzero, so whichever is set is forwarded as the single mxy argument.
 * NOTE(review): the call tail and closing brace were reconstructed
 * (7-arg VP8_V6_MC signature, mxy = mx | my) — confirm upstream. */
#define VP8_EPEL_H_OR_V(SIZE, NAME, HV)                                       \
static void ff_put_vp8_##NAME##SIZE##_##HV##_armv6(                           \
    uint8_t *dst, int dststride, uint8_t *src,                                \
    int srcstride, int h, int mx, int my)                                     \
{                                                                             \
    ff_put_vp8_## NAME ## _ ## HV ## _armv6(dst, dststride, src, srcstride,   \
                                            SIZE, h, mx | my);                \
}
214 VP8_EPEL_H_OR_V(4, epel, h6);
215 VP8_EPEL_H_OR_V(4, epel, h4);
216 VP8_EPEL_H_OR_V(4, epel, v6);
217 VP8_EPEL_H_OR_V(4, epel, v4);
218 VP8_EPEL_H_OR_V(4, bilin, v);
219 VP8_EPEL_H_OR_V(4, bilin, h);
220 VP8_EPEL_H_OR_V(8, epel, h6);
221 VP8_EPEL_H_OR_V(8, epel, h4);
222 VP8_EPEL_H_OR_V(8, epel, v6);
223 VP8_EPEL_H_OR_V(8, epel, v4);
224 VP8_EPEL_H_OR_V(8, bilin, v);
225 VP8_EPEL_H_OR_V(8, bilin, h);
226 VP8_EPEL_H_OR_V(16, epel, h6);
227 VP8_EPEL_H_OR_V(16, epel, v6);
228 VP8_EPEL_H_OR_V(16, bilin, v);
229 VP8_EPEL_H_OR_V(16, bilin, h);
231 av_cold void ff_vp8dsp_init_arm(VP8DSPContext *dsp)
/* Install the complete "opt"-flavoured function-pointer set into the dsp
 * context.  The DC-only luma WHT and the 4-wide copy are wired to the
 * ARMv6 versions unconditionally, matching the declarations above, which
 * show no other flavour of those two routines.  Table indices are
 * presumably [size][y-frac-class][x-frac-class] — verify in vp8dsp.h. */
#define set_func_ptrs(opt)                                                    \
    dsp->vp8_luma_dc_wht    = ff_vp8_luma_dc_wht_##opt;                       \
    dsp->vp8_luma_dc_wht_dc = ff_vp8_luma_dc_wht_dc_armv6;                    \
    /* inverse transforms */                                                  \
    dsp->vp8_idct_add       = ff_vp8_idct_add_##opt;                          \
    dsp->vp8_idct_dc_add    = ff_vp8_idct_dc_add_##opt;                       \
    dsp->vp8_idct_dc_add4y  = ff_vp8_idct_dc_add4y_##opt;                     \
    dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_##opt;                    \
    /* macroblock-edge loop filters */                                        \
    dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_##opt;                 \
    dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_##opt;                 \
    dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_##opt;                \
    dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_##opt;                \
    /* inner-edge loop filters */                                             \
    dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_##opt;     \
    dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_##opt;     \
    dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_##opt;    \
    dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_##opt;    \
    /* simple loop filters */                                                 \
    dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_##opt;      \
    dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_##opt;      \
    /* 4/6-tap subpel MC */                                                   \
    dsp->put_vp8_epel_pixels_tab[0][0][0] = ff_put_vp8_pixels16_##opt;        \
    dsp->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_##opt;       \
    dsp->put_vp8_epel_pixels_tab[0][2][0] = ff_put_vp8_epel16_v6_##opt;       \
    dsp->put_vp8_epel_pixels_tab[0][2][2] = ff_put_vp8_epel16_h6v6_##opt;     \
    dsp->put_vp8_epel_pixels_tab[1][0][0] = ff_put_vp8_pixels8_##opt;         \
    dsp->put_vp8_epel_pixels_tab[1][0][1] = ff_put_vp8_epel8_h4_##opt;        \
    dsp->put_vp8_epel_pixels_tab[1][0][2] = ff_put_vp8_epel8_h6_##opt;        \
    dsp->put_vp8_epel_pixels_tab[1][1][0] = ff_put_vp8_epel8_v4_##opt;        \
    dsp->put_vp8_epel_pixels_tab[1][1][1] = ff_put_vp8_epel8_h4v4_##opt;      \
    dsp->put_vp8_epel_pixels_tab[1][1][2] = ff_put_vp8_epel8_h6v4_##opt;      \
    dsp->put_vp8_epel_pixels_tab[1][2][0] = ff_put_vp8_epel8_v6_##opt;        \
    dsp->put_vp8_epel_pixels_tab[1][2][1] = ff_put_vp8_epel8_h4v6_##opt;      \
    dsp->put_vp8_epel_pixels_tab[1][2][2] = ff_put_vp8_epel8_h6v6_##opt;      \
    dsp->put_vp8_epel_pixels_tab[2][0][0] = ff_put_vp8_pixels4_armv6;         \
    dsp->put_vp8_epel_pixels_tab[2][0][1] = ff_put_vp8_epel4_h4_##opt;        \
    dsp->put_vp8_epel_pixels_tab[2][0][2] = ff_put_vp8_epel4_h6_##opt;        \
    dsp->put_vp8_epel_pixels_tab[2][1][0] = ff_put_vp8_epel4_v4_##opt;        \
    dsp->put_vp8_epel_pixels_tab[2][1][1] = ff_put_vp8_epel4_h4v4_##opt;      \
    dsp->put_vp8_epel_pixels_tab[2][1][2] = ff_put_vp8_epel4_h6v4_##opt;      \
    dsp->put_vp8_epel_pixels_tab[2][2][0] = ff_put_vp8_epel4_v6_##opt;        \
    dsp->put_vp8_epel_pixels_tab[2][2][1] = ff_put_vp8_epel4_h4v6_##opt;      \
    dsp->put_vp8_epel_pixels_tab[2][2][2] = ff_put_vp8_epel4_h6v6_##opt;      \
    /* bilinear MC; fraction classes 1 and 2 share one routine */             \
    dsp->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[0][0][2] = ff_put_vp8_bilin16_h_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[0][2][0] = ff_put_vp8_bilin16_v_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[0][2][2] = ff_put_vp8_bilin16_hv_##opt;  \
    dsp->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_##opt;     \
    dsp->put_vp8_bilinear_pixels_tab[1][0][1] = ff_put_vp8_bilin8_h_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[1][0][2] = ff_put_vp8_bilin8_h_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[1][1][0] = ff_put_vp8_bilin8_v_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[1][1][1] = ff_put_vp8_bilin8_hv_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[1][1][2] = ff_put_vp8_bilin8_hv_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[1][2][0] = ff_put_vp8_bilin8_v_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[1][2][1] = ff_put_vp8_bilin8_hv_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[1][2][2] = ff_put_vp8_bilin8_hv_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[2][0][0] = ff_put_vp8_pixels4_armv6;     \
    dsp->put_vp8_bilinear_pixels_tab[2][0][1] = ff_put_vp8_bilin4_h_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[2][0][2] = ff_put_vp8_bilin4_h_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[2][1][0] = ff_put_vp8_bilin4_v_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[2][1][1] = ff_put_vp8_bilin4_hv_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[2][1][2] = ff_put_vp8_bilin4_hv_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[2][2][0] = ff_put_vp8_bilin4_v_##opt;    \
    dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_##opt;   \
    dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_##opt
306 } else if (HAVE_ARMV6) {
307 set_func_ptrs(armv6);