2 * VP9 SIMD optimizations
4 * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavutil/attributes.h"
24 #include "libavutil/cpu.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/x86/asm.h"
27 #include "libavutil/x86/cpu.h"
28 #include "libavcodec/vp9dsp.h"
/* Prototype generator for the full-pel copy/average assembly routines.
 * Expands to: void ff_vp9_<put|avg><sz>_<opt>(dst, dst_stride, src,
 * src_stride, h, mx, my). The mx/my subpel arguments are part of the
 * common MC signature; full-pel functions ignore them (presumably —
 * bodies live in the .asm files, not visible here). */
32 #define fpel_func(avg, sz, opt) \
33 void ff_vp9_##avg##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
34 const uint8_t *src, ptrdiff_t src_stride, \
35 int h, int mx, int my)
/* One prototype per (operation, block width, instruction set) combination;
 * wider blocks use wider SIMD (mmx/sse for put, mmxext/sse2/avx2 for avg). */
36 fpel_func(put, 4, mmx);
37 fpel_func(put, 8, mmx);
38 fpel_func(put, 16, sse);
39 fpel_func(put, 32, sse);
40 fpel_func(put, 64, sse);
41 fpel_func(avg, 4, mmxext);
42 fpel_func(avg, 8, mmxext);
43 fpel_func(avg, 16, sse2);
44 fpel_func(avg, 32, sse2);
45 fpel_func(avg, 64, sse2);
46 fpel_func(put, 32, avx);
47 fpel_func(put, 64, avx);
48 fpel_func(avg, 32, avx2);
49 fpel_func(avg, 64, avx2);
/* Prototype generator for the 1-D 8-tap subpel filter assembly routines.
 * "filter" points into a coefficient table: rows of f_sz entries of the
 * given integer type (int16_t[8] for the sse2 variants, int8_t[32] for
 * the ssse3/avx2 variants — matches the invocations below). */
52 #define mc_func(avg, sz, dir, opt, type, f_sz) \
53 void ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
54 const uint8_t *src, ptrdiff_t src_stride, \
55 int h, const type (*filter)[f_sz])
/* Declare all four put/avg x horizontal/vertical variants for one size. */
56 #define mc_funcs(sz, opt, type, fsz) \
57 mc_func(put, sz, h, opt, type, fsz); \
58 mc_func(avg, sz, h, opt, type, fsz); \
59 mc_func(put, sz, v, opt, type, fsz); \
60 mc_func(avg, sz, v, opt, type, fsz)
/* Base sizes implemented directly in assembly; larger blocks are built
 * from these via the mc_rep_* splitters further down. */
62 mc_funcs(4, mmxext, int16_t, 8);
63 mc_funcs(8, sse2, int16_t, 8);
64 mc_funcs(4, ssse3, int8_t, 32);
65 mc_funcs(8, ssse3, int8_t, 32);
67 mc_funcs(16, ssse3, int8_t, 32);
68 mc_funcs(32, avx2, int8_t, 32);
/* Synthesize an sz-wide 1-D filter from two side-by-side calls to the
 * hsz-wide (half-width) assembly routine: left half at dst/src, right
 * half offset by hsz bytes. Same signature as the native mc_func
 * routines so both can be plugged into the same dsp->mc[] slots. */
74 #define mc_rep_func(avg, sz, hsz, dir, opt, type, f_sz) \
75 static av_always_inline void \
76 ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
77 const uint8_t *src, ptrdiff_t src_stride, \
78 int h, const type (*filter)[f_sz]) \
80 ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst, dst_stride, src, \
81 src_stride, h, filter); \
82 ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst + hsz, dst_stride, src + hsz, \
83 src_stride, h, filter); \
86 #define mc_rep_funcs(sz, hsz, opt, type, fsz) \
87 mc_rep_func(put, sz, hsz, h, opt, type, fsz); \
88 mc_rep_func(avg, sz, hsz, h, opt, type, fsz); \
89 mc_rep_func(put, sz, hsz, v, opt, type, fsz); \
90 mc_rep_func(avg, sz, hsz, v, opt, type, fsz)
/* Chain 8->16->32->64 for sse2, 16->32->64 for ssse3, so every size up
 * to 64 is covered by each instruction-set tier. */
92 mc_rep_funcs(16, 8, sse2, int16_t, 8);
94 mc_rep_funcs(16, 8, ssse3, int8_t, 32);
96 mc_rep_funcs(32, 16, sse2, int16_t, 8);
97 mc_rep_funcs(32, 16, ssse3, int8_t, 32);
98 mc_rep_funcs(64, 32, sse2, int16_t, 8);
99 mc_rep_funcs(64, 32, ssse3, int8_t, 32);
/* The 32-wide avx2 primitives are only assembled on x86-64. */
100 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
101 mc_rep_funcs(64, 32, avx2, int8_t, 32);
/* Subpel coefficient tables defined in the assembly sources:
 * indexed [filter kind][subpel position 1..15 - 1][tap group][lane]. */
107 extern const int8_t ff_filters_ssse3[3][15][4][32];
108 extern const int16_t ff_filters_sse2[3][15][8][8];
/* 2-D (hv) subpel filtering built from the 1-D primitives: horizontal
 * 8-tap into a 71x64 aligned scratch buffer (h + 7 rows: 3 above and 4
 * below for the vertical taps, hence the src - 3 * src_stride start),
 * then vertical 8-tap from scratch row 3 into dst. Only the second pass
 * uses "op" (put vs avg); the first pass is always a put. */
110 #define filter_8tap_2d_fn(op, sz, f, f_opt, fname, align, opt) \
111 static void op##_8tap_##fname##_##sz##hv_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
112 const uint8_t *src, ptrdiff_t src_stride, \
113 int h, int mx, int my) \
115 LOCAL_ALIGNED_##align(uint8_t, temp, [71 * 64]); \
116 ff_vp9_put_8tap_1d_h_##sz##_##opt(temp, 64, src - 3 * src_stride, src_stride, \
117 h + 7, ff_filters_##f_opt[f][mx - 1]); \
118 ff_vp9_##op##_8tap_1d_v_##sz##_##opt(dst, dst_stride, temp + 3 * 64, 64, \
119 h, ff_filters_##f_opt[f][my - 1]); \
122 #define filters_8tap_2d_fn(op, sz, align, opt, f_opt) \
123 filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, align, opt) \
124 filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, align, opt) \
125 filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, align, opt)
127 #define filters_8tap_2d_fn2(op, align, opt4, opt8, f_opt) \
128 filters_8tap_2d_fn(op, 64, align, opt8, f_opt) \
129 filters_8tap_2d_fn(op, 32, align, opt8, f_opt) \
130 filters_8tap_2d_fn(op, 16, align, opt8, f_opt) \
131 filters_8tap_2d_fn(op, 8, align, opt8, f_opt) \
132 filters_8tap_2d_fn(op, 4, align, opt4, f_opt)
/* Instantiate the 2-D wrappers: 4-wide blocks fall back to opt4
 * (mmxext resp. ssse3), all larger sizes use opt8. */
134 filters_8tap_2d_fn2(put, 16, mmxext, sse2, sse2)
135 filters_8tap_2d_fn2(avg, 16, mmxext, sse2, sse2)
136 filters_8tap_2d_fn2(put, 16, ssse3, ssse3, ssse3)
137 filters_8tap_2d_fn2(avg, 16, ssse3, ssse3, ssse3)
/* avx2 1-D primitives exist only on x86-64; they reuse the ssse3 tables. */
138 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
139 filters_8tap_2d_fn(put, 64, 32, avx2, ssse3)
140 filters_8tap_2d_fn(put, 32, 32, avx2, ssse3)
141 filters_8tap_2d_fn(avg, 64, 32, avx2, ssse3)
142 filters_8tap_2d_fn(avg, 32, 32, avx2, ssse3)
145 #undef filters_8tap_2d_fn2
146 #undef filters_8tap_2d_fn
147 #undef filter_8tap_2d_fn
/* 1-D subpel wrapper: adapts the generic mc signature (h, mx, my) to a
 * direct call of the 1-D assembly primitive, selecting the coefficient
 * row from the relevant subpel variable (dvar = mx for horizontal,
 * my for vertical; positions are 1..15, hence the "- 1"). */
149 #define filter_8tap_1d_fn(op, sz, f, f_opt, fname, dir, dvar, opt) \
150 static void op##_8tap_##fname##_##sz##dir##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
151 const uint8_t *src, ptrdiff_t src_stride, \
152 int h, int mx, int my) \
154 ff_vp9_##op##_8tap_1d_##dir##_##sz##_##opt(dst, dst_stride, src, src_stride, \
155 h, ff_filters_##f_opt[f][dvar - 1]); \
/* Expand over the three 8-tap filter kinds (regular/sharp/smooth). */
158 #define filters_8tap_1d_fn(op, sz, dir, dvar, opt, f_opt) \
159 filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, dir, dvar, opt) \
160 filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, dir, dvar, opt) \
161 filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, dir, dvar, opt)
/* ... then over both directions ... */
163 #define filters_8tap_1d_fn2(op, sz, opt, f_opt) \
164 filters_8tap_1d_fn(op, sz, h, mx, opt, f_opt) \
165 filters_8tap_1d_fn(op, sz, v, my, opt, f_opt)
/* ... then over all block sizes, with 4-wide falling back to opt4. */
167 #define filters_8tap_1d_fn3(op, opt4, opt8, f_opt) \
168 filters_8tap_1d_fn2(op, 64, opt8, f_opt) \
169 filters_8tap_1d_fn2(op, 32, opt8, f_opt) \
170 filters_8tap_1d_fn2(op, 16, opt8, f_opt) \
171 filters_8tap_1d_fn2(op, 8, opt8, f_opt) \
172 filters_8tap_1d_fn2(op, 4, opt4, f_opt)
174 filters_8tap_1d_fn3(put, mmxext, sse2, sse2)
175 filters_8tap_1d_fn3(avg, mmxext, sse2, sse2)
176 filters_8tap_1d_fn3(put, ssse3, ssse3, ssse3)
177 filters_8tap_1d_fn3(avg, ssse3, ssse3, ssse3)
/* avx2 wrappers (x86-64 only) reuse the ssse3 coefficient tables. */
178 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
179 filters_8tap_1d_fn2(put, 64, avx2, ssse3)
180 filters_8tap_1d_fn2(put, 32, avx2, ssse3)
181 filters_8tap_1d_fn2(avg, 64, avx2, ssse3)
182 filters_8tap_1d_fn2(avg, 32, avx2, ssse3)
185 #undef filters_8tap_1d_fn
186 #undef filters_8tap_1d_fn2
187 #undef filters_8tap_1d_fn3
188 #undef filter_8tap_1d_fn
/* Prototype generator for the inverse-transform-and-add assembly
 * routines: ff_vp9_<typea>_<typeb>_<size>x<size>_add_<opt>(dst, stride,
 * block, eob). typea/typeb select the row/column transform pair
 * (idct, iadst, or iwht for lossless). */
190 #define itxfm_func(typea, typeb, size, opt) \
191 void ff_vp9_##typea##_##typeb##_##size##x##size##_add_##opt(uint8_t *dst, ptrdiff_t stride, \
192 int16_t *block, int eob)
/* Declare all four idct/iadst combinations for one size and opt level. */
193 #define itxfm_funcs(size, opt) \
194 itxfm_func(idct, idct, size, opt); \
195 itxfm_func(iadst, idct, size, opt); \
196 itxfm_func(idct, iadst, size, opt); \
197 itxfm_func(iadst, iadst, size, opt)
199 itxfm_func(idct, idct, 4, mmxext);
200 itxfm_func(idct, iadst, 4, sse2);
201 itxfm_func(iadst, idct, 4, sse2);
202 itxfm_func(iadst, iadst, 4, sse2);
203 itxfm_funcs(4, ssse3);
204 itxfm_funcs(8, sse2);
205 itxfm_funcs(8, ssse3);
207 itxfm_funcs(16, sse2);
208 itxfm_funcs(16, ssse3);
209 itxfm_funcs(16, avx);
/* 32x32 only has the idct/idct variant; the init code below maps all
 * four transform types of TX_32X32 to it. */
210 itxfm_func(idct, idct, 32, sse2);
211 itxfm_func(idct, idct, 32, ssse3);
212 itxfm_func(idct, idct, 32, avx);
213 itxfm_func(iwht, iwht, 4, mmx);
/* Prototype generator for the loop-filter assembly routines, vertical
 * and horizontal edge variants. E/I/H are the VP9 loop-filter
 * thresholds. size1 encodes the filter width pair (16, or two-digit
 * mixes 44/48/84/88 used by loop_filter_mix2 below). */
218 #define lpf_funcs(size1, size2, opt) \
219 void ff_vp9_loop_filter_v_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \
220 int E, int I, int H); \
221 void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \
224 lpf_funcs(16, 16, sse2);
225 lpf_funcs(16, 16, ssse3);
226 lpf_funcs(16, 16, avx);
227 lpf_funcs(44, 16, sse2);
228 lpf_funcs(44, 16, ssse3);
229 lpf_funcs(44, 16, avx);
230 lpf_funcs(84, 16, sse2);
231 lpf_funcs(84, 16, ssse3);
232 lpf_funcs(84, 16, avx);
233 lpf_funcs(48, 16, sse2);
234 lpf_funcs(48, 16, ssse3);
235 lpf_funcs(48, 16, avx);
236 lpf_funcs(88, 16, sse2);
237 lpf_funcs(88, 16, ssse3);
238 lpf_funcs(88, 16, avx);
/* Prototype generator for the intra-prediction assembly routines:
 * l = left neighbour samples, a = above neighbour samples. */
242 #define ipred_func(size, type, opt) \
243 void ff_vp9_ipred_##type##_##size##x##size##_##opt(uint8_t *dst, ptrdiff_t stride, \
244 const uint8_t *l, const uint8_t *a)
/* Declare one predictor type at all four transform sizes. */
246 #define ipred_funcs(type, opt) \
247 ipred_func(4, type, opt); \
248 ipred_func(8, type, opt); \
249 ipred_func(16, type, opt); \
250 ipred_func(32, type, opt)
252 ipred_funcs(dc, ssse3);
253 ipred_funcs(dc_left, ssse3);
254 ipred_funcs(dc_top, ssse3);
/* Vertical prediction is a plain copy, so plain mmx/sse2 suffice. */
258 ipred_func(8, v, mmx);
259 ipred_func(16, v, sse2);
260 ipred_func(32, v, sse2);
/* Second ipred_funcs shape: declare each directional predictor in two
 * opt levels per size (4x4 exists only in opt1). NOTE(review): this
 * redefines ipred_funcs with a different arity — presumably the
 * intervening #undef sits on a line outside this view; confirm against
 * the full file. */
262 #define ipred_func_set(size, type, opt1, opt2) \
263 ipred_func(size, type, opt1); \
264 ipred_func(size, type, opt2)
266 #define ipred_funcs(type, opt1, opt2) \
267 ipred_func(4, type, opt1); \
268 ipred_func_set(8, type, opt1, opt2); \
269 ipred_func_set(16, type, opt1, opt2); \
270 ipred_func_set(32, type, opt1, opt2)
272 ipred_funcs(h, ssse3, avx);
273 ipred_funcs(tm, ssse3, avx);
274 ipred_funcs(dl, ssse3, avx);
275 ipred_funcs(dr, ssse3, avx);
276 ipred_funcs(hu, ssse3, avx);
277 ipred_funcs(hd, ssse3, avx);
278 ipred_funcs(vl, ssse3, avx);
279 ipred_funcs(vr, ssse3, avx);
/* 32x32-only avx2 predictors. */
281 ipred_func(32, dc, avx2);
282 ipred_func(32, dc_left, avx2);
283 ipred_func(32, dc_top, avx2);
284 ipred_func(32, v, avx2);
285 ipred_func(32, h, avx2);
286 ipred_func(32, tm, avx2);
289 #undef ipred_func_set
292 #endif /* HAVE_YASM */
/* Fill the VP9DSPContext function-pointer tables with the best x86
 * SIMD implementation the running CPU supports. Branches are ordered
 * from weakest (MMX) to strongest (AVX2) instruction set, so later
 * branches overwrite earlier assignments with faster versions. */
294 av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
297 int cpu_flags = av_get_cpu_flags();
/* Route all four filter kinds of a full-pel (0,0) position to the same
 * plain copy/avg routine. idx1 selects the block size (0=64 ... 4=4,
 * per the call sites below); idx2 selects put (0) vs avg (1). */
299 #define init_fpel(idx1, idx2, sz, type, opt) \
300 dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
301 dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
302 dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = \
303 dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_vp9_##type##sz##_##opt
/* Hook up one subpel direction (idxh/idxv pick the h, v or hv slot)
 * for the three 8-tap filter kinds. */
305 #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, opt) \
306 dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_##opt; \
307 dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type##_8tap_regular_##sz##dir##_##opt; \
308 dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = type##_8tap_sharp_##sz##dir##_##opt
/* All three subpel positions (hv, v, h) for one block size. */
310 #define init_subpel2(idx1, idx2, sz, type, opt) \
311 init_subpel1(idx1, idx2, 1, 1, sz, hv, type, opt); \
312 init_subpel1(idx1, idx2, 0, 1, sz, v, type, opt); \
313 init_subpel1(idx1, idx2, 1, 0, sz, h, type, opt)
/* Size-range helpers: 32/64 only, 8..64, and 4..64 respectively. */
315 #define init_subpel3_32_64(idx, type, opt) \
316 init_subpel2(0, idx, 64, type, opt); \
317 init_subpel2(1, idx, 32, type, opt)
319 #define init_subpel3_8to64(idx, type, opt) \
320 init_subpel3_32_64(idx, type, opt); \
321 init_subpel2(2, idx, 16, type, opt); \
322 init_subpel2(3, idx, 8, type, opt)
324 #define init_subpel3(idx, type, opt) \
325 init_subpel3_8to64(idx, type, opt); \
326 init_subpel2(4, idx, 4, type, opt)
/* Install the full loop-filter set for one opt level: 16-wide h/v pairs
 * plus the four 4/8 mixed-width combinations. */
328 #define init_lpf(opt) do { \
330 dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
331 dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
332 dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
333 dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
334 dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
335 dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \
336 dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \
337 dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \
338 dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \
339 dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \
/* Install the directional intra predictors for one transform size.
 * vr/tm at 32x32 are skipped on 32-bit builds (x86-64-only asm). */
343 #define init_ipred(tx, sz, opt) do { \
344 dsp->intra_pred[tx][HOR_PRED] = ff_vp9_ipred_h_##sz##x##sz##_##opt; \
345 dsp->intra_pred[tx][DIAG_DOWN_LEFT_PRED] = ff_vp9_ipred_dl_##sz##x##sz##_##opt; \
346 dsp->intra_pred[tx][DIAG_DOWN_RIGHT_PRED] = ff_vp9_ipred_dr_##sz##x##sz##_##opt; \
347 dsp->intra_pred[tx][HOR_DOWN_PRED] = ff_vp9_ipred_hd_##sz##x##sz##_##opt; \
348 dsp->intra_pred[tx][VERT_LEFT_PRED] = ff_vp9_ipred_vl_##sz##x##sz##_##opt; \
349 dsp->intra_pred[tx][HOR_UP_PRED] = ff_vp9_ipred_hu_##sz##x##sz##_##opt; \
350 if (ARCH_X86_64 || tx != TX_32X32) { \
351 dsp->intra_pred[tx][VERT_RIGHT_PRED] = ff_vp9_ipred_vr_##sz##x##sz##_##opt; \
352 dsp->intra_pred[tx][TM_VP8_PRED] = ff_vp9_ipred_tm_##sz##x##sz##_##opt; \
/* init_ipred plus the three DC predictor variants. */
355 #define init_dc_ipred(tx, sz, opt) do { \
356 init_ipred(tx, sz, opt); \
357 dsp->intra_pred[tx][DC_PRED] = ff_vp9_ipred_dc_##sz##x##sz##_##opt; \
358 dsp->intra_pred[tx][LEFT_DC_PRED] = ff_vp9_ipred_dc_left_##sz##x##sz##_##opt; \
359 dsp->intra_pred[tx][TOP_DC_PRED] = ff_vp9_ipred_dc_top_##sz##x##sz##_##opt; \
/* MMX baseline: small full-pel puts, lossless 4x4 WHT, 8x8 vertical pred. */
362 if (EXTERNAL_MMX(cpu_flags)) {
363 init_fpel(4, 0, 4, put, mmx);
364 init_fpel(3, 0, 8, put, mmx);
365 dsp->itxfm_add[4 /* lossless */][DCT_DCT] =
366 dsp->itxfm_add[4 /* lossless */][ADST_DCT] =
367 dsp->itxfm_add[4 /* lossless */][DCT_ADST] =
368 dsp->itxfm_add[4 /* lossless */][ADST_ADST] = ff_vp9_iwht_iwht_4x4_add_mmx;
369 dsp->intra_pred[TX_8X8][VERT_PRED] = ff_vp9_ipred_v_8x8_mmx;
/* MMXEXT: 4-wide subpel put/avg, small full-pel avg, 4x4 idct. */
372 if (EXTERNAL_MMXEXT(cpu_flags)) {
373 init_subpel2(4, 0, 4, put, mmxext);
374 init_subpel2(4, 1, 4, avg, mmxext);
375 init_fpel(4, 1, 4, avg, mmxext);
376 init_fpel(3, 1, 8, avg, mmxext);
377 dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_mmxext;
/* SSE: wide full-pel puts. */
380 if (EXTERNAL_SSE(cpu_flags)) {
381 init_fpel(2, 0, 16, put, sse);
382 init_fpel(1, 0, 32, put, sse);
383 init_fpel(0, 0, 64, put, sse);
/* SSE2: subpel 8..64, wide full-pel avg, most inverse transforms,
 * 16/32 vertical intra pred. */
386 if (EXTERNAL_SSE2(cpu_flags)) {
387 init_subpel3_8to64(0, put, sse2);
388 init_subpel3_8to64(1, avg, sse2);
389 init_fpel(2, 1, 16, avg, sse2);
390 init_fpel(1, 1, 32, avg, sse2);
391 init_fpel(0, 1, 64, avg, sse2);
393 dsp->itxfm_add[TX_4X4][ADST_DCT] = ff_vp9_idct_iadst_4x4_add_sse2;
394 dsp->itxfm_add[TX_4X4][DCT_ADST] = ff_vp9_iadst_idct_4x4_add_sse2;
395 dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_sse2;
396 dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_sse2;
397 dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_sse2;
398 dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_sse2;
399 dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_sse2;
400 dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_sse2;
401 dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_sse2;
402 dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_sse2;
403 dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_sse2;
404 dsp->itxfm_add[TX_32X32][ADST_ADST] =
405 dsp->itxfm_add[TX_32X32][ADST_DCT] =
406 dsp->itxfm_add[TX_32X32][DCT_ADST] =
407 dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_sse2;
408 dsp->intra_pred[TX_16X16][VERT_PRED] = ff_vp9_ipred_v_16x16_sse2;
409 dsp->intra_pred[TX_32X32][VERT_PRED] = ff_vp9_ipred_v_32x32_sse2;
/* SSSE3: full subpel 4..64, faster transforms, DC + directional intra. */
412 if (EXTERNAL_SSSE3(cpu_flags)) {
413 init_subpel3(0, put, ssse3);
414 init_subpel3(1, avg, ssse3);
415 dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_ssse3;
416 dsp->itxfm_add[TX_4X4][ADST_DCT] = ff_vp9_idct_iadst_4x4_add_ssse3;
417 dsp->itxfm_add[TX_4X4][DCT_ADST] = ff_vp9_iadst_idct_4x4_add_ssse3;
418 dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_ssse3;
419 dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_ssse3;
420 dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_ssse3;
421 dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_ssse3;
422 dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_ssse3;
423 dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_ssse3;
424 dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_ssse3;
425 dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_ssse3;
426 dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_ssse3;
427 dsp->itxfm_add[TX_32X32][ADST_ADST] =
428 dsp->itxfm_add[TX_32X32][ADST_DCT] =
429 dsp->itxfm_add[TX_32X32][DCT_ADST] =
430 dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_ssse3;
432 init_dc_ipred(TX_4X4, 4, ssse3);
433 init_dc_ipred(TX_8X8, 8, ssse3);
434 init_dc_ipred(TX_16X16, 16, ssse3);
435 init_dc_ipred(TX_32X32, 32, ssse3);
/* AVX: transforms 8x8 and up, wide full-pel puts, directional intra. */
438 if (EXTERNAL_AVX(cpu_flags)) {
439 dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_avx;
440 dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_avx;
441 dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_avx;
442 dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_avx;
443 dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx;
444 dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_avx;
445 dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_avx;
446 dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_avx;
447 dsp->itxfm_add[TX_32X32][ADST_ADST] =
448 dsp->itxfm_add[TX_32X32][ADST_DCT] =
449 dsp->itxfm_add[TX_32X32][DCT_ADST] =
450 dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx;
451 init_fpel(1, 0, 32, put, avx);
452 init_fpel(0, 0, 64, put, avx);
454 init_ipred(TX_8X8, 8, avx);
455 init_ipred(TX_16X16, 16, avx);
456 init_ipred(TX_32X32, 32, avx);
/* AVX2: wide full-pel avg always; 32/64 subpel and 32x32 intra only on
 * x86-64 where the corresponding asm is built. */
459 if (EXTERNAL_AVX2(cpu_flags)) {
460 init_fpel(1, 1, 32, avg, avx2);
461 init_fpel(0, 1, 64, avg, avx2);
463 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
464 init_subpel3_32_64(0, put, avx2);
465 init_subpel3_32_64(1, avg, avx2);
468 dsp->intra_pred[TX_32X32][DC_PRED] = ff_vp9_ipred_dc_32x32_avx2;
469 dsp->intra_pred[TX_32X32][LEFT_DC_PRED] = ff_vp9_ipred_dc_left_32x32_avx2;
470 dsp->intra_pred[TX_32X32][TOP_DC_PRED] = ff_vp9_ipred_dc_top_32x32_avx2;
471 dsp->intra_pred[TX_32X32][VERT_PRED] = ff_vp9_ipred_v_32x32_avx2;
472 dsp->intra_pred[TX_32X32][HOR_PRED] = ff_vp9_ipred_h_32x32_avx2;
473 dsp->intra_pred[TX_32X32][TM_VP8_PRED] = ff_vp9_ipred_tm_32x32_avx2;
481 #endif /* HAVE_YASM */