/*
 * VP9 SIMD optimizations
 *
 * Copyright (c) 2013 Ronald S. Bultje <rsbultje gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include "libavutil/attributes.h"
24 #include "libavutil/cpu.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/x86/asm.h"
27 #include "libavutil/x86/cpu.h"
28 #include "libavcodec/vp9dsp.h"
32 #define fpel_func(avg, sz, opt) \
33 void ff_vp9_##avg##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
34 const uint8_t *src, ptrdiff_t src_stride, \
35 int h, int mx, int my)
36 fpel_func(put, 4, mmx);
37 fpel_func(put, 8, mmx);
38 fpel_func(put, 16, sse);
39 fpel_func(put, 32, sse);
40 fpel_func(put, 64, sse);
41 fpel_func(avg, 4, mmxext);
42 fpel_func(avg, 8, mmxext);
43 fpel_func(avg, 16, sse2);
44 fpel_func(avg, 32, sse2);
45 fpel_func(avg, 64, sse2);
46 fpel_func(put, 32, avx);
47 fpel_func(put, 64, avx);
48 fpel_func(avg, 32, avx2);
49 fpel_func(avg, 64, avx2);
/*
 * Prototypes for the 1-D (horizontal or vertical) 8-tap subpel filters in
 * assembly.  The coefficient row type and width differ per code path —
 * int16_t[8] for the sse2 table, int8_t[32] for the ssse3/avx2 table —
 * hence the type/f_sz macro parameters.
 */
#define mc_func(avg, sz, dir, opt, type, f_sz) \
void ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
                                                 const uint8_t *src, ptrdiff_t src_stride, \
                                                 int h, const type (*filter)[f_sz])

/* put/avg for both filter directions of one size. */
#define mc_funcs(sz, opt, type, fsz) \
mc_func(put, sz, h, opt, type, fsz); \
mc_func(avg, sz, h, opt, type, fsz); \
mc_func(put, sz, v, opt, type, fsz); \
mc_func(avg, sz, v, opt, type, fsz)

mc_funcs(4, mmxext, int16_t, 8);
mc_funcs(8, sse2, int16_t, 8);
mc_funcs(4, ssse3, int8_t, 32);
mc_funcs(8, ssse3, int8_t, 32);
#if ARCH_X86_64
/* 16-wide ssse3 and 32-wide avx2 asm exist only in the 64-bit build;
 * 32-bit synthesizes them from narrower calls (see mc_rep_funcs). */
mc_funcs(16, ssse3, int8_t, 32);
mc_funcs(32, avx2, int8_t, 32);
#endif
74 #define mc_rep_func(avg, sz, hsz, dir, opt, type, f_sz) \
75 static av_always_inline void \
76 ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
77 const uint8_t *src, ptrdiff_t src_stride, \
78 int h, const type (*filter)[f_sz]) \
80 ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst, dst_stride, src, \
81 src_stride, h, filter); \
82 ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst + hsz, dst_stride, src + hsz, \
83 src_stride, h, filter); \
86 #define mc_rep_funcs(sz, hsz, opt, type, fsz) \
87 mc_rep_func(put, sz, hsz, h, opt, type, fsz); \
88 mc_rep_func(avg, sz, hsz, h, opt, type, fsz); \
89 mc_rep_func(put, sz, hsz, v, opt, type, fsz); \
90 mc_rep_func(avg, sz, hsz, v, opt, type, fsz)
92 mc_rep_funcs(16, 8, sse2, int16_t, 8);
94 mc_rep_funcs(16, 8, ssse3, int8_t, 32);
96 mc_rep_funcs(32, 16, sse2, int16_t, 8);
97 mc_rep_funcs(32, 16, ssse3, int8_t, 32);
98 mc_rep_funcs(64, 32, sse2, int16_t, 8);
99 mc_rep_funcs(64, 32, ssse3, int8_t, 32);
100 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
101 mc_rep_funcs(64, 32, avx2, int8_t, 32);
/*
 * Subpel filter coefficient tables defined in the assembly source, indexed
 * [filter type][subpel position - 1][...] (the wrappers below index them
 * with mx - 1 / my - 1).  Inner layout differs between the ssse3 (int8_t)
 * and sse2 (int16_t) code paths.
 */
extern const int8_t  ff_filters_ssse3[3][15][4][32];
extern const int16_t ff_filters_sse2 [3][15][8][8];
110 #define filter_8tap_2d_fn(op, sz, f, f_opt, fname, align, opt) \
111 static void op##_8tap_##fname##_##sz##hv_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
112 const uint8_t *src, ptrdiff_t src_stride, \
113 int h, int mx, int my) \
115 LOCAL_ALIGNED_##align(uint8_t, temp, [71 * 64]); \
116 ff_vp9_put_8tap_1d_h_##sz##_##opt(temp, 64, src - 3 * src_stride, src_stride, \
117 h + 7, ff_filters_##f_opt[f][mx - 1]); \
118 ff_vp9_##op##_8tap_1d_v_##sz##_##opt(dst, dst_stride, temp + 3 * 64, 64, \
119 h, ff_filters_##f_opt[f][my - 1]); \
122 #define filters_8tap_2d_fn(op, sz, align, opt, f_opt) \
123 filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, align, opt) \
124 filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, align, opt) \
125 filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, align, opt)
127 #define filters_8tap_2d_fn2(op, align, opt4, opt8, f_opt) \
128 filters_8tap_2d_fn(op, 64, align, opt8, f_opt) \
129 filters_8tap_2d_fn(op, 32, align, opt8, f_opt) \
130 filters_8tap_2d_fn(op, 16, align, opt8, f_opt) \
131 filters_8tap_2d_fn(op, 8, align, opt8, f_opt) \
132 filters_8tap_2d_fn(op, 4, align, opt4, f_opt)
134 filters_8tap_2d_fn2(put, 16, mmxext, sse2, sse2)
135 filters_8tap_2d_fn2(avg, 16, mmxext, sse2, sse2)
136 filters_8tap_2d_fn2(put, 16, ssse3, ssse3, ssse3)
137 filters_8tap_2d_fn2(avg, 16, ssse3, ssse3, ssse3)
138 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
139 filters_8tap_2d_fn(put, 64, 32, avx2, ssse3)
140 filters_8tap_2d_fn(put, 32, 32, avx2, ssse3)
141 filters_8tap_2d_fn(avg, 64, 32, avx2, ssse3)
142 filters_8tap_2d_fn(avg, 32, 32, avx2, ssse3)
145 #undef filters_8tap_2d_fn2
146 #undef filters_8tap_2d_fn
147 #undef filter_8tap_2d_fn
149 #define filter_8tap_1d_fn(op, sz, f, f_opt, fname, dir, dvar, opt) \
150 static void op##_8tap_##fname##_##sz##dir##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
151 const uint8_t *src, ptrdiff_t src_stride, \
152 int h, int mx, int my) \
154 ff_vp9_##op##_8tap_1d_##dir##_##sz##_##opt(dst, dst_stride, src, src_stride, \
155 h, ff_filters_##f_opt[f][dvar - 1]); \
158 #define filters_8tap_1d_fn(op, sz, dir, dvar, opt, f_opt) \
159 filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, f_opt, regular, dir, dvar, opt) \
160 filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, f_opt, sharp, dir, dvar, opt) \
161 filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, f_opt, smooth, dir, dvar, opt)
163 #define filters_8tap_1d_fn2(op, sz, opt, f_opt) \
164 filters_8tap_1d_fn(op, sz, h, mx, opt, f_opt) \
165 filters_8tap_1d_fn(op, sz, v, my, opt, f_opt)
167 #define filters_8tap_1d_fn3(op, opt4, opt8, f_opt) \
168 filters_8tap_1d_fn2(op, 64, opt8, f_opt) \
169 filters_8tap_1d_fn2(op, 32, opt8, f_opt) \
170 filters_8tap_1d_fn2(op, 16, opt8, f_opt) \
171 filters_8tap_1d_fn2(op, 8, opt8, f_opt) \
172 filters_8tap_1d_fn2(op, 4, opt4, f_opt)
174 filters_8tap_1d_fn3(put, mmxext, sse2, sse2)
175 filters_8tap_1d_fn3(avg, mmxext, sse2, sse2)
176 filters_8tap_1d_fn3(put, ssse3, ssse3, ssse3)
177 filters_8tap_1d_fn3(avg, ssse3, ssse3, ssse3)
178 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
179 filters_8tap_1d_fn2(put, 64, avx2, ssse3)
180 filters_8tap_1d_fn2(put, 32, avx2, ssse3)
181 filters_8tap_1d_fn2(avg, 64, avx2, ssse3)
182 filters_8tap_1d_fn2(avg, 32, avx2, ssse3)
185 #undef filters_8tap_1d_fn
186 #undef filters_8tap_1d_fn2
187 #undef filters_8tap_1d_fn3
188 #undef filter_8tap_1d_fn
/*
 * Prototypes for the inverse-transform-and-add functions in assembly:
 * ff_vp9_<coltx>_<rowtx>_<size>x<size>_add_<opt>(dst, stride, coeffs, eob).
 */
#define itxfm_func(typea, typeb, size, opt) \
void ff_vp9_##typea##_##typeb##_##size##x##size##_add_##opt(uint8_t *dst, ptrdiff_t stride, \
                                                            int16_t *block, int eob)

/* All four DCT/ADST combinations for one size and instruction set. */
#define itxfm_funcs(size, opt) \
itxfm_func(idct, idct, size, opt); \
itxfm_func(iadst, idct, size, opt); \
itxfm_func(idct, iadst, size, opt); \
itxfm_func(iadst, iadst, size, opt)

itxfm_func(idct, idct, 4, mmxext);
itxfm_func(idct, iadst, 4, sse2);
itxfm_func(iadst, idct, 4, sse2);
itxfm_func(iadst, iadst, 4, sse2);
itxfm_funcs(4, ssse3);
itxfm_funcs(8, sse2);
itxfm_funcs(8, ssse3);
itxfm_funcs(16, sse2);
itxfm_funcs(16, ssse3);
itxfm_funcs(16, avx);
/* 32x32: only idct_idct exists; the init code maps all four slots to it. */
itxfm_func(idct, idct, 32, sse2);
itxfm_func(idct, idct, 32, ssse3);
itxfm_func(idct, idct, 32, avx);
/* lossless 4x4 Walsh-Hadamard transform */
itxfm_func(iwht, iwht, 4, mmx);
/*
 * Loop-filter prototypes.  size1 selects the variant: 16 is the full-width
 * filter, 44/48/84/88 the mixed-pair variants registered in
 * loop_filter_mix2 below.  E, I and H match the VP9 loop-filter threshold
 * parameters (edge/interior/high-edge-variance — see the VP9 spec).
 */
#define lpf_funcs(size1, size2, opt) \
void ff_vp9_loop_filter_v_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \
                                                    int E, int I, int H); \
void ff_vp9_loop_filter_h_##size1##_##size2##_##opt(uint8_t *dst, ptrdiff_t stride, \
                                                    int E, int I, int H)

lpf_funcs(16, 16, sse2);
lpf_funcs(16, 16, ssse3);
lpf_funcs(16, 16, avx);
lpf_funcs(44, 16, sse2);
lpf_funcs(44, 16, ssse3);
lpf_funcs(44, 16, avx);
lpf_funcs(84, 16, sse2);
lpf_funcs(84, 16, ssse3);
lpf_funcs(84, 16, avx);
lpf_funcs(48, 16, sse2);
lpf_funcs(48, 16, ssse3);
lpf_funcs(48, 16, avx);
lpf_funcs(88, 16, sse2);
lpf_funcs(88, 16, ssse3);
lpf_funcs(88, 16, avx);
/*
 * Prototypes for the intra-prediction functions in assembly:
 * ff_vp9_ipred_<mode>_<size>x<size>_<opt>(dst, stride, left, above).
 */
#define ipred_func(size, type, opt) \
void ff_vp9_ipred_##type##_##size##x##size##_##opt(uint8_t *dst, ptrdiff_t stride, \
                                                   const uint8_t *l, const uint8_t *a)

ipred_func(8, v, mmx);

/* The three DC variants: full, left-only, top-only. */
#define ipred_dc_funcs(size, opt) \
ipred_func(size, dc, opt); \
ipred_func(size, dc_left, opt); \
ipred_func(size, dc_top, opt)

ipred_dc_funcs(4, mmxext);
ipred_dc_funcs(8, mmxext);

/* Directional modes plus TrueMotion. */
#define ipred_dir_tm_funcs(size, opt) \
ipred_func(size, tm, opt); \
ipred_func(size, dl, opt); \
ipred_func(size, dr, opt); \
ipred_func(size, hd, opt); \
ipred_func(size, hu, opt); \
ipred_func(size, vl, opt); \
ipred_func(size, vr, opt)

ipred_dir_tm_funcs(4, mmxext);

ipred_func(16, v, sse);
ipred_func(32, v, sse);

ipred_dc_funcs(16, sse2);
ipred_dc_funcs(32, sse2);

/* Directional + TM + horizontal. */
#define ipred_dir_tm_h_funcs(size, opt) \
ipred_dir_tm_funcs(size, opt); \
ipred_func(size, h, opt)

ipred_dir_tm_h_funcs(8, sse2);
ipred_dir_tm_h_funcs(16, sse2);
ipred_dir_tm_h_funcs(32, sse2);

ipred_func(4, h, sse2);

/* Everything: DC variants + directional + TM + horizontal. */
#define ipred_all_funcs(size, opt) \
ipred_dc_funcs(size, opt); \
ipred_dir_tm_h_funcs(size, opt)

// FIXME hd/vl_4x4_ssse3 does not exist
ipred_all_funcs(4, ssse3);
ipred_all_funcs(8, ssse3);
ipred_all_funcs(16, ssse3);
ipred_all_funcs(32, ssse3);

ipred_dir_tm_h_funcs(8, avx);
ipred_dir_tm_h_funcs(16, avx);
ipred_dir_tm_h_funcs(32, avx);

ipred_func(32, v, avx);

ipred_dc_funcs(32, avx2);
ipred_func(32, h, avx2);
ipred_func(32, tm, avx2);

#undef ipred_dir_tm_h_funcs
#undef ipred_dir_tm_funcs
#undef ipred_dc_funcs

#endif /* HAVE_YASM */
310 av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact)
314 if (bpp != 8) return;
316 cpu_flags = av_get_cpu_flags();
318 #define init_fpel(idx1, idx2, sz, type, opt) \
319 dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
320 dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
321 dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = \
322 dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_vp9_##type##sz##_##opt
324 #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, opt) \
325 dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_##opt; \
326 dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type##_8tap_regular_##sz##dir##_##opt; \
327 dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = type##_8tap_sharp_##sz##dir##_##opt
329 #define init_subpel2(idx1, idx2, sz, type, opt) \
330 init_subpel1(idx1, idx2, 1, 1, sz, hv, type, opt); \
331 init_subpel1(idx1, idx2, 0, 1, sz, v, type, opt); \
332 init_subpel1(idx1, idx2, 1, 0, sz, h, type, opt)
334 #define init_subpel3_32_64(idx, type, opt) \
335 init_subpel2(0, idx, 64, type, opt); \
336 init_subpel2(1, idx, 32, type, opt)
338 #define init_subpel3_8to64(idx, type, opt) \
339 init_subpel3_32_64(idx, type, opt); \
340 init_subpel2(2, idx, 16, type, opt); \
341 init_subpel2(3, idx, 8, type, opt)
343 #define init_subpel3(idx, type, opt) \
344 init_subpel3_8to64(idx, type, opt); \
345 init_subpel2(4, idx, 4, type, opt)
347 #define init_lpf(opt) do { \
348 dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
349 dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
350 dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
351 dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
352 dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
353 dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \
354 dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \
355 dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \
356 dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \
357 dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \
360 #define init_ipred(sz, opt, t, e) \
361 dsp->intra_pred[TX_##sz##X##sz][e##_PRED] = ff_vp9_ipred_##t##_##sz##x##sz##_##opt
363 #define ff_vp9_ipred_hd_4x4_ssse3 ff_vp9_ipred_hd_4x4_mmxext
364 #define ff_vp9_ipred_vl_4x4_ssse3 ff_vp9_ipred_vl_4x4_mmxext
365 #define init_dir_tm_ipred(sz, opt) do { \
366 init_ipred(sz, opt, dl, DIAG_DOWN_LEFT); \
367 init_ipred(sz, opt, dr, DIAG_DOWN_RIGHT); \
368 init_ipred(sz, opt, hd, HOR_DOWN); \
369 init_ipred(sz, opt, vl, VERT_LEFT); \
370 init_ipred(sz, opt, hu, HOR_UP); \
371 init_ipred(sz, opt, tm, TM_VP8); \
372 init_ipred(sz, opt, vr, VERT_RIGHT); \
374 #define init_dir_tm_h_ipred(sz, opt) do { \
375 init_dir_tm_ipred(sz, opt); \
376 init_ipred(sz, opt, h, HOR); \
378 #define init_dc_ipred(sz, opt) do { \
379 init_ipred(sz, opt, dc, DC); \
380 init_ipred(sz, opt, dc_left, LEFT_DC); \
381 init_ipred(sz, opt, dc_top, TOP_DC); \
383 #define init_all_ipred(sz, opt) do { \
384 init_dc_ipred(sz, opt); \
385 init_dir_tm_h_ipred(sz, opt); \
388 if (EXTERNAL_MMX(cpu_flags)) {
389 init_fpel(4, 0, 4, put, mmx);
390 init_fpel(3, 0, 8, put, mmx);
392 dsp->itxfm_add[4 /* lossless */][DCT_DCT] =
393 dsp->itxfm_add[4 /* lossless */][ADST_DCT] =
394 dsp->itxfm_add[4 /* lossless */][DCT_ADST] =
395 dsp->itxfm_add[4 /* lossless */][ADST_ADST] = ff_vp9_iwht_iwht_4x4_add_mmx;
397 init_ipred(8, mmx, v, VERT);
400 if (EXTERNAL_MMXEXT(cpu_flags)) {
401 init_subpel2(4, 0, 4, put, mmxext);
402 init_subpel2(4, 1, 4, avg, mmxext);
403 init_fpel(4, 1, 4, avg, mmxext);
404 init_fpel(3, 1, 8, avg, mmxext);
405 dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_mmxext;
406 init_dc_ipred(4, mmxext);
407 init_dc_ipred(8, mmxext);
408 init_dir_tm_ipred(4, mmxext);
411 if (EXTERNAL_SSE(cpu_flags)) {
412 init_fpel(2, 0, 16, put, sse);
413 init_fpel(1, 0, 32, put, sse);
414 init_fpel(0, 0, 64, put, sse);
415 init_ipred(16, sse, v, VERT);
416 init_ipred(32, sse, v, VERT);
419 if (EXTERNAL_SSE2(cpu_flags)) {
420 init_subpel3_8to64(0, put, sse2);
421 init_subpel3_8to64(1, avg, sse2);
422 init_fpel(2, 1, 16, avg, sse2);
423 init_fpel(1, 1, 32, avg, sse2);
424 init_fpel(0, 1, 64, avg, sse2);
426 dsp->itxfm_add[TX_4X4][ADST_DCT] = ff_vp9_idct_iadst_4x4_add_sse2;
427 dsp->itxfm_add[TX_4X4][DCT_ADST] = ff_vp9_iadst_idct_4x4_add_sse2;
428 dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_sse2;
429 dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_sse2;
430 dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_sse2;
431 dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_sse2;
432 dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_sse2;
433 dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_sse2;
434 dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_sse2;
435 dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_sse2;
436 dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_sse2;
437 dsp->itxfm_add[TX_32X32][ADST_ADST] =
438 dsp->itxfm_add[TX_32X32][ADST_DCT] =
439 dsp->itxfm_add[TX_32X32][DCT_ADST] =
440 dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_sse2;
441 init_dc_ipred(16, sse2);
442 init_dc_ipred(32, sse2);
443 init_dir_tm_h_ipred(8, sse2);
444 init_dir_tm_h_ipred(16, sse2);
445 init_dir_tm_h_ipred(32, sse2);
446 init_ipred(4, sse2, h, HOR);
449 if (EXTERNAL_SSSE3(cpu_flags)) {
450 init_subpel3(0, put, ssse3);
451 init_subpel3(1, avg, ssse3);
452 dsp->itxfm_add[TX_4X4][DCT_DCT] = ff_vp9_idct_idct_4x4_add_ssse3;
453 dsp->itxfm_add[TX_4X4][ADST_DCT] = ff_vp9_idct_iadst_4x4_add_ssse3;
454 dsp->itxfm_add[TX_4X4][DCT_ADST] = ff_vp9_iadst_idct_4x4_add_ssse3;
455 dsp->itxfm_add[TX_4X4][ADST_ADST] = ff_vp9_iadst_iadst_4x4_add_ssse3;
456 dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_ssse3;
457 dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_ssse3;
458 dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_ssse3;
459 dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_ssse3;
460 dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_ssse3;
461 dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_ssse3;
462 dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_ssse3;
463 dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_ssse3;
464 dsp->itxfm_add[TX_32X32][ADST_ADST] =
465 dsp->itxfm_add[TX_32X32][ADST_DCT] =
466 dsp->itxfm_add[TX_32X32][DCT_ADST] =
467 dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_ssse3;
469 init_all_ipred(4, ssse3);
470 init_all_ipred(8, ssse3);
471 init_all_ipred(16, ssse3);
472 init_all_ipred(32, ssse3);
475 if (EXTERNAL_AVX(cpu_flags)) {
476 dsp->itxfm_add[TX_8X8][DCT_DCT] = ff_vp9_idct_idct_8x8_add_avx;
477 dsp->itxfm_add[TX_8X8][ADST_DCT] = ff_vp9_idct_iadst_8x8_add_avx;
478 dsp->itxfm_add[TX_8X8][DCT_ADST] = ff_vp9_iadst_idct_8x8_add_avx;
479 dsp->itxfm_add[TX_8X8][ADST_ADST] = ff_vp9_iadst_iadst_8x8_add_avx;
480 dsp->itxfm_add[TX_16X16][DCT_DCT] = ff_vp9_idct_idct_16x16_add_avx;
481 dsp->itxfm_add[TX_16X16][ADST_DCT] = ff_vp9_idct_iadst_16x16_add_avx;
482 dsp->itxfm_add[TX_16X16][DCT_ADST] = ff_vp9_iadst_idct_16x16_add_avx;
483 dsp->itxfm_add[TX_16X16][ADST_ADST] = ff_vp9_iadst_iadst_16x16_add_avx;
484 dsp->itxfm_add[TX_32X32][ADST_ADST] =
485 dsp->itxfm_add[TX_32X32][ADST_DCT] =
486 dsp->itxfm_add[TX_32X32][DCT_ADST] =
487 dsp->itxfm_add[TX_32X32][DCT_DCT] = ff_vp9_idct_idct_32x32_add_avx;
489 init_dir_tm_h_ipred(8, avx);
490 init_dir_tm_h_ipred(16, avx);
491 init_dir_tm_h_ipred(32, avx);
493 if (EXTERNAL_AVX_FAST(cpu_flags)) {
494 init_fpel(1, 0, 32, put, avx);
495 init_fpel(0, 0, 64, put, avx);
496 init_ipred(32, avx, v, VERT);
499 if (EXTERNAL_AVX2(cpu_flags)) {
500 init_fpel(1, 1, 32, avg, avx2);
501 init_fpel(0, 1, 64, avg, avx2);
503 #if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
504 init_subpel3_32_64(0, put, avx2);
505 init_subpel3_32_64(1, avg, avx2);
508 init_dc_ipred(32, avx2);
509 init_ipred(32, avx2, h, HOR);
510 init_ipred(32, avx2, tm, TM_VP8);
518 #endif /* HAVE_YASM */