/*
 * Copyright (c) 2015 - 2017 Shivraj Patil (Shivraj.Patil@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavcodec/vp9dsp.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp9dsp_mips.h"
#define VP9_DCT_CONST_BITS   14
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
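/* Fixed-point transform constants: cospi_k_64 is cos(k * pi / 64) in Q14,
 * i.e. round(16384 * cos(k * pi / 64)), matching the VP9 reference
 * implementation (e.g. cospi_16_64 = round(16384 * cos(pi / 4)) = 11585). */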
static const int32_t cospi_1_64 = 16364;
static const int32_t cospi_2_64 = 16305;
static const int32_t cospi_3_64 = 16207;
static const int32_t cospi_4_64 = 16069;
static const int32_t cospi_5_64 = 15893;
static const int32_t cospi_6_64 = 15679;
static const int32_t cospi_7_64 = 15426;
static const int32_t cospi_8_64 = 15137;
static const int32_t cospi_9_64 = 14811;
static const int32_t cospi_10_64 = 14449;
static const int32_t cospi_11_64 = 14053;
static const int32_t cospi_12_64 = 13623;
static const int32_t cospi_13_64 = 13160;
static const int32_t cospi_14_64 = 12665;
static const int32_t cospi_15_64 = 12140;
static const int32_t cospi_16_64 = 11585;
static const int32_t cospi_17_64 = 11003;
static const int32_t cospi_18_64 = 10394;
static const int32_t cospi_19_64 = 9760;
static const int32_t cospi_20_64 = 9102;
static const int32_t cospi_21_64 = 8423;
static const int32_t cospi_22_64 = 7723;
static const int32_t cospi_23_64 = 7005;
static const int32_t cospi_24_64 = 6270;
static const int32_t cospi_25_64 = 5520;
static const int32_t cospi_26_64 = 4756;
static const int32_t cospi_27_64 = 3981;
static const int32_t cospi_28_64 = 3196;
static const int32_t cospi_29_64 = 2404;
static const int32_t cospi_30_64 = 1606;
static const int32_t cospi_31_64 = 804;
// 16384 * sqrt(2) * sin(kPi/9) * 2 / 3
static const int32_t sinpi_1_9 = 5283;
static const int32_t sinpi_2_9 = 9929;
static const int32_t sinpi_3_9 = 13377;
static const int32_t sinpi_4_9 = 15212;
#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1)  \
{                                                                  \
    v8i16 k0_m = __msa_fill_h(cnst0);                              \
    v4i32 s0_m, s1_m, s2_m, s3_m;                                  \
                                                                   \
    s0_m = (v4i32) __msa_fill_h(cnst1);                            \
    k0_m = __msa_ilvev_h((v8i16) s0_m, k0_m);                      \
                                                                   \
    ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m);                        \
    ILVRL_H2_SW(reg0, reg1, s3_m, s2_m);                           \
    DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m);               \
    SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS);                   \
    out0 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m);              \
                                                                   \
    DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m);               \
    SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS);                   \
    out1 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m);              \
}
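/*
 * Per-lane scalar equivalent of VP9_DOTP_CONST_PAIR (a reference sketch
 * only; the vector code above is the real implementation):
 *
 *     out0[i] = ROUND_POWER_OF_TWO(reg0[i] * cnst0 - reg1[i] * cnst1,
 *                                  VP9_DCT_CONST_BITS);
 *     out1[i] = ROUND_POWER_OF_TWO(reg0[i] * cnst1 + reg1[i] * cnst0,
 *                                  VP9_DCT_CONST_BITS);
 *
 * i.e. one rotation butterfly of the VP9 inverse DCT.
 */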
#define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7,  \
                                  dst0, dst1, dst2, dst3)                  \
{                                                                          \
    v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m;                               \
    v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m;                               \
                                                                           \
    DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5,                    \
                tp0_m, tp2_m, tp3_m, tp4_m);                               \
    DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7,                    \
                tp5_m, tp6_m, tp7_m, tp8_m);                               \
    BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m);   \
    BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m);   \
    SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, VP9_DCT_CONST_BITS);           \
    SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, VP9_DCT_CONST_BITS);           \
    PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m,    \
                dst0, dst1, dst2, dst3);                                   \
}
#define VP9_DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2)          \
( {                                                       \
    v8i16 dst_m;                                          \
    v4i32 tp0_m, tp1_m;                                   \
                                                          \
    DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m);        \
    SRARI_W2_SW(tp1_m, tp0_m, VP9_DCT_CONST_BITS);        \
    dst_m = __msa_pckev_h((v8i16) tp1_m, (v8i16) tp0_m);  \
                                                          \
    dst_m;                                                \
} )
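/* Written as a GNU statement expression so the macro can be used as an
 * rvalue: both input halves are dot-multiplied by the coefficient pair in
 * in2, rounded by VP9_DCT_CONST_BITS, and packed back to halfwords. */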
#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,                 \
                  out0, out1, out2, out3, out4, out5, out6, out7)         \
{                                                                         \
    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m;                    \
    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m;                     \
    v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64,  \
        cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 };             \
    v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64,              \
        -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 };                  \
                                                                          \
    SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m);                       \
    cnst2_m = -cnst0_m;                                                   \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
    SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m);                       \
    cnst4_m = -cnst2_m;                                                   \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
                                                                          \
    ILVRL_H2_SH(in0, in7, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(in4, in3, vec3_m, vec2_m);                                \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
                              cnst1_m, cnst2_m, cnst3_m, in7, in0,        \
                              in4, in3);                                  \
                                                                          \
    SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m);                       \
    cnst2_m = -cnst0_m;                                                   \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m);    \
    SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m);                       \
    cnst4_m = -cnst2_m;                                                   \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m);    \
                                                                          \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
                                                                          \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
                              cnst1_m, cnst2_m, cnst3_m, in5, in2,        \
                              in6, in1);                                  \
    BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5);                \
    out7 = -s0_m;                                                         \
    out0 = s1_m;                                                          \
                                                                          \
    SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5,                                    \
                 cnst0_m, cnst1_m, cnst2_m, cnst3_m);                     \
                                                                          \
    ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m);    \
    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
    cnst1_m = cnst0_m;                                                    \
                                                                          \
    ILVRL_H2_SH(in4, in3, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m);                                \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m,    \
                              cnst2_m, cnst3_m, cnst1_m, out1, out6,      \
                              s0_m, s1_m);                                \
    out1 = -out1;                                                         \
                                                                          \
    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m);                       \
    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m);                            \
                                                                          \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m);                                \
    ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m);                              \
    out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m);            \
    out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m);            \
    out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m);            \
    out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m);            \
                                                                          \
    out3 = -out3;                                                         \
    out5 = -out5;                                                         \
}
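/* Note the sign handling in VP9_ADST8: as in the VP9 reference iadst8, the
 * odd-index outputs (out1, out3, out5, out7) are negated, either at creation
 * (out7 = -s0_m) or with an explicit flip after the dot products. */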
#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1)                       \
{                                                                        \
    v4i32 madd0_m, madd1_m, madd2_m, madd3_m;                            \
    v8i16 madd_s0_m, madd_s1_m;                                          \
                                                                         \
    ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m);                           \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m,              \
                c0, c0, c1, c1, madd0_m, madd1_m, madd2_m, madd3_m);     \
    SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1);         \
}
#define VP9_MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,       \
                    out0, out1, out2, out3)                               \
{                                                                         \
    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m;                     \
                                                                          \
    ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m);                        \
    ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m);                        \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
                cst0, cst0, cst2, cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
                m4_m, m5_m, tmp3_m, tmp2_m);                              \
    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);          \
    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1);                  \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m,               \
                cst1, cst1, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m,                           \
                m4_m, m5_m, tmp3_m, tmp2_m);                              \
    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);          \
    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3);                  \
}
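/* VP9_MADD_BF fuses a butterfly stage with the rotations: out0/out1 are the
 * rounded sum and difference of the dot products (inp0, inp1)·cst0 and
 * (inp2, inp3)·cst2, with the add/sub done in 32 bits *before* the 14-bit
 * rounding; out2/out3 do the same with cst1/cst3. */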
#define VP9_SET_COSPI_PAIR(c0_h, c1_h)   \
( {                                      \
    v8i16 out0_m, r0_m, r1_m;            \
                                         \
    r0_m = __msa_fill_h(c0_h);           \
    r1_m = __msa_fill_h(c1_h);           \
    out0_m = __msa_ilvev_h(r1_m, r0_m);  \
                                         \
    out0_m;                              \
} )
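/* Returns a v8i16 with the halfword pair { c0_h, c1_h } repeated across all
 * lanes, i.e. the coefficient operand expected by the DOTP_SH* helpers:
 * each 32-bit dot-product lane computes even * c0_h + odd * c1_h. */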
#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3)  \
{                                                                 \
    uint8_t *dst_m = (uint8_t *) (dst);                           \
    v16u8 dst0_m, dst1_m, dst2_m, dst3_m;                         \
    v16i8 tmp0_m, tmp1_m;                                         \
    v16i8 zero_m = { 0 };                                         \
    v8i16 res0_m, res1_m, res2_m, res3_m;                         \
                                                                  \
    LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m);    \
    ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m,    \
               zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m);   \
    ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3,      \
         res0_m, res1_m, res2_m, res3_m);                         \
    CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m);               \
    PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m);  \
    ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride);                  \
}
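/* Reconstruction idiom used by all the add-block stores below: load four
 * 8-pixel destination rows, widen them to 16 bits, add the inverse-transform
 * residual, clip to [0, 255], and pack the rows back to bytes for storing. */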
#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3)      \
{                                                                    \
    v8i16 c0_m, c1_m, c2_m, c3_m;                                    \
    v8i16 step0_m, step1_m;                                          \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                            \
                                                                     \
    c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);             \
    c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);            \
    step0_m = __msa_ilvr_h(in2, in0);                                \
    DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m);       \
                                                                     \
    c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);             \
    c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);              \
    step1_m = __msa_ilvr_h(in3, in1);                                \
    DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m);       \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \
                                                                     \
    PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m);     \
    SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8);                 \
    BUTTERFLY_4((v8i16) tmp0_m, (v8i16) tmp1_m,                      \
                (v8i16) tmp2_m, (v8i16) tmp3_m,                      \
                out0, out1, out2, out3);                             \
}
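/*
 * For reference, a scalar sketch of the 4-point IDCT that VP9_IDCT4x4
 * vectorizes (one column; the helper name idct4_ref is illustrative only
 * and not part of this file's API):
 */
#if 0
static void idct4_ref(const int16_t in[4], int16_t out[4])
{
    /* pairwise rotations by cospi_16 and (cospi_24, cospi_8), Q14 rounded */
    int32_t s0 = ROUND_POWER_OF_TWO((in[0] + in[2]) * cospi_16_64,
                                    VP9_DCT_CONST_BITS);
    int32_t s1 = ROUND_POWER_OF_TWO((in[0] - in[2]) * cospi_16_64,
                                    VP9_DCT_CONST_BITS);
    int32_t s2 = ROUND_POWER_OF_TWO(in[1] * cospi_24_64 - in[3] * cospi_8_64,
                                    VP9_DCT_CONST_BITS);
    int32_t s3 = ROUND_POWER_OF_TWO(in[1] * cospi_8_64 + in[3] * cospi_24_64,
                                    VP9_DCT_CONST_BITS);

    /* final butterfly, done on whole vectors by BUTTERFLY_4 above */
    out[0] = s0 + s3;
    out[1] = s1 + s2;
    out[2] = s1 - s2;
    out[3] = s0 - s3;
}
#endif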
#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3)     \
{                                                                    \
    v8i16 res0_m, res1_m, c0_m, c1_m;                                \
    v8i16 k1_m, k2_m, k3_m, k4_m;                                    \
    v8i16 zero_m = { 0 };                                            \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                            \
    v4i32 int0_m, int1_m, int2_m, int3_m;                            \
    v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9,                \
        sinpi_4_9, -sinpi_1_9, -sinpi_2_9, -sinpi_3_9,               \
        -sinpi_4_9 };                                                \
                                                                     \
    SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m);        \
    ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m);                 \
    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                  \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m);         \
    int0_m = tmp2_m + tmp1_m;                                        \
                                                                     \
    SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m);                          \
    ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m);                 \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);         \
    int1_m = tmp0_m + tmp1_m;                                        \
                                                                     \
    c0_m = __msa_splati_h(mask_m, 6);                                \
    ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m);                \
    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m);                  \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m);         \
    int2_m = tmp0_m + tmp1_m;                                        \
                                                                     \
    c0_m = __msa_splati_h(mask_m, 6);                                \
    c0_m = __msa_ilvev_h(c0_m, k1_m);                                \
                                                                     \
    res0_m = __msa_ilvr_h((in1), (in3));                             \
    tmp0_m = __msa_dotp_s_w(res0_m, c0_m);                           \
    int3_m = tmp2_m + tmp0_m;                                        \
                                                                     \
    res0_m = __msa_ilvr_h((in2), (in3));                             \
    c1_m = __msa_ilvev_h(k4_m, k3_m);                                \
                                                                     \
    tmp2_m = __msa_dotp_s_w(res0_m, c1_m);                           \
    res1_m = __msa_ilvr_h((in0), (in2));                             \
    c1_m = __msa_ilvev_h(k1_m, zero_m);                              \
                                                                     \
    tmp3_m = __msa_dotp_s_w(res1_m, c1_m);                           \
    int3_m += tmp2_m;                                                \
    int3_m += tmp3_m;                                                \
                                                                     \
    SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1);         \
    PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3);         \
}
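/*
 * Scalar reference for what int0_m..int3_m accumulate (the VP9 reference
 * iadst4; a sketch for orientation only, with x0..x3 the four inputs):
 *
 *     s0 = sinpi_1_9 * x0;    s1 = sinpi_2_9 * x0;    s2 = sinpi_3_9 * x1;
 *     s3 = sinpi_4_9 * x2;    s4 = sinpi_1_9 * x2;
 *     s5 = sinpi_2_9 * x3;    s6 = sinpi_4_9 * x3;
 *
 *     out0 = dct_const_round_shift(s0 + s3 + s5 + s2);
 *     out1 = dct_const_round_shift(s1 - s4 - s6 + s2);
 *     out2 = dct_const_round_shift(sinpi_3_9 * (x0 - x2 + x3));
 *     out3 = dct_const_round_shift(s0 + s3 + s5 + s1 - s4 - s6 - s2);
 */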
#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,         \
                           out0, out1, out2, out3, out4, out5, out6, out7) \
{                                                                          \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                  \
    v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n;                                  \
    v8i16 zero_m = { 0 };                                                  \
                                                                           \
    ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6,                     \
               tmp0_n, tmp1_n, tmp2_n, tmp3_n);                            \
    ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m);                           \
    ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m);                           \
                                                                           \
    out0 = (v8i16) __msa_ilvr_d((v2i64) tmp1_m, (v2i64) tmp0_m);           \
    out1 = (v8i16) __msa_ilvl_d((v2i64) tmp1_m, (v2i64) tmp0_m);           \
    out2 = (v8i16) __msa_ilvr_d((v2i64) tmp3_m, (v2i64) tmp2_m);           \
    out3 = (v8i16) __msa_ilvl_d((v2i64) tmp3_m, (v2i64) tmp2_m);           \
                                                                           \
    out4 = zero_m;                                                         \
    out5 = zero_m;                                                         \
    out6 = zero_m;                                                         \
    out7 = zero_m;                                                         \
}
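/* DC-only 4x4 block: the lone DC coefficient is rotated through cospi_16_64
 * twice (a net Q14 scale of roughly 1/2 each time), rounded by the final
 * >> 4 of the 4x4 inverse transform, and the resulting constant is added to
 * all sixteen destination pixels. */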
static void vp9_idct4x4_1_add_msa(int16_t *input, uint8_t *dst,
                                  int32_t dst_stride)
{
    int16_t out;
    v8i16 vec;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 4);
    vec = __msa_fill_h(out);
    input[0] = 0;

    ADDBLK_ST4x4_UB(vec, vec, vec, vec, dst, dst_stride);
}
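/* Full 4x4 path: one 1-D pass over the rows, a 4x4 transpose, a second 1-D
 * pass over the columns, then the (add 2^3, >> 4) final rounding before the
 * residual is added to the prediction. The coefficient buffer is cleared as
 * it is consumed, as the decoder expects. */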
static void vp9_idct4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                          int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows transform */
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns transform */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}
static void vp9_iadst4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows transform */
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns transform */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}
static void vp9_iadst_idct_4x4_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows transform */
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns transform */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}
static void vp9_idct_iadst_4x4_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows transform */
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns transform */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}
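/* VP9 signals the row and column kernels independently (DCT or ADST per
 * direction); the *_iadst_idct_* and *_idct_iadst_* wrappers above and below
 * simply run the first pass with one kernel and the second with the other. */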
#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h)     \
( {                                                    \
    v8i16 c0_m, c1_m;                                  \
                                                       \
    SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m);  \
    c0_m = __msa_ilvev_h(c1_m, c0_m);                  \
                                                       \
    c0_m;                                              \
} )
/* multiply and add macro */
#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3,          \
                 out0, out1, out2, out3)                                  \
{                                                                         \
    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m;                     \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
                                                                          \
    ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m);                        \
    ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m);                        \
    DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m,               \
                cst0, cst0, cst1, cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);      \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1);              \
    DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m,               \
                cst2, cst2, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m);  \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);      \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3);              \
}
#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,                \
                       out0, out1, out2, out3, out4, out5, out6, out7)        \
{                                                                             \
    v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m;             \
    v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m;             \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                     \
    v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64,       \
        cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 };               \
                                                                              \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5);                                  \
    k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0);                                  \
    k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3);                                  \
    k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2);                                  \
    VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
    SUB2(in1, in3, in7, in5, res0_m, res1_m);                                 \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7);                                  \
    k1_m = __msa_splati_h(mask_m, 4);                                         \
                                                                              \
    ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m);                              \
    DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m,       \
                tmp0_m, tmp1_m, tmp2_m, tmp3_m);                              \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS);          \
    tp4_m = in1 + in3;                                                        \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m);                \
    tp7_m = in7 + in5;                                                        \
    k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);                      \
    k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);                       \
    VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m,                      \
             in0, in4, in2, in6);                                             \
    BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m);              \
    BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m,       \
                out0, out1, out2, out3, out4, out5, out6, out7);              \
}
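/* One 1-D 8-point IDCT pass: the even inputs (in0, in2, in4, in6) reduce to
 * tp0_m..tp3_m and the odd inputs (in1, in3, in5, in7) to tp4_m..tp7_m; the
 * closing BUTTERFLY_8 then forms the eight outputs as sums and differences
 * of the two halves. */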
#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,             \
                        out0, out1, out2, out3, out4, out5, out6, out7)     \
{                                                                           \
    v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m;                   \
    v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m;                               \
    v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1;         \
    v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64,                 \
        cospi_10_64, cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 }; \
    v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64,               \
        cospi_6_64, -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 };   \
    v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64,                \
        -cospi_16_64, 0, 0, 0, 0 };                                         \
                                                                            \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1);                               \
    k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2);                               \
    ILVRL_H2_SH(in1, in0, in_s1, in_s0);                                    \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,         \
                r0_m, r1_m, r2_m, r3_m);                                    \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7);                               \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1);                               \
    ILVRL_H2_SH(in5, in4, in_s1, in_s0);                                    \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,         \
                r4_m, r5_m, r6_m, r7_m);                                    \
    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                    \
         m0_m, m1_m, m2_m, m3_m);                                           \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m);                    \
    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                    \
         m0_m, m1_m, m2_m, m3_m);                                           \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m);                        \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4);                               \
    k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5);                               \
    ILVRL_H2_SH(in3, in2, in_s1, in_s0);                                    \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,         \
                r0_m, r1_m, r2_m, r3_m);                                    \
    k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3);                               \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4);                               \
    ILVRL_H2_SH(in7, in6, in_s1, in_s0);                                    \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,         \
                r4_m, r5_m, r6_m, r7_m);                                    \
    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                    \
         m0_m, m1_m, m2_m, m3_m);                                           \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m);                    \
    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m,                    \
         m0_m, m1_m, m2_m, m3_m);                                           \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m);                        \
    ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m);                                    \
    BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3);       \
    k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6);                               \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7);                               \
    ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0);                                  \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,         \
                r0_m, r1_m, r2_m, r3_m);                                    \
    k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1);                               \
    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,             \
                r4_m, r5_m, r6_m, r7_m);                                    \
    ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                    \
         m0_m, m1_m, m2_m, m3_m);                                           \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6);                         \
    SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m,                    \
         m0_m, m1_m, m2_m, m3_m);                                           \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5);                          \
    k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2);                               \
    k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3);                               \
    ILVRL_H2_SH(in4, in3, in_s1, in_s0);                                    \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m,         \
                m0_m, m1_m, m2_m, m3_m);                                    \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4);                         \
    ILVRL_H2_SW(in5, in2, m2_m, m3_m);                                      \
    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m,             \
                m0_m, m1_m, m2_m, m3_m);                                    \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS);                \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5);                         \
                                                                            \
    out1 = -in1;                                                            \
    out3 = -in3;                                                            \
    out5 = -in5;                                                            \
    out7 = -in7;                                                            \
}
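/* Note the register naming in VP9_IADST8x16_1D's smaller sibling above:
 * several "in" arguments double as scratch outputs, and callers pass the
 * eight rows permuted (see vp9_iadst_idct_8x8_add_msa below, which loads
 * them as in1, in6, in3, in4, in5, in2, in7, in0), which appears to be the
 * order in which the iadst consumes them. */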
static void vp9_idct8x8_1_add_msa(int16_t *input, uint8_t *dst,
                                  int32_t dst_stride)
{
    int16_t out;
    int32_t val;
    v8i16 vec;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    val = ROUND_POWER_OF_TWO(out, 5);
    vec = __msa_fill_h(val);
    input[0] = 0;

    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
}
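/* Sparse 8x8 path: with few enough nonzero coefficients (the caller routes
 * low-eob blocks here) everything of interest sits in the top-left 4x4
 * quadrant, so the first pass below computes the 8-point IDCT from only four
 * packed input rows, with the stage results packed against zero. */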
static void vp9_idct8x8_12_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
    v4i32 tmp0, tmp1, tmp2, tmp3;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
    ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
    ILVR_D2_SH(in5, in4, in7, in6, in2, in3);

    /* stage1 */
    ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
    k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
    DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
    SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
    PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
    BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);

    /* stage2 */
    ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
    k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
    SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
    PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
    BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);

    /* stage3 */
    s0 = __msa_ilvr_h(s6, s5);

    k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
    DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
    SRARI_W2_SW(tmp0, tmp1, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);

    /* stage4 */
    BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7,
                in0, in1, in2, in3, in4, in5, in6, in7);
    TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);

    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);

    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
static void vp9_idct8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                          int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* rows transform */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);

    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
static void vp9_iadst8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 cnst0, cnst1, cnst2, cnst3, cnst4;
    v8i16 temp0, temp1, temp2, temp3, s0, s1;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* rows transform */
    VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
              in0, in1, in2, in3, in4, in5, in6, in7);

    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);

    cnst0 = __msa_fill_h(cospi_2_64);
    cnst1 = __msa_fill_h(cospi_30_64);
    cnst2 = -cnst0;
    ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1);
    cnst2 = __msa_fill_h(cospi_18_64);
    cnst3 = __msa_fill_h(cospi_14_64);
    cnst4 = -cnst2;
    ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3);

    ILVRL_H2_SH(in0, in7, temp1, temp0);
    ILVRL_H2_SH(in4, in3, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2,
                              cnst3, in7, in0, in4, in3);

    cnst0 = __msa_fill_h(cospi_10_64);
    cnst1 = __msa_fill_h(cospi_22_64);
    cnst2 = -cnst0;
    ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1);
    cnst2 = __msa_fill_h(cospi_26_64);
    cnst3 = __msa_fill_h(cospi_6_64);
    cnst4 = -cnst2;
    ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3);

    ILVRL_H2_SH(in2, in5, temp1, temp0);
    ILVRL_H2_SH(in6, in1, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2,
                              cnst3, in5, in2, in6, in1);
    BUTTERFLY_4(in7, in0, in2, in5, s1, s0, in2, in5);
    out7 = -s0;
    out0 = s1;

    SRARI_H2_SH(out0, out7, 5);
    dst0 = LD_UB(dst + 0 * dst_stride);
    dst7 = LD_UB(dst + 7 * dst_stride);

    res0 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst0);
    res0 += out0;
    res0 = CLIP_SH_0_255(res0);
    res0 = (v8i16) __msa_pckev_b((v16i8) res0, (v16i8) res0);
    ST8x1_UB(res0, dst);

    res7 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst7);
    res7 += out7;
    res7 = CLIP_SH_0_255(res7);
    res7 = (v8i16) __msa_pckev_b((v16i8) res7, (v16i8) res7);
    ST8x1_UB(res7, dst + 7 * dst_stride);

    cnst1 = __msa_fill_h(cospi_24_64);
    cnst0 = __msa_fill_h(cospi_8_64);
    cnst2 = -cnst0;
    cnst3 = -cnst1;

    ILVEV_H2_SH(cnst3, cnst0, cnst1, cnst2, cnst3, cnst2);
    cnst0 = __msa_ilvev_h(cnst1, cnst0);
    cnst1 = cnst0;

    ILVRL_H2_SH(in4, in3, temp1, temp0);
    ILVRL_H2_SH(in6, in1, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst2, cnst3,
                              cnst1, out1, out6, s0, s1);
    out1 = -out1;
    SRARI_H2_SH(out1, out6, 5);
    dst1 = LD_UB(dst + 1 * dst_stride);
    dst6 = LD_UB(dst + 6 * dst_stride);
    ILVR_B2_SH(zero, dst1, zero, dst6, res1, res6);
    ADD2(res1, out1, res6, out6, res1, res6);
    CLIP_SH2_0_255(res1, res6);
    PCKEV_B2_SH(res1, res1, res6, res6, res1, res6);
    ST8x1_UB(res1, dst + dst_stride);
    ST8x1_UB(res6, dst + 6 * dst_stride);

    cnst0 = __msa_fill_h(cospi_16_64);
    cnst1 = -cnst0;
    cnst1 = __msa_ilvev_h(cnst1, cnst0);

    ILVRL_H2_SH(in2, in5, temp1, temp0);
    ILVRL_H2_SH(s0, s1, temp3, temp2);
    out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst0);
    out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst1);
    out3 = -out3;
    SRARI_H2_SH(out3, out4, 5);
    dst3 = LD_UB(dst + 3 * dst_stride);
    dst4 = LD_UB(dst + 4 * dst_stride);
    ILVR_B2_SH(zero, dst3, zero, dst4, res3, res4);
    ADD2(res3, out3, res4, out4, res3, res4);
    CLIP_SH2_0_255(res3, res4);
    PCKEV_B2_SH(res3, res3, res4, res4, res3, res4);
    ST8x1_UB(res3, dst + 3 * dst_stride);
    ST8x1_UB(res4, dst + 4 * dst_stride);

    out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst0);
    out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst1);
    out5 = -out5;
    SRARI_H2_SH(out2, out5, 5);
    dst2 = LD_UB(dst + 2 * dst_stride);
    dst5 = LD_UB(dst + 5 * dst_stride);
    ILVR_B2_SH(zero, dst2, zero, dst5, res2, res5);
    ADD2(res2, out2, res5, out5, res2, res5);
    CLIP_SH2_0_255(res2, res5);
    PCKEV_B2_SH(res2, res2, res5, res5, res2, res5);
    ST8x1_UB(res2, dst + 2 * dst_stride);
    ST8x1_UB(res5, dst + 5 * dst_stride);
}
static void vp9_iadst_idct_8x8_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in1, in6, in3, in4, in5, in2, in7, in0);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* rows transform */
    VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                    in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);

    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
static void vp9_idct_iadst_8x8_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* rows transform */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in1, in6, in3, in4, in5, in2, in7, in0);

    VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                    in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}
#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8,         \
                         r9, r10, r11, r12, r13, r14, r15,           \
                         out0, out1, out2, out3, out4, out5,         \
                         out6, out7, out8, out9, out10, out11,       \
                         out12, out13, out14, out15)                 \
{                                                                    \
    v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m;            \
    v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m;      \
    v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m;            \
    v8i16 h8_m, h9_m, h10_m, h11_m;                                  \
    v8i16 k0_m, k1_m, k2_m, k3_m;                                    \
                                                                     \
    /* stage 1 */                                                    \
    k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);              \
    k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);             \
    k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);             \
    k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);            \
    VP9_MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m,             \
                g0_m, g1_m, g2_m, g3_m);                             \
    k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);              \
    k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);             \
    k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);             \
    k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);            \
    VP9_MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m,            \
                g4_m, g5_m, g6_m, g7_m);                             \
    k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);              \
    k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);             \
    k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);              \
    k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);             \
    VP9_MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m,            \
                g8_m, g9_m, g10_m, g11_m);                           \
    k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);             \
    k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);            \
    k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);              \
    k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);             \
    VP9_MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m,             \
                g12_m, g13_m, g14_m, g15_m);                         \
                                                                     \
    /* stage 2 */                                                    \
    k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);              \
    k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);             \
    k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);             \
    VP9_MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m,     \
                h0_m, h1_m, h2_m, h3_m);                             \
    k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);             \
    k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);            \
    k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);            \
    VP9_MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m,    \
                h4_m, h5_m, h6_m, h7_m);                             \
    BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10);   \
    BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m,   \
                h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m);   \
                                                                     \
    /* stage 3 */                                                    \
    BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m); \
    k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);              \
    k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);             \
    k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);             \
    VP9_MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m,      \
                out4, out6, out5, out7);                             \
    VP9_MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m,      \
                out12, out14, out13, out15);                         \
                                                                     \
    /* stage 4 */                                                    \
    k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);             \
    k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);           \
    k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);            \
    k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);            \
    VP9_MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3);            \
    VP9_MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7);              \
    VP9_MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11);          \
    VP9_MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15);          \
}
static void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    v8i16 loc0, loc1, loc2, loc3;
    v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
    v8i16 tmp5, tmp6, tmp7;
    v8i16 zero = { 0 };

    /* load up 8x16 */
    LD_SH16(input, 16,
            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 16 * 8;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
    VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
    VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
    VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
    VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = reg2 - loc1;
    reg2 = reg2 + loc1;
    reg12 = reg14 - loc0;
    reg14 = reg14 + loc0;
    reg4 = reg6 - loc3;
    reg6 = reg6 + loc3;
    reg8 = reg10 - loc2;
    reg10 = reg10 + loc2;

    /* stage 2 */
    VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
    VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);

    reg9 = reg1 - loc2;
    reg1 = reg1 + loc2;
    reg7 = reg15 - loc3;
    reg15 = reg15 + loc3;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
    VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
    BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = reg15 + reg3;
    reg3 = reg15 - reg3;
    loc2 = reg2 + loc1;
    reg15 = reg2 - loc1;

    loc1 = reg1 + reg13;
    reg13 = reg1 - reg13;
    loc0 = reg9 + reg5;
    reg5 = reg9 - reg5;
    tmp6 = loc1;
    tmp7 = loc0;
    reg1 = loc2;

    VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5,
                        reg11);

    loc0 = reg7 + reg11;
    reg11 = reg7 - reg11;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
    BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
    BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);

    /* Transpose and store the output */
    SRARI_H4_SH(reg0, reg2, reg4, reg6, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg8, reg10, reg12, reg14, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg3, reg13, reg11, reg5, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg7, reg9, reg1, reg15, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
}
static void vp9_idct16_1d_columns_msa(int16_t *input, int16_t *output)
{
    v8i16 loc0, loc1, loc2, loc3;
    v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
    v8i16 tmp5, tmp6, tmp7;
    v8i16 zero = { 0 };

    /* load up 8x16 */
    LD_SH16(input, 16,
            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 16 * 8;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
    VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
    VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
    VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
    VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = reg2 - loc1;
    reg2 = reg2 + loc1;
    reg12 = reg14 - loc0;
    reg14 = reg14 + loc0;
    reg4 = reg6 - loc3;
    reg6 = reg6 + loc3;
    reg8 = reg10 - loc2;
    reg10 = reg10 + loc2;

    /* stage 2 */
    VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
    VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);

    reg9 = reg1 - loc2;
    reg1 = reg1 + loc2;
    reg7 = reg15 - loc3;
    reg15 = reg15 + loc3;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
    VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
    BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = reg15 + reg3;
    reg3 = reg15 - reg3;
    loc2 = reg2 + loc1;
    reg15 = reg2 - loc1;

    loc1 = reg1 + reg13;
    reg13 = reg1 - reg13;
    loc0 = reg9 + reg5;
    reg5 = reg9 - reg5;
    tmp6 = loc1;
    tmp7 = loc0;
    reg1 = loc2;

    VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5,
                        reg11);

    loc0 = reg7 + reg11;
    reg11 = reg7 - reg11;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
    BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
    BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);

    /* Transpose and store the output */

    /* transpose block */
    TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
                       reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
    ST_SH4(reg0, reg2, reg4, reg6, output, 16);
    ST_SH4(reg8, reg10, reg12, reg14, (output + 4 * 16), 16);

    /* transpose block */
    TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
                       reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
    ST_SH4(reg3, reg13, reg11, reg5, (output + 8), 16);
    ST_SH4(reg7, reg9, reg1, reg15, (output + 8 + 4 * 16), 16);
}
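/* The 16x16 drivers below tile the two 1-D helpers above: the first pass
 * runs the transposing column transform into a 16x16 scratch buffer, and
 * the second pass runs the add-block variant from that buffer onto the
 * destination, each pass working on two 8-column halves. */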
static void vp9_idct16x16_1_add_msa(int16_t *input, uint8_t *dst,
                                    int32_t dst_stride)
{
    uint8_t i;
    int16_t out;
    v8i16 vec, res0, res1, res2, res3, res4, res5, res6, res7;
    v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 6);
    input[0] = 0;

    vec = __msa_fill_h(out);

    for (i = 4; i--;) {
        LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
        UNPCK_UB_SH(dst0, res0, res4);
        UNPCK_UB_SH(dst1, res1, res5);
        UNPCK_UB_SH(dst2, res2, res6);
        UNPCK_UB_SH(dst3, res3, res7);
        ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2,
             res3);
        ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6,
             res7);
        CLIP_SH4_0_255(res0, res1, res2, res3);
        CLIP_SH4_0_255(res4, res5, res6, res7);
        PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
                    tmp0, tmp1, tmp2, tmp3);
        ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
static void vp9_idct16x16_10_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                               int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;

    /* transform rows */
    vp9_idct16_1d_columns_msa(input, out);

    /* short case just considers top 4 rows as valid output */
    out += 4 * 16;
    for (i = 12; i--;) {
        __asm__ volatile (
            "sw     $zero,    0(%[out])    \n\t"
            "sw     $zero,    4(%[out])    \n\t"
            "sw     $zero,    8(%[out])    \n\t"
            "sw     $zero,   12(%[out])    \n\t"
            "sw     $zero,   16(%[out])    \n\t"
            "sw     $zero,   20(%[out])    \n\t"
            "sw     $zero,   24(%[out])    \n\t"
            "sw     $zero,   28(%[out])    \n\t"

            :
            : [out] "r" (out)
        );

        out += 16;
    }

    out = out_arr;

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                         dst_stride);
    }
}
static void vp9_idct16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                            int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_msa((input + (i << 3)), (out + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                         dst_stride);
    }
}
static void vp9_iadst16_1d_columns_msa(int16_t *input, int16_t *output)
{
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
    v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
    v8i16 zero = { 0 };

    /* load input data */
    LD_SH16(input, 16,
            l0, l1, l2, l3, l4, l5, l6, l7,
            l8, l9, l10, l11, l12, l13, l14, l15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 16 * 8;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    /* ADST in horizontal */
    VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
                     l8, l9, l10, l11, l12, l13, l14, l15,
                     r0, r1, r2, r3, r4, r5, r6, r7,
                     r8, r9, r10, r11, r12, r13, r14, r15);

    l1 = -r8;
    l3 = -r4;
    l13 = -r13;
    l15 = -r1;

    TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2,
                       l0, l1, l2, l3, l4, l5, l6, l7);
    ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16);
    TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15,
                       l8, l9, l10, l11, l12, l13, l14, l15);
    ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
}
static void vp9_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                              int32_t dst_stride)
{
    v8i16 v0, v2, v4, v6, k0, k1, k2, k3;
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
    v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 out8, out9, out10, out11, out12, out13, out14, out15;
    v8i16 g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15;
    v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v8i16 res8, res9, res10, res11, res12, res13, res14, res15;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
    v8i16 zero = { 0 };

    r0 = LD_SH(input + 0 * 16);
    r3 = LD_SH(input + 3 * 16);
    r4 = LD_SH(input + 4 * 16);
    r7 = LD_SH(input + 7 * 16);
    r8 = LD_SH(input + 8 * 16);
    r11 = LD_SH(input + 11 * 16);
    r12 = LD_SH(input + 12 * 16);
    r15 = LD_SH(input + 15 * 16);

    /* stage 1 */
    k0 = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
    VP9_MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
    k0 = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
    VP9_MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
    BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0);
    k0 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
    VP9_MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);

    r1 = LD_SH(input + 1 * 16);
    r2 = LD_SH(input + 2 * 16);
    r5 = LD_SH(input + 5 * 16);
    r6 = LD_SH(input + 6 * 16);
    r9 = LD_SH(input + 9 * 16);
    r10 = LD_SH(input + 10 * 16);
    r13 = LD_SH(input + 13 * 16);
    r14 = LD_SH(input + 14 * 16);

    k0 = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
    VP9_MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7);
    k0 = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
    VP9_MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15);
    BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4);
    BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10);
    out1 = -out1;
    SRARI_H2_SH(out0, out1, 6);
    dst0 = LD_UB(dst + 0 * dst_stride);
    dst1 = LD_UB(dst + 15 * dst_stride);
    ILVR_B2_SH(zero, dst0, zero, dst1, res0, res1);
    ADD2(res0, out0, res1, out1, res0, res1);
    CLIP_SH2_0_255(res0, res1);
    PCKEV_B2_SH(res0, res0, res1, res1, res0, res1);
    ST8x1_UB(res0, dst);
    ST8x1_UB(res1, dst + 15 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
    k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
    VP9_MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
    BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);

    out8 = -out8;
    SRARI_H2_SH(out8, out9, 6);
    dst8 = LD_UB(dst + 1 * dst_stride);
    dst9 = LD_UB(dst + 14 * dst_stride);
    ILVR_B2_SH(zero, dst8, zero, dst9, res8, res9);
    ADD2(res8, out8, res9, out9, res8, res9);
    CLIP_SH2_0_255(res8, res9);
    PCKEV_B2_SH(res8, res8, res9, res9, res8, res9);
    ST8x1_UB(res8, dst + dst_stride);
    ST8x1_UB(res9, dst + 14 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
    VP9_MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7);
    out4 = -out4;
    SRARI_H2_SH(out4, out5, 6);
    dst4 = LD_UB(dst + 3 * dst_stride);
    dst5 = LD_UB(dst + 12 * dst_stride);
    ILVR_B2_SH(zero, dst4, zero, dst5, res4, res5);
    ADD2(res4, out4, res5, out5, res4, res5);
    CLIP_SH2_0_255(res4, res5);
    PCKEV_B2_SH(res4, res4, res5, res5, res4, res5);
    ST8x1_UB(res4, dst + 3 * dst_stride);
    ST8x1_UB(res5, dst + 12 * dst_stride);

    VP9_MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
    out13 = -out13;
    SRARI_H2_SH(out12, out13, 6);
    dst12 = LD_UB(dst + 2 * dst_stride);
    dst13 = LD_UB(dst + 13 * dst_stride);
    ILVR_B2_SH(zero, dst12, zero, dst13, res12, res13);
    ADD2(res12, out12, res13, out13, res12, res13);
    CLIP_SH2_0_255(res12, res13);
    PCKEV_B2_SH(res12, res12, res13, res13, res12, res13);
    ST8x1_UB(res12, dst + 2 * dst_stride);
    ST8x1_UB(res13, dst + 13 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
    k3 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
    VP9_MADD_SHORT(out6, out7, k0, k3, out6, out7);
    SRARI_H2_SH(out6, out7, 6);
    dst6 = LD_UB(dst + 4 * dst_stride);
    dst7 = LD_UB(dst + 11 * dst_stride);
    ILVR_B2_SH(zero, dst6, zero, dst7, res6, res7);
    ADD2(res6, out6, res7, out7, res6, res7);
    CLIP_SH2_0_255(res6, res7);
    PCKEV_B2_SH(res6, res6, res7, res7, res6, res7);
    ST8x1_UB(res6, dst + 4 * dst_stride);
    ST8x1_UB(res7, dst + 11 * dst_stride);

    VP9_MADD_SHORT(out10, out11, k0, k3, out10, out11);
    SRARI_H2_SH(out10, out11, 6);
    dst10 = LD_UB(dst + 6 * dst_stride);
    dst11 = LD_UB(dst + 9 * dst_stride);
    ILVR_B2_SH(zero, dst10, zero, dst11, res10, res11);
    ADD2(res10, out10, res11, out11, res10, res11);
    CLIP_SH2_0_255(res10, res11);
    PCKEV_B2_SH(res10, res10, res11, res11, res10, res11);
    ST8x1_UB(res10, dst + 6 * dst_stride);
    ST8x1_UB(res11, dst + 9 * dst_stride);

    k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
    VP9_MADD_SHORT(h10, h11, k1, k2, out2, out3);
    SRARI_H2_SH(out2, out3, 6);
    dst2 = LD_UB(dst + 7 * dst_stride);
    dst3 = LD_UB(dst + 8 * dst_stride);
    ILVR_B2_SH(zero, dst2, zero, dst3, res2, res3);
    ADD2(res2, out2, res3, out3, res2, res3);
    CLIP_SH2_0_255(res2, res3);
    PCKEV_B2_SH(res2, res2, res3, res3, res2, res3);
    ST8x1_UB(res2, dst + 7 * dst_stride);
    ST8x1_UB(res3, dst + 8 * dst_stride);

    VP9_MADD_SHORT(out14, out15, k1, k2, out14, out15);
    SRARI_H2_SH(out14, out15, 6);
    dst14 = LD_UB(dst + 5 * dst_stride);
    dst15 = LD_UB(dst + 10 * dst_stride);
    ILVR_B2_SH(zero, dst14, zero, dst15, res14, res15);
    ADD2(res14, out14, res15, out15, res14, res15);
    CLIP_SH2_0_255(res14, res15);
    PCKEV_B2_SH(res14, res14, res15, res15, res14, res15);
    ST8x1_UB(res14, dst + 5 * dst_stride);
    ST8x1_UB(res15, dst + 10 * dst_stride);
}
static void vp9_iadst16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;
    int32_t i;

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 16 * 8 block */
        vp9_iadst16_1d_columns_msa((input + (i << 3)), (out + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                          dst_stride);
    }
}
static void vp9_iadst_idct_16x16_add_msa(int16_t *input, uint8_t *dst,
                                         int32_t dst_stride, int32_t eob)
{
    int32_t i;
    int16_t out[16 * 16];
    int16_t *out_ptr = &out[0];

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                         (dst + (i << 3)), dst_stride);
    }
}
static void vp9_idct_iadst_16x16_add_msa(int16_t *input, uint8_t *dst,
                                         int32_t dst_stride, int32_t eob)
{
    int32_t i;
    int16_t out[16 * 16];
    int16_t *out_ptr = &out[0];

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                          (dst + (i << 3)), dst_stride);
    }
}
static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf,
                                               int16_t *tmp_eve_buf,
                                               int16_t *tmp_odd_buf,
                                               int16_t *dst)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

    /* FINAL BUTTERFLY : Dependency on Even & Odd */
    vec0 = LD_SH(tmp_odd_buf);
    vec1 = LD_SH(tmp_odd_buf + 9 * 8);
    vec2 = LD_SH(tmp_odd_buf + 14 * 8);
    vec3 = LD_SH(tmp_odd_buf + 6 * 8);
    loc0 = LD_SH(tmp_eve_buf);
    loc1 = LD_SH(tmp_eve_buf + 8 * 8);
    loc2 = LD_SH(tmp_eve_buf + 4 * 8);
    loc3 = LD_SH(tmp_eve_buf + 12 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);

    ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 4 * 8);
    vec1 = LD_SH(tmp_odd_buf + 13 * 8);
    vec2 = LD_SH(tmp_odd_buf + 10 * 8);
    vec3 = LD_SH(tmp_odd_buf + 3 * 8);
    loc0 = LD_SH(tmp_eve_buf + 2 * 8);
    loc1 = LD_SH(tmp_eve_buf + 10 * 8);
    loc2 = LD_SH(tmp_eve_buf + 6 * 8);
    loc3 = LD_SH(tmp_eve_buf + 14 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);

    ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 2 * 8);
    vec1 = LD_SH(tmp_odd_buf + 11 * 8);
    vec2 = LD_SH(tmp_odd_buf + 12 * 8);
    vec3 = LD_SH(tmp_odd_buf + 7 * 8);
    loc0 = LD_SH(tmp_eve_buf + 1 * 8);
    loc1 = LD_SH(tmp_eve_buf + 9 * 8);
    loc2 = LD_SH(tmp_eve_buf + 5 * 8);
    loc3 = LD_SH(tmp_eve_buf + 13 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);

    ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 5 * 8);
    vec1 = LD_SH(tmp_odd_buf + 15 * 8);
    vec2 = LD_SH(tmp_odd_buf + 8 * 8);
    vec3 = LD_SH(tmp_odd_buf + 1 * 8);
    loc0 = LD_SH(tmp_eve_buf + 3 * 8);
    loc1 = LD_SH(tmp_eve_buf + 11 * 8);
    loc2 = LD_SH(tmp_eve_buf + 7 * 8);
    loc3 = LD_SH(tmp_eve_buf + 15 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);

    ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
    ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
    ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
    ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));

    /* Transpose : 16 vectors */
    TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                       m0, n0, m1, n1, m2, n2, m3, n3);
    ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
    ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);

    TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                       m4, n4, m5, n5, m6, n6, m7, n7);
    ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
    ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);

    LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
    LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
    TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                       m0, n0, m1, n1, m2, n2, m3, n3);
    ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
    ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);

    TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                       m4, n4, m5, n5, m6, n6, m7, n7);
    ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
    ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
}
static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf,
                                                   int16_t *tmp_eve_buf)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
    v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
    v8i16 zero = { 0 };

    /* Even stage 1 */
    LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));
    tmp_buf += (2 * 32);

    VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
    VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
    BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
    VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

    loc1 = vec3;
    loc0 = vec1;

    VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
    VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
    BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
    BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
    BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

    /* Even stage 2 */
    /* Load 8 */
    LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));

    VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
    VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
    VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

    vec0 = reg0 + reg4;
    reg0 = reg0 - reg4;
    reg4 = reg6 + reg2;
    reg6 = reg6 - reg2;
    reg2 = reg1 + reg5;
    reg1 = reg1 - reg5;
    reg5 = reg7 + reg3;
    reg7 = reg7 - reg3;
    reg3 = vec0;

    vec1 = reg2;
    reg2 = reg3 + reg4;
    reg3 = reg3 - reg4;
    reg4 = reg5 - vec1;
    reg5 = reg5 + vec1;

    VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

    vec0 = reg0 - reg6;
    reg0 = reg0 + reg6;
    vec1 = reg7 - reg1;
    reg7 = reg7 + reg1;

    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
    VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

    /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
    /* Store 8 */
    BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, tmp_eve_buf, 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);

    BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);

    /* Store 8 */
    BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);

    BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
    ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
    ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
}
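
/* Odd half of one 8x32 column pass: transforms the sixteen odd input
 * rows of an 8-column slice, zeroing them as they are read, and stores
 * the sixteen intermediate vectors to tmp_odd_buf. */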
static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
                                                  int16_t *tmp_odd_buf)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
    v8i16 zero = { 0 };

    /* Odd stage 1 */
    reg0 = LD_SH(tmp_buf + 32);
    reg1 = LD_SH(tmp_buf + 7 * 32);
    reg2 = LD_SH(tmp_buf + 9 * 32);
    reg3 = LD_SH(tmp_buf + 15 * 32);
    reg4 = LD_SH(tmp_buf + 17 * 32);
    reg5 = LD_SH(tmp_buf + 23 * 32);
    reg6 = LD_SH(tmp_buf + 25 * 32);
    reg7 = LD_SH(tmp_buf + 31 * 32);

    ST_SH(zero, tmp_buf + 32);
    ST_SH(zero, tmp_buf + 7 * 32);
    ST_SH(zero, tmp_buf + 9 * 32);
    ST_SH(zero, tmp_buf + 15 * 32);
    ST_SH(zero, tmp_buf + 17 * 32);
    ST_SH(zero, tmp_buf + 23 * 32);
    ST_SH(zero, tmp_buf + 25 * 32);
    ST_SH(zero, tmp_buf + 31 * 32);

    VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
    VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
    VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

    vec0 = reg0 + reg3;
    reg0 = reg0 - reg3;
    reg3 = reg7 + reg4;
    reg7 = reg7 - reg4;
    reg4 = reg1 + reg2;
    reg1 = reg1 - reg2;
    reg2 = reg6 + reg5;
    reg6 = reg6 - reg5;
    reg5 = vec0;

    /* 4 Stores */
    ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
    SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
    ST_SH2(vec0, vec1, tmp_odd_buf, 8);

    /* 4 Stores */
    VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
    VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
    BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
    VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
    ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

    /* Odd stage 2 */
    /* 8 loads */
    reg0 = LD_SH(tmp_buf + 3 * 32);
    reg1 = LD_SH(tmp_buf + 5 * 32);
    reg2 = LD_SH(tmp_buf + 11 * 32);
    reg3 = LD_SH(tmp_buf + 13 * 32);
    reg4 = LD_SH(tmp_buf + 19 * 32);
    reg5 = LD_SH(tmp_buf + 21 * 32);
    reg6 = LD_SH(tmp_buf + 27 * 32);
    reg7 = LD_SH(tmp_buf + 29 * 32);

    ST_SH(zero, tmp_buf + 3 * 32);
    ST_SH(zero, tmp_buf + 5 * 32);
    ST_SH(zero, tmp_buf + 11 * 32);
    ST_SH(zero, tmp_buf + 13 * 32);
    ST_SH(zero, tmp_buf + 19 * 32);
    ST_SH(zero, tmp_buf + 21 * 32);
    ST_SH(zero, tmp_buf + 27 * 32);
    ST_SH(zero, tmp_buf + 29 * 32);

    VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
    VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
    VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
    VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

    /* 4 Stores */
    SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
         vec0, vec1, vec2, vec3);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
    VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
    BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
    VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
    ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

    /* 4 Stores */
    ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7,
         vec0, vec1, vec2, vec3);
    BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
    ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
    VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
    ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

    /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
    /* Load 8 & Store 8 */
    LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
    LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

    ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
         loc0, loc1, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

    SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

    SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

    /* Load 8 & Store 8 */
    LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
    LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

    ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
         loc0, loc1, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

    SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

    SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
    VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
    ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}
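
/* Final butterfly of one 8x32 column pass: combines the even and odd
 * halves so that output rows k and 31 - k come from the sum and the
 * difference of the same pair, rounds by 6 bits (SRARI) and adds the
 * residual into the destination pixels. */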
static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
                                                 int16_t *tmp_odd_buf,
                                                 uint8_t *dst,
                                                 int32_t dst_stride)
{
    v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
    v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

    /* FINAL BUTTERFLY : Dependency on Even & Odd */
    vec0 = LD_SH(tmp_odd_buf);
    vec1 = LD_SH(tmp_odd_buf + 9 * 8);
    vec2 = LD_SH(tmp_odd_buf + 14 * 8);
    vec3 = LD_SH(tmp_odd_buf + 6 * 8);
    loc0 = LD_SH(tmp_eve_buf);
    loc1 = LD_SH(tmp_eve_buf + 8 * 8);
    loc2 = LD_SH(tmp_eve_buf + 4 * 8);
    loc3 = LD_SH(tmp_eve_buf + 12 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
    SRARI_H4_SH(m0, m2, m4, m6, 6);
    VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
    SRARI_H4_SH(m0, m2, m4, m6, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
                        m0, m2, m4, m6);

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 4 * 8);
    vec1 = LD_SH(tmp_odd_buf + 13 * 8);
    vec2 = LD_SH(tmp_odd_buf + 10 * 8);
    vec3 = LD_SH(tmp_odd_buf + 3 * 8);
    loc0 = LD_SH(tmp_eve_buf + 2 * 8);
    loc1 = LD_SH(tmp_eve_buf + 10 * 8);
    loc2 = LD_SH(tmp_eve_buf + 6 * 8);
    loc3 = LD_SH(tmp_eve_buf + 14 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
    SRARI_H4_SH(m1, m3, m5, m7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
                        m1, m3, m5, m7);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
    SRARI_H4_SH(m1, m3, m5, m7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
                        m1, m3, m5, m7);

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 2 * 8);
    vec1 = LD_SH(tmp_odd_buf + 11 * 8);
    vec2 = LD_SH(tmp_odd_buf + 12 * 8);
    vec3 = LD_SH(tmp_odd_buf + 7 * 8);
    loc0 = LD_SH(tmp_eve_buf + 1 * 8);
    loc1 = LD_SH(tmp_eve_buf + 9 * 8);
    loc2 = LD_SH(tmp_eve_buf + 5 * 8);
    loc3 = LD_SH(tmp_eve_buf + 13 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
    SRARI_H4_SH(n0, n2, n4, n6, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
                        n0, n2, n4, n6);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
    SRARI_H4_SH(n0, n2, n4, n6, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
                        n0, n2, n4, n6);

    /* Load 8 & Store 8 */
    vec0 = LD_SH(tmp_odd_buf + 5 * 8);
    vec1 = LD_SH(tmp_odd_buf + 15 * 8);
    vec2 = LD_SH(tmp_odd_buf + 8 * 8);
    vec3 = LD_SH(tmp_odd_buf + 1 * 8);
    loc0 = LD_SH(tmp_eve_buf + 3 * 8);
    loc1 = LD_SH(tmp_eve_buf + 11 * 8);
    loc2 = LD_SH(tmp_eve_buf + 7 * 8);
    loc3 = LD_SH(tmp_eve_buf + 15 * 8);

    ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
    SRARI_H4_SH(n1, n3, n5, n7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
                        n1, n3, n5, n7);

    SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
    SRARI_H4_SH(n1, n3, n5, n7, 6);
    VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
                        n1, n3, n5, n7);
}
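
/* One 8x32 slice of the second (vertical) pass: even and odd column
 * processing into scratch buffers, then the butterfly that adds the
 * result into the picture. */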
static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                               int32_t dst_stride)
{
    int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
    int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);

    vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
    vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
    vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
                                         dst, dst_stride);
}
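
/* One 8x32 slice of the first (horizontal) pass: same even/odd column
 * processing, but the butterfly output is transposed and stored to the
 * intermediate buffer instead of being added to pixels. */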
static void vp9_idct8x32_1d_columns_msa(int16_t *input, int16_t *output,
                                        int16_t *tmp_buf)
{
    int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
    int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);

    vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
    vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
    vp9_idct_butterfly_transpose_store(tmp_buf, &tmp_eve_buf[0],
                                       &tmp_odd_buf[0], output);
}
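
/* DC-only 32x32 case: with input[0] as the only nonzero coefficient,
 * every output pixel receives the same rounded offset, computed once in
 * scalar code and splatted across a vector. */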
static void vp9_idct32x32_1_add_msa(int16_t *input, uint8_t *dst,
                                    int32_t dst_stride)
{
    int32_t i;
    int16_t out;
    v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;

    /* Two passes of cospi_16_64 (cos(pi/4) in Q14, i.e. ~1/sqrt(2)) scale
     * the DC by one half; the final 6-bit rounding matches the rounding of
     * the full two-pass transform. */
    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 6);
    input[0] = 0;

    vec = __msa_fill_h(out);

    for (i = 16; i--;) {
        LD_UB2(dst, 16, dst0, dst1);
        LD_UB2(dst + dst_stride, 16, dst2, dst3);

        UNPCK_UB_SH(dst0, res0, res4);
        UNPCK_UB_SH(dst1, res1, res5);
        UNPCK_UB_SH(dst2, res2, res6);
        UNPCK_UB_SH(dst3, res3, res7);
        ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2,
             res3);
        ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6,
             res7);
        CLIP_SH4_0_255(res0, res1, res2, res3);
        CLIP_SH4_0_255(res4, res5, res6, res7);
        PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
                    tmp0, tmp1, tmp2, tmp3);

        ST_UB2(tmp0, tmp1, dst, 16);
        dst += dst_stride;
        ST_UB2(tmp2, tmp3, dst, 16);
        dst += dst_stride;
    }
}
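
/* 32x32 IDCT for eob <= 34: all nonzero coefficients fit inside the first
 * 8-column slice, so a single horizontal-pass call suffices. That pass
 * fills only the top 8 rows of out_arr, so the buffer is cleared first and
 * the vertical pass reads zeros everywhere else. */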
static void vp9_idct32x32_34_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                               int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out_ptr = out_arr;
    int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT);

    /* clear the intermediate buffer, one 64-byte row per iteration */
    for (i = 32; i--;) {
        __asm__ volatile (
            "sw     $zero,     (%[out_ptr])     \n\t"
            "sw     $zero,    4(%[out_ptr])     \n\t"
            "sw     $zero,    8(%[out_ptr])     \n\t"
            "sw     $zero,   12(%[out_ptr])     \n\t"
            "sw     $zero,   16(%[out_ptr])     \n\t"
            "sw     $zero,   20(%[out_ptr])     \n\t"
            "sw     $zero,   24(%[out_ptr])     \n\t"
            "sw     $zero,   28(%[out_ptr])     \n\t"
            "sw     $zero,   32(%[out_ptr])     \n\t"
            "sw     $zero,   36(%[out_ptr])     \n\t"
            "sw     $zero,   40(%[out_ptr])     \n\t"
            "sw     $zero,   44(%[out_ptr])     \n\t"
            "sw     $zero,   48(%[out_ptr])     \n\t"
            "sw     $zero,   52(%[out_ptr])     \n\t"
            "sw     $zero,   56(%[out_ptr])     \n\t"
            "sw     $zero,   60(%[out_ptr])     \n\t"

            :
            : [out_ptr] "r" (out_ptr)
        );

        out_ptr += 32;
    }

    out_ptr = out_arr;

    /* process 8*32 block */
    vp9_idct8x32_1d_columns_msa(input, out_ptr, &tmp_buf[0]);

    /* transform columns */
    for (i = 0; i < 4; i++) {
        /* process 8*32 block */
        vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
    }
}
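
/* Full 32x32 inverse transform: four 8x32 slices of the horizontal pass
 * produce the row-transformed block in out_arr, then four 8x32 slices of
 * the vertical pass add the final residual into the destination. */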
static void vp9_idct32x32_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                            int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out_ptr = out_arr;
    int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT);

    /* transform rows */
    for (i = 0; i < 4; i++) {
        /* process 8*32 block */
        vp9_idct8x32_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 8)),
                                    &tmp_buf[0]);
    }

    /* transform columns */
    for (i = 0; i < 4; i++) {
        /* process 8*32 block */
        vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                           (dst + (i << 3)), dst_stride);
    }
}
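
/* Decoder entry points: each wrapper dispatches on the end-of-block
 * position (eob) to the cheapest kernel that still covers every nonzero
 * coefficient: DC-only, reduced, or full transform. */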
void ff_idct_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                              int16_t *block, int eob)
{
    if (eob > 1) {
        vp9_idct4x4_colcol_addblk_msa(block, dst, stride);
    }
    else {
        vp9_idct4x4_1_add_msa(block, dst, stride);
    }
}

void ff_idct_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                              int16_t *block, int eob)
{
    if (eob == 1) {
        vp9_idct8x8_1_add_msa(block, dst, stride);
    }
    else if (eob <= 12) {
        vp9_idct8x8_12_colcol_addblk_msa(block, dst, stride);
    }
    else {
        vp9_idct8x8_colcol_addblk_msa(block, dst, stride);
    }
}

void ff_idct_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    if (eob == 1) {
        /* DC only DCT coefficient. */
        vp9_idct16x16_1_add_msa(block, dst, stride);
    }
    else if (eob <= 10) {
        vp9_idct16x16_10_colcol_addblk_msa(block, dst, stride);
    }
    else {
        vp9_idct16x16_colcol_addblk_msa(block, dst, stride);
    }
}

void ff_idct_idct_32x32_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    if (eob == 1) {
        vp9_idct32x32_1_add_msa(block, dst, stride);
    }
    else if (eob <= 34) {
        vp9_idct32x32_34_colcol_addblk_msa(block, dst, stride);
    }
    else {
        vp9_idct32x32_colcol_addblk_msa(block, dst, stride);
    }
}

void ff_iadst_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    vp9_iadst4x4_colcol_addblk_msa(block, dst, stride);
}

void ff_iadst_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                                int16_t *block, int eob)
{
    vp9_iadst8x8_colcol_addblk_msa(block, dst, stride);
}

void ff_iadst_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                  int16_t *block, int eob)
{
    vp9_iadst16x16_colcol_addblk_msa(block, dst, stride);
}

void ff_idct_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_idct_iadst_4x4_add_msa(block, dst, stride, eob);
}

void ff_idct_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_idct_iadst_8x8_add_msa(block, dst, stride, eob);
}

void ff_idct_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
{
    vp9_idct_iadst_16x16_add_msa(block, dst, stride, eob);
}

void ff_iadst_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_iadst_idct_4x4_add_msa(block, dst, stride, eob);
}

void ff_iadst_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
                               int16_t *block, int eob)
{
    vp9_iadst_idct_8x8_add_msa(block, dst, stride, eob);
}

void ff_iadst_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
                                 int16_t *block, int eob)
{
    vp9_iadst_idct_16x16_add_msa(block, dst, stride, eob);
}