/*
 * Copyright (c) 2015 - 2017 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mips/generic_macros_msa.h"
#include "libavcodec/mips/hevcdsp_mips.h"
#include "libavcodec/mips/hevc_macros_msa.h"

static const uint8_t ff_hevc_mask_arr[16 * 2] __attribute__((aligned(0x40))) = {
    /* 8 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
    /* 4 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
};

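/* The hevc_copy_*w_msa functions unpack 8-bit source pixels into the 16-bit
 * intermediate format used between HEVC filter stages: each pixel is
 * zero-extended and shifted left by 6. */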
static void hevc_copy_4w_msa(uint8_t *src, int32_t src_stride,
                             int16_t *dst, int32_t dst_stride,
                             int32_t height)
{
    v16i8 zero = { 0 };

    if (2 == height) {
        v16i8 src0, src1;
        v8i16 in0;

        LD_SB2(src, src_stride, src0, src1);

        src0 = (v16i8) __msa_ilvr_w((v4i32) src1, (v4i32) src0);
        in0 = (v8i16) __msa_ilvr_b(zero, src0);
        in0 = in0 << 6;
        ST8x2_UB(in0, dst, 2 * dst_stride);
    } else if (4 == height) {
        v16i8 src0, src1, src2, src3;
        v8i16 in0, in1;

        LD_SB4(src, src_stride, src0, src1, src2, src3);

        ILVR_W2_SB(src1, src0, src3, src2, src0, src1);
        ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
        in0 = in0 << 6;
        in1 = in1 << 6;
        ST8x4_UB(in0, in1, dst, 2 * dst_stride);
    } else if (0 == height % 8) {
        v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
        v8i16 in0, in1, in2, in3;
        uint32_t loop_cnt;

        for (loop_cnt = (height >> 3); loop_cnt--;) {
            LD_SB8(src, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src += (8 * src_stride);

            ILVR_W4_SB(src1, src0, src3, src2, src5, src4, src7, src6,
                       src0, src1, src2, src3);
            ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                       in0, in1, in2, in3);
            SLLI_4V(in0, in1, in2, in3, 6);
            ST8x8_UB(in0, in1, in2, in3, dst, 2 * dst_stride);
            dst += (8 * dst_stride);
        }
    }
}

static void hevc_copy_6w_msa(uint8_t *src, int32_t src_stride,
                             int16_t *dst, int32_t dst_stride,
                             int32_t height)
{
    uint32_t loop_cnt;
    v16i8 zero = { 0 };
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

    for (loop_cnt = (height >> 3); loop_cnt--;) {
        LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
        src += (8 * src_stride);

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0, in1, in2, in3);
        ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
                   in4, in5, in6, in7);
        SLLI_4V(in0, in1, in2, in3, 6);
        SLLI_4V(in4, in5, in6, in7, 6);
        ST12x8_UB(in0, in1, in2, in3, in4, in5, in6, in7, dst, 2 * dst_stride);
        dst += (8 * dst_stride);
    }
}

static void hevc_copy_8w_msa(uint8_t *src, int32_t src_stride,
                             int16_t *dst, int32_t dst_stride,
                             int32_t height)
{
    v16i8 zero = { 0 };

    if (2 == height) {
        v16i8 src0, src1;
        v8i16 in0, in1;

        LD_SB2(src, src_stride, src0, src1);

        ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
        in0 = in0 << 6;
        in1 = in1 << 6;
        ST_SH2(in0, in1, dst, dst_stride);
    } else if (4 == height) {
        v16i8 src0, src1, src2, src3;
        v8i16 in0, in1, in2, in3;

        LD_SB4(src, src_stride, src0, src1, src2, src3);

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0, in1, in2, in3);
        SLLI_4V(in0, in1, in2, in3, 6);
        ST_SH4(in0, in1, in2, in3, dst, dst_stride);
    } else if (6 == height) {
        v16i8 src0, src1, src2, src3, src4, src5;
        v8i16 in0, in1, in2, in3, in4, in5;

        LD_SB6(src, src_stride, src0, src1, src2, src3, src4, src5);

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0, in1, in2, in3);
        ILVR_B2_SH(zero, src4, zero, src5, in4, in5);
        SLLI_4V(in0, in1, in2, in3, 6);
        in4 = in4 << 6;
        in5 = in5 << 6;
        ST_SH6(in0, in1, in2, in3, in4, in5, dst, dst_stride);
    } else if (0 == height % 8) {
        uint32_t loop_cnt;
        v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
        v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

        for (loop_cnt = (height >> 3); loop_cnt--;) {
            LD_SB8(src, src_stride,
                   src0, src1, src2, src3, src4, src5, src6, src7);
            src += (8 * src_stride);

            ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                       in0, in1, in2, in3);
            ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
                       in4, in5, in6, in7);
            SLLI_4V(in0, in1, in2, in3, 6);
            SLLI_4V(in4, in5, in6, in7, 6);
            ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, dst, dst_stride);
            dst += (8 * dst_stride);
        }
    }
}

static void hevc_copy_12w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    v16i8 zero = { 0 };
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 in0, in1, in0_r, in1_r, in2_r, in3_r;

    for (loop_cnt = (height >> 3); loop_cnt--;) {
        LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
        src += (8 * src_stride);

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_r, in1_r, in2_r, in3_r);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        ILVL_W2_SB(src1, src0, src3, src2, src0, src1);
        ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
        in0 = in0 << 6;
        in1 = in1 << 6;
        ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
        ST8x4_UB(in0, in1, dst + 8, 2 * dst_stride);
        dst += (4 * dst_stride);

        ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
                   in0_r, in1_r, in2_r, in3_r);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        ILVL_W2_SB(src5, src4, src7, src6, src0, src1);
        ILVR_B2_SH(zero, src0, zero, src1, in0, in1);
        in0 = in0 << 6;
        in1 = in1 << 6;
        ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
        ST8x4_UB(in0, in1, dst + 8, 2 * dst_stride);
        dst += (4 * dst_stride);
    }
}

static void hevc_copy_16w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    v16i8 zero = { 0 };

    if (4 == height) {
        v16i8 src0, src1, src2, src3;
        v8i16 in0_r, in1_r, in2_r, in3_r;
        v8i16 in0_l, in1_l, in2_l, in3_l;

        LD_SB4(src, src_stride, src0, src1, src2, src3);

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_l, in1_l, in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
        ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
    } else if (12 == height) {
        v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
        v16i8 src8, src9, src10, src11;
        v8i16 in0_r, in1_r, in2_r, in3_r;
        v8i16 in0_l, in1_l, in2_l, in3_l;

        LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
        src += (8 * src_stride);
        LD_SB4(src, src_stride, src8, src9, src10, src11);

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_l, in1_l, in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
        ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
        dst += (4 * dst_stride);

        ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
                   in0_l, in1_l, in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
        ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
        dst += (4 * dst_stride);

        ILVR_B4_SH(zero, src8, zero, src9, zero, src10, zero, src11,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src8, zero, src9, zero, src10, zero, src11,
                   in0_l, in1_l, in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
        ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
    } else if (0 == (height % 8)) {
        uint32_t loop_cnt;
        v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
        v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

        for (loop_cnt = (height >> 3); loop_cnt--;) {
            LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6,
                   src7);
            src += (8 * src_stride);
            ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_r,
                       in1_r, in2_r, in3_r);
            ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_l,
                       in1_l, in2_l, in3_l);
            SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
            SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
            ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
            ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
            dst += (4 * dst_stride);

            ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_r,
                       in1_r, in2_r, in3_r);
            ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_l,
                       in1_l, in2_l, in3_l);
            SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
            SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
            ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
            ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
            dst += (4 * dst_stride);
        }
    }
}

static void hevc_copy_24w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    v16i8 zero = { 0 };
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        LD_SB4((src + 16), src_stride, src4, src5, src6, src7);
        src += (4 * src_stride);
        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_r, in1_r,
                   in2_r, in3_r);
        ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_l, in1_l,
                   in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in1_r, in2_r, in3_r, dst, dst_stride);
        ST_SH4(in0_l, in1_l, in2_l, in3_l, (dst + 8), dst_stride);
        ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_r, in1_r,
                   in2_r, in3_r);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        ST_SH4(in0_r, in1_r, in2_r, in3_r, (dst + 16), dst_stride);
        dst += (4 * dst_stride);
    }
}

static void hevc_copy_32w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    v16i8 zero = { 0 };
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src2, src4, src6);
        LD_SB4((src + 16), src_stride, src1, src3, src5, src7);
        src += (4 * src_stride);

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_r, in1_r,
                   in2_r, in3_r);
        ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3, in0_l, in1_l,
                   in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
        dst += dst_stride;
        ST_SH4(in2_r, in2_l, in3_r, in3_l, dst, 8);
        dst += dst_stride;

        ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_r, in1_r,
                   in2_r, in3_r);
        ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7, in0_l, in1_l,
                   in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
        dst += dst_stride;
        ST_SH4(in2_r, in2_l, in3_r, in3_l, dst, 8);
        dst += dst_stride;
    }
}

static void hevc_copy_48w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    v16i8 zero = { 0 };
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16i8 src8, src9, src10, src11;
    v8i16 in0_r, in1_r, in2_r, in3_r, in4_r, in5_r;
    v8i16 in0_l, in1_l, in2_l, in3_l, in4_l, in5_l;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB3(src, 16, src0, src1, src2);
        src += src_stride;
        LD_SB3(src, 16, src3, src4, src5);
        src += src_stride;
        LD_SB3(src, 16, src6, src7, src8);
        src += src_stride;
        LD_SB3(src, 16, src9, src10, src11);
        src += src_stride;

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_l, in1_l, in2_l, in3_l);
        ILVR_B2_SH(zero, src4, zero, src5, in4_r, in5_r);
        ILVL_B2_SH(zero, src4, zero, src5, in4_l, in5_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        SLLI_4V(in4_r, in5_r, in4_l, in5_l, 6);
        ST_SH6(in0_r, in0_l, in1_r, in1_l, in2_r, in2_l, dst, 8);
        dst += dst_stride;
        ST_SH6(in3_r, in3_l, in4_r, in4_l, in5_r, in5_l, dst, 8);
        dst += dst_stride;

        ILVR_B4_SH(zero, src6, zero, src7, zero, src8, zero, src9,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src6, zero, src7, zero, src8, zero, src9,
                   in0_l, in1_l, in2_l, in3_l);
        ILVR_B2_SH(zero, src10, zero, src11, in4_r, in5_r);
        ILVL_B2_SH(zero, src10, zero, src11, in4_l, in5_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        SLLI_4V(in4_r, in5_r, in4_l, in5_l, 6);
        ST_SH6(in0_r, in0_l, in1_r, in1_l, in2_r, in2_l, dst, 8);
        dst += dst_stride;
        ST_SH6(in3_r, in3_l, in4_r, in4_l, in5_r, in5_l, dst, 8);
        dst += dst_stride;
    }
}

static void hevc_copy_64w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    v16i8 zero = { 0 };
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        LD_SB4(src, 16, src0, src1, src2, src3);
        src += src_stride;
        LD_SB4(src, 16, src4, src5, src6, src7);
        src += src_stride;

        ILVR_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src0, zero, src1, zero, src2, zero, src3,
                   in0_l, in1_l, in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
        ST_SH4(in2_r, in2_l, in3_r, in3_l, (dst + 32), 8);
        dst += dst_stride;

        ILVR_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
                   in0_r, in1_r, in2_r, in3_r);
        ILVL_B4_SH(zero, src4, zero, src5, zero, src6, zero, src7,
                   in0_l, in1_l, in2_l, in3_l);
        SLLI_4V(in0_r, in1_r, in2_r, in3_r, 6);
        SLLI_4V(in0_l, in1_l, in2_l, in3_l, 6);
        ST_SH4(in0_r, in0_l, in1_r, in1_l, dst, 8);
        ST_SH4(in2_r, in2_l, in3_r, in3_l, (dst + 32), 8);
        dst += dst_stride;
    }
}

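/* 8-tap horizontal (luma) filters: mask0..mask7 drive VSHF_B to gather the
 * sliding 8-pixel windows, and DPADD_SB* accumulates the signed 8-bit
 * products on top of const_vec (128 << 6), which cancels the -128 bias
 * introduced by XOR-ing the source bytes with 128 (the taps sum to 64). */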
static void hevc_hz_8t_4w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);

    src -= 3;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;

    for (loop_cnt = (height >> 3); loop_cnt--;) {
        LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
        src += (8 * src_stride);
        XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);

        VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst0 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst0, dst0, dst0, dst0);
        VSHF_B4_SB(src2, src3, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst1 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst1, dst1, dst1, dst1);
        VSHF_B4_SB(src4, src5, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst2 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst2, dst2, dst2, dst2);
        VSHF_B4_SB(src6, src7, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst3 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst3, dst3, dst3, dst3);

        ST8x8_UB(dst0, dst1, dst2, dst3, dst, 2 * dst_stride);
        dst += (8 * dst_stride);
    }
}

static void hevc_hz_8t_8w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);

    src -= 3;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);
        XORI_B4_128_SB(src0, src1, src2, src3);

        VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst0 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst0, dst0, dst0, dst0);
        VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst1 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst1, dst1, dst1, dst1);
        VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst2 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst2, dst2, dst2, dst2);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst3 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst3, dst3, dst3, dst3);

        ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}

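/* The 12-wide variant filters the left 8 columns with the 8-width masks
 * (mask0..mask3) and the remaining 4 columns with the 4-width masks
 * (mask4..mask7), storing the narrow results via 64-bit copies (SD4). */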
static void hevc_hz_8t_12w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    int64_t res0, res1, res2, res3;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v8i16 filt0, filt1, filt2, filt3, dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, const_vec;

    src -= 3;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask0 = LD_SB(ff_hevc_mask_arr);
    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;
    mask4 = LD_SB(ff_hevc_mask_arr + 16);
    mask5 = mask4 + 2;
    mask6 = mask4 + 4;
    mask7 = mask4 + 6;

    for (loop_cnt = 4; loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        LD_SB4(src + 8, src_stride, src4, src5, src6, src7);
        src += (4 * src_stride);
        XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);

        dst0 = const_vec;
        dst1 = const_vec;
        dst2 = const_vec;
        dst3 = const_vec;
        dst4 = const_vec;
        dst5 = const_vec;
        VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
        VSHF_B2_SB(src4, src5, src6, src7, mask4, mask4, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt0, filt0, dst4, dst5);
        VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2, vec3);
        VSHF_B2_SB(src4, src5, src6, src7, mask5, mask5, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt1, filt1, dst4, dst5);
        VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2, vec3);
        VSHF_B2_SB(src4, src5, src6, src7, mask6, mask6, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt2, filt2, dst4, dst5);
        VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec2, vec3);
        VSHF_B2_SB(src4, src5, src6, src7, mask7, mask7, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt3, filt3, dst4, dst5);

        res0 = __msa_copy_s_d((v2i64) dst4, 0);
        res1 = __msa_copy_s_d((v2i64) dst4, 1);
        res2 = __msa_copy_s_d((v2i64) dst5, 0);
        res3 = __msa_copy_s_d((v2i64) dst5, 1);
        ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
        SD4(res0, res1, res2, res3, (dst + 8), dst_stride);
        dst += (4 * dst_stride);
    }
}

static void hevc_hz_8t_16w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask1, mask2, mask3;
    v16i8 vec0, vec1, vec2, vec3;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);

    src -= 3;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        LD_SB2(src, src_stride, src0, src2);
        LD_SB2(src + 8, src_stride, src1, src3);
        src += (2 * src_stride);
        XORI_B4_128_SB(src0, src1, src2, src3);

        dst0 = const_vec;
        dst1 = const_vec;
        dst2 = const_vec;
        dst3 = const_vec;
        VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
                     dst1, dst2, dst3);
        VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
                     dst1, dst2, dst3);
        VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
                     dst1, dst2, dst3);
        VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec0, vec1);
        VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
                     dst1, dst2, dst3);

        ST_SH2(dst0, dst2, dst, dst_stride);
        ST_SH2(dst1, dst3, dst + 8, dst_stride);
        dst += (2 * dst_stride);
    }
}

static void hevc_hz_8t_24w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);

    src -= 3;
    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;
    mask4 = mask0 + 8;
    mask5 = mask0 + 10;
    mask6 = mask0 + 12;
    mask7 = mask0 + 14;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        LD_SB2(src, 16, src0, src1);
        src += src_stride;
        LD_SB2(src, 16, src2, src3);
        src += src_stride;
        XORI_B4_128_SB(src0, src1, src2, src3);

        dst0 = const_vec;
        dst1 = const_vec;
        dst2 = const_vec;
        dst3 = const_vec;
        dst4 = const_vec;
        dst5 = const_vec;
        VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
        VSHF_B2_SB(src1, src1, src2, src2, mask0, mask0, vec2, vec3);
        VSHF_B2_SB(src2, src3, src3, src3, mask4, mask0, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt0, filt0, dst4, dst5);
        VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
        VSHF_B2_SB(src1, src1, src2, src2, mask1, mask1, vec2, vec3);
        VSHF_B2_SB(src2, src3, src3, src3, mask5, mask1, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt1, filt1, dst4, dst5);
        VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
        VSHF_B2_SB(src1, src1, src2, src2, mask2, mask2, vec2, vec3);
        VSHF_B2_SB(src2, src3, src3, src3, mask6, mask2, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt2, filt2, dst4, dst5);
        VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
        VSHF_B2_SB(src1, src1, src2, src2, mask3, mask3, vec2, vec3);
        VSHF_B2_SB(src2, src3, src3, src3, mask7, mask3, vec4, vec5);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
                     dst1, dst2, dst3);
        DPADD_SB2_SH(vec4, vec5, filt3, filt3, dst4, dst5);

        ST_SH2(dst0, dst1, dst, 8);
        ST_SH(dst2, dst + 16);
        dst += dst_stride;
        ST_SH2(dst3, dst4, dst, 8);
        ST_SH(dst5, dst + 16);
        dst += dst_stride;
    }
}

static void hevc_hz_8t_32w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);

    src -= 3;
    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;
    mask4 = mask0 + 8;
    mask5 = mask0 + 10;
    mask6 = mask0 + 12;
    mask7 = mask0 + 14;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = height; loop_cnt--;) {
        LD_SB2(src, 16, src0, src1);
        src2 = LD_SB(src + 24);
        src += src_stride;
        XORI_B3_128_SB(src0, src1, src2);

        VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst0 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst0, dst0, dst0, dst0);
        VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
                   vec0, vec1, vec2, vec3);
        dst1 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst1, dst1, dst1, dst1);
        VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst2 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst2, dst2, dst2, dst2);
        VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst3 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst3, dst3, dst3, dst3);

        ST_SH4(dst0, dst1, dst2, dst3, dst, 8);
        dst += dst_stride;
    }
}

static void hevc_hz_8t_48w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);

    src -= 3;
    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;
    mask4 = mask0 + 8;
    mask5 = mask0 + 10;
    mask6 = mask0 + 12;
    mask7 = mask0 + 14;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = height; loop_cnt--;) {
        LD_SB3(src, 16, src0, src1, src2);
        src3 = LD_SB(src + 40);
        src += src_stride;
        XORI_B4_128_SB(src0, src1, src2, src3);

        dst0 = const_vec;
        dst1 = const_vec;
        dst2 = const_vec;
        dst3 = const_vec;
        dst4 = const_vec;
        dst5 = const_vec;
        VSHF_B2_SB(src0, src0, src0, src1, mask0, mask4, vec0, vec1);
        VSHF_B2_SB(src1, src1, src1, src2, mask0, mask4, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
                     dst1, dst2, dst3);
        VSHF_B2_SB(src0, src0, src0, src1, mask1, mask5, vec0, vec1);
        VSHF_B2_SB(src1, src1, src1, src2, mask1, mask5, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
                     dst1, dst2, dst3);
        VSHF_B2_SB(src0, src0, src0, src1, mask2, mask6, vec0, vec1);
        VSHF_B2_SB(src1, src1, src1, src2, mask2, mask6, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, dst0,
                     dst1, dst2, dst3);
        VSHF_B2_SB(src0, src0, src0, src1, mask3, mask7, vec0, vec1);
        VSHF_B2_SB(src1, src1, src1, src2, mask3, mask7, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt3, filt3, filt3, filt3, dst0,
                     dst1, dst2, dst3);
        ST_SH4(dst0, dst1, dst2, dst3, dst, 8);

        VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec4, vec5);
        DPADD_SB2_SH(vec4, vec5, filt0, filt0, dst4, dst5);
        VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec4, vec5);
        DPADD_SB2_SH(vec4, vec5, filt1, filt1, dst4, dst5);
        VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec4, vec5);
        DPADD_SB2_SH(vec4, vec5, filt2, filt2, dst4, dst5);
        VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec4, vec5);
        DPADD_SB2_SH(vec4, vec5, filt3, filt3, dst4, dst5);
        ST_SH2(dst4, dst5, (dst + 32), 8);
        dst += dst_stride;
    }
}

static void hevc_hz_8t_64w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4;
    v8i16 filt0, filt1, filt2, filt3;
    v16i8 mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);

    src -= 3;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;
    mask4 = mask0 + 8;
    mask5 = mask0 + 10;
    mask6 = mask0 + 12;
    mask7 = mask0 + 14;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = height; loop_cnt--;) {
        LD_SB4(src, 16, src0, src1, src2, src3);
        src4 = LD_SB(src + 56);
        src += src_stride;
        XORI_B5_128_SB(src0, src1, src2, src3, src4);

        VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst0 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst0, dst0, dst0, dst0);
        ST_SH(dst0, dst);

        VSHF_B4_SB(src0, src1, mask4, mask5, mask6, mask7,
                   vec0, vec1, vec2, vec3);
        dst1 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst1, dst1, dst1, dst1);
        ST_SH(dst1, dst + 8);

        VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst2 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst2, dst2, dst2, dst2);
        ST_SH(dst2, dst + 16);

        VSHF_B4_SB(src1, src2, mask4, mask5, mask6, mask7,
                   vec0, vec1, vec2, vec3);
        dst3 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst3, dst3, dst3, dst3);
        ST_SH(dst3, dst + 24);

        VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst4 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst4, dst4, dst4, dst4);
        ST_SH(dst4, dst + 32);

        VSHF_B4_SB(src2, src3, mask4, mask5, mask6, mask7,
                   vec0, vec1, vec2, vec3);
        dst5 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst5, dst5, dst5, dst5);
        ST_SH(dst5, dst + 40);

        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst6 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst6, dst6, dst6, dst6);
        ST_SH(dst6, dst + 48);

        VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        dst7 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst7, dst7, dst7, dst7);
        ST_SH(dst7, dst + 56);
        dst += dst_stride;
    }
}

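/* 8-tap vertical filters: ILVR_B/ILVL_B interleave adjacent rows so each
 * DPADD_SB consumes two taps' worth of vertically neighbouring pixels; the
 * interleaved registers are rotated at the bottom of the loop so only the
 * new rows are loaded on each iteration. */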
static void hevc_vt_8t_4w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    int32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16i8 src9, src10, src11, src12, src13, src14;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v16i8 src1110_r, src1211_r, src1312_r, src1413_r;
    v16i8 src2110, src4332, src6554, src8776, src10998;
    v16i8 src12111110, src14131312;
    v8i16 dst10, dst32, dst54, dst76;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 filter_vec, const_vec;

    src -= (3 * src_stride);

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
    src += (7 * src_stride);
    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
               src10_r, src32_r, src54_r, src21_r);
    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
    ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
               src2110, src4332, src6554);
    XORI_B3_128_SB(src2110, src4332, src6554);

    for (loop_cnt = (height >> 3); loop_cnt--;) {
        LD_SB8(src, src_stride,
               src7, src8, src9, src10, src11, src12, src13, src14);
        src += (8 * src_stride);

        ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_r, src87_r, src98_r, src109_r);
        ILVR_B4_SB(src11, src10, src12, src11, src13, src12, src14, src13,
                   src1110_r, src1211_r, src1312_r, src1413_r);
        ILVR_D4_SB(src87_r, src76_r, src109_r, src98_r,
                   src1211_r, src1110_r, src1413_r, src1312_r,
                   src8776, src10998, src12111110, src14131312);
        XORI_B4_128_SB(src8776, src10998, src12111110, src14131312);

        dst10 = const_vec;
        DPADD_SB4_SH(src2110, src4332, src6554, src8776,
                     filt0, filt1, filt2, filt3, dst10, dst10, dst10, dst10);
        dst32 = const_vec;
        DPADD_SB4_SH(src4332, src6554, src8776, src10998,
                     filt0, filt1, filt2, filt3, dst32, dst32, dst32, dst32);
        dst54 = const_vec;
        DPADD_SB4_SH(src6554, src8776, src10998, src12111110,
                     filt0, filt1, filt2, filt3, dst54, dst54, dst54, dst54);
        dst76 = const_vec;
        DPADD_SB4_SH(src8776, src10998, src12111110, src14131312,
                     filt0, filt1, filt2, filt3, dst76, dst76, dst76, dst76);

        ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
        dst += (8 * dst_stride);

        src2110 = src10998;
        src4332 = src12111110;
        src6554 = src14131312;
        src6 = src14;
    }
}

static void hevc_vt_8t_8w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    int32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v8i16 filter_vec, const_vec;
    v8i16 filt0, filt1, filt2, filt3;

    src -= (3 * src_stride);
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
    src += (7 * src_stride);
    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
               src10_r, src32_r, src54_r, src21_r);
    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        XORI_B4_128_SB(src7, src8, src9, src10);
        ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_r, src87_r, src98_r, src109_r);

        dst0_r = const_vec;
        DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
                     filt0, filt1, filt2, filt3,
                     dst0_r, dst0_r, dst0_r, dst0_r);
        dst1_r = const_vec;
        DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
                     filt0, filt1, filt2, filt3,
                     dst1_r, dst1_r, dst1_r, dst1_r);
        dst2_r = const_vec;
        DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
                     filt0, filt1, filt2, filt3,
                     dst2_r, dst2_r, dst2_r, dst2_r);
        dst3_r = const_vec;
        DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
                     filt0, filt1, filt2, filt3,
                     dst3_r, dst3_r, dst3_r, dst3_r);

        ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
        dst += (4 * dst_stride);

        src10_r = src54_r;
        src32_r = src76_r;
        src54_r = src98_r;
        src21_r = src65_r;
        src43_r = src87_r;
        src65_r = src109_r;
        src6 = src10;
    }
}

static void hevc_vt_8t_12w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    int32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
    v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
    v16i8 src2110, src4332, src6554, src8776, src10998;
    v8i16 dst0_l, dst1_l;
    v8i16 filter_vec, const_vec;
    v8i16 filt0, filt1, filt2, filt3;

    src -= (3 * src_stride);
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
    src += (7 * src_stride);
    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
    ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
               src10_r, src32_r, src54_r, src21_r);
    ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
    ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
               src10_l, src32_l, src54_l, src21_l);
    ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);
    ILVR_D3_SB(src21_l, src10_l, src43_l, src32_l, src65_l, src54_l,
               src2110, src4332, src6554);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        XORI_B4_128_SB(src7, src8, src9, src10);
        ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_r, src87_r, src98_r, src109_r);
        ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                   src76_l, src87_l, src98_l, src109_l);
        ILVR_D2_SB(src87_l, src76_l, src109_l, src98_l, src8776, src10998);

        dst0_r = const_vec;
        DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
                     filt0, filt1, filt2, filt3,
                     dst0_r, dst0_r, dst0_r, dst0_r);
        dst1_r = const_vec;
        DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
                     filt0, filt1, filt2, filt3,
                     dst1_r, dst1_r, dst1_r, dst1_r);
        dst2_r = const_vec;
        DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
                     filt0, filt1, filt2, filt3,
                     dst2_r, dst2_r, dst2_r, dst2_r);
        dst3_r = const_vec;
        DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
                     filt0, filt1, filt2, filt3,
                     dst3_r, dst3_r, dst3_r, dst3_r);
        dst0_l = const_vec;
        DPADD_SB4_SH(src2110, src4332, src6554, src8776,
                     filt0, filt1, filt2, filt3,
                     dst0_l, dst0_l, dst0_l, dst0_l);
        dst1_l = const_vec;
        DPADD_SB4_SH(src4332, src6554, src8776, src10998,
                     filt0, filt1, filt2, filt3,
                     dst1_l, dst1_l, dst1_l, dst1_l);

        ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
        ST8x4_UB(dst0_l, dst1_l, dst + 8, 2 * dst_stride);
        dst += (4 * dst_stride);

        src10_r = src54_r;
        src32_r = src76_r;
        src54_r = src98_r;
        src21_r = src65_r;
        src43_r = src87_r;
        src65_r = src109_r;
        src2110 = src6554;
        src4332 = src8776;
        src6554 = src10998;
        src6 = src10;
    }
}

static void hevc_vt_8t_16multx4mult_msa(uint8_t *src,
                                        int32_t src_stride,
                                        int16_t *dst,
                                        int32_t dst_stride,
                                        const int8_t *filter,
                                        int32_t height,
                                        int32_t width)
{
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    int32_t loop_cnt, cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
    v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
    v8i16 dst0_l, dst1_l, dst2_l, dst3_l;
    v8i16 filter_vec, const_vec;
    v8i16 filt0, filt1, filt2, filt3;

    src -= (3 * src_stride);
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    for (cnt = width >> 4; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
        src_tmp += (7 * src_stride);
        XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);
        ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
                   src10_r, src32_r, src54_r, src21_r);
        ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);
        ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1,
                   src10_l, src32_l, src54_l, src21_l);
        ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l);

        for (loop_cnt = (height >> 2); loop_cnt--;) {
            LD_SB4(src_tmp, src_stride, src7, src8, src9, src10);
            src_tmp += (4 * src_stride);
            XORI_B4_128_SB(src7, src8, src9, src10);
            ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                       src76_r, src87_r, src98_r, src109_r);
            ILVL_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
                       src76_l, src87_l, src98_l, src109_l);

            dst0_r = const_vec;
            DPADD_SB4_SH(src10_r, src32_r, src54_r, src76_r,
                         filt0, filt1, filt2, filt3,
                         dst0_r, dst0_r, dst0_r, dst0_r);
            dst1_r = const_vec;
            DPADD_SB4_SH(src21_r, src43_r, src65_r, src87_r,
                         filt0, filt1, filt2, filt3,
                         dst1_r, dst1_r, dst1_r, dst1_r);
            dst2_r = const_vec;
            DPADD_SB4_SH(src32_r, src54_r, src76_r, src98_r,
                         filt0, filt1, filt2, filt3,
                         dst2_r, dst2_r, dst2_r, dst2_r);
            dst3_r = const_vec;
            DPADD_SB4_SH(src43_r, src65_r, src87_r, src109_r,
                         filt0, filt1, filt2, filt3,
                         dst3_r, dst3_r, dst3_r, dst3_r);
            dst0_l = const_vec;
            DPADD_SB4_SH(src10_l, src32_l, src54_l, src76_l,
                         filt0, filt1, filt2, filt3,
                         dst0_l, dst0_l, dst0_l, dst0_l);
            dst1_l = const_vec;
            DPADD_SB4_SH(src21_l, src43_l, src65_l, src87_l,
                         filt0, filt1, filt2, filt3,
                         dst1_l, dst1_l, dst1_l, dst1_l);
            dst2_l = const_vec;
            DPADD_SB4_SH(src32_l, src54_l, src76_l, src98_l,
                         filt0, filt1, filt2, filt3,
                         dst2_l, dst2_l, dst2_l, dst2_l);
            dst3_l = const_vec;
            DPADD_SB4_SH(src43_l, src65_l, src87_l, src109_l,
                         filt0, filt1, filt2, filt3,
                         dst3_l, dst3_l, dst3_l, dst3_l);

            ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst_tmp, dst_stride);
            ST_SH4(dst0_l, dst1_l, dst2_l, dst3_l, dst_tmp + 8, dst_stride);
            dst_tmp += (4 * dst_stride);

            src10_r = src54_r;
            src32_r = src76_r;
            src54_r = src98_r;
            src21_r = src65_r;
            src43_r = src87_r;
            src65_r = src109_r;
            src10_l = src54_l;
            src32_l = src76_l;
            src54_l = src98_l;
            src21_l = src65_l;
            src43_l = src87_l;
            src65_l = src109_l;
            src6 = src10;
        }

        src += 16;
        dst += 16;
    }
}

static void hevc_vt_8t_16w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
                                filter, height, 16);
}

static void hevc_vt_8t_24w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
                                filter, height, 16);
    hevc_vt_8t_8w_msa(src + 16, src_stride, dst + 16, dst_stride,
                      filter, height);
}

static void hevc_vt_8t_32w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
                                filter, height, 32);
}

static void hevc_vt_8t_48w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
                                filter, height, 48);
}

static void hevc_vt_8t_64w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_msa(src, src_stride, dst, dst_stride,
                                filter, height, 64);
}

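/* 8-tap 2-D (horizontal+vertical) filters: the horizontal pass produces
 * 16-bit intermediates, which the vertical pass combines with
 * HEVC_FILT_8TAP at 32-bit precision before shifting back down by 6. */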
static void hevc_hv_8t_4w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter_x, const int8_t *filter_y,
                              int32_t height)
{
    uint32_t loop_cnt;
    int32_t dst_stride_in_bytes = 2 * dst_stride;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
    v16i8 mask1, mask2, mask3;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
    v4i32 dst0_r, dst1_r, dst2_r, dst3_r;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r;
    v8i16 dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);

    src -= ((3 * src_stride) + 3);
    filter_vec = LD_SH(filter_x);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
    src += (7 * src_stride);
    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);

    VSHF_B4_SB(src0, src3, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
    VSHF_B4_SB(src1, src4, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
    VSHF_B4_SB(src2, src5, mask0, mask1, mask2, mask3,
               vec8, vec9, vec10, vec11);
    VSHF_B4_SB(src3, src6, mask0, mask1, mask2, mask3,
               vec12, vec13, vec14, vec15);
    dst30 = const_vec;
    DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                 dst30, dst30, dst30, dst30);
    dst41 = const_vec;
    DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
                 dst41, dst41, dst41, dst41);
    dst52 = const_vec;
    DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
                 dst52, dst52, dst52, dst52);
    dst63 = const_vec;
    DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
                 dst63, dst63, dst63, dst63);

    ILVRL_H2_SH(dst41, dst30, dst10_r, dst43_r);
    ILVRL_H2_SH(dst52, dst41, dst21_r, dst54_r);
    ILVRL_H2_SH(dst63, dst52, dst32_r, dst65_r);
    dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        XORI_B4_128_SB(src7, src8, src9, src10);

        VSHF_B4_SB(src7, src9, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src8, src10, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
        dst97 = const_vec;
        dst108 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst97, dst97, dst97, dst97);
        DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
                     dst108, dst108, dst108, dst108);

        dst76_r = __msa_ilvr_h(dst97, dst66);
        ILVRL_H2_SH(dst108, dst97, dst87_r, dst109_r);
        dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
        dst98_r = __msa_ilvr_h(dst66, dst108);

        dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
                                filt_h0, filt_h1, filt_h2, filt_h3);
        dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r,
                                filt_h0, filt_h1, filt_h2, filt_h3);
        dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r,
                                filt_h0, filt_h1, filt_h2, filt_h3);
        dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r,
                                filt_h0, filt_h1, filt_h2, filt_h3);
        SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
        PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst2_r);
        ST8x4_UB(dst0_r, dst2_r, dst, dst_stride_in_bytes);
        dst += (4 * dst_stride);

        dst10_r = dst54_r;
        dst32_r = dst76_r;
        dst54_r = dst98_r;
        dst21_r = dst65_r;
        dst43_r = dst87_r;
        dst65_r = dst109_r;
        dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
    }
}

static void hevc_hv_8t_8multx1mult_msa(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter_x,
                                       const int8_t *filter_y,
                                       int32_t height, int32_t width)
{
    uint32_t loop_cnt, cnt;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 filt0, filt1, filt2, filt3;
    v8i16 filt_h0, filt_h1, filt_h2, filt_h3;
    v16i8 mask1, mask2, mask3;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v4i32 dst0_r, dst0_l;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst76_l;
    v16i8 mask0 = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };

    src -= ((3 * src_stride) + 3);
    filter_vec = LD_SH(filter_x);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);

    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (cnt = width >> 3; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
        src_tmp += (7 * src_stride);
        XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);

        /* row 0 row 1 row 2 row 3 */
        VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
        VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3,
                   vec8, vec9, vec10, vec11);
        VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3,
                   vec12, vec13, vec14, vec15);
        dst0 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst0, dst0, dst0, dst0);
        dst1 = const_vec;
        DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
                     dst1, dst1, dst1, dst1);
        dst2 = const_vec;
        DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
                     dst2, dst2, dst2, dst2);
        dst3 = const_vec;
        DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3,
                     dst3, dst3, dst3, dst3);

        /* row 4 row 5 row 6 */
        VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3,
                   vec0, vec1, vec2, vec3);
        VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3,
                   vec4, vec5, vec6, vec7);
        VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3,
                   vec8, vec9, vec10, vec11);
        dst4 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                     dst4, dst4, dst4, dst4);
        dst5 = const_vec;
        DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3,
                     dst5, dst5, dst5, dst5);
        dst6 = const_vec;
        DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3,
                     dst6, dst6, dst6, dst6);

        for (loop_cnt = height; loop_cnt--;) {
            src7 = LD_SB(src_tmp);
            src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
            src_tmp += src_stride;

            VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3,
                       vec0, vec1, vec2, vec3);
            dst7 = const_vec;
            DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,
                         dst7, dst7, dst7, dst7);

            ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
            ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
            ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
            ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
            dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r,
                                    filt_h0, filt_h1, filt_h2, filt_h3);
            dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l,
                                    filt_h0, filt_h1, filt_h2, filt_h3);
            dst0_r >>= 6;
            dst0_l >>= 6;

            dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
            ST_SW(dst0_r, dst_tmp);
            dst_tmp += dst_stride;

            dst0 = dst1;
            dst1 = dst2;
            dst2 = dst3;
            dst3 = dst4;
            dst4 = dst5;
            dst5 = dst6;
            dst6 = dst7;
        }

        src += 8;
        dst += 8;
    }
}

static void hevc_hv_8t_8w_msa(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter_x, const int8_t *filter_y,
                              int32_t height)
{
    hevc_hv_8t_8multx1mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 8);
}

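/* The 12-wide case is handled as one 8-wide column (same scheme as
 * hevc_hv_8t_8multx1mult_msa) followed by a 4-wide pass using the
 * 4-width masks. */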
static void hevc_hv_8t_12w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    uint32_t loop_cnt;
    int32_t dst_stride_in_bytes = 2 * dst_stride;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    v8i16 filt0, filt1, filt2, filt3, filt_h0, filt_h1, filt_h2, filt_h3;
    v8i16 filter_vec, const_vec;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 dst30, dst41, dst52, dst63, dst66, dst97, dst108;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
    v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
    v4i32 dst0_r, dst0_l, dst1_r, dst2_r, dst3_r;

    src -= ((3 * src_stride) + 3);
    filter_vec = LD_SH(filter_x);
    SPLATI_H4_SH(filter_vec, 0, 1, 2, 3, filt0, filt1, filt2, filt3);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W4_SH(filter_vec, filt_h0, filt_h1, filt_h2, filt_h3);

    mask0 = LD_SB(ff_hevc_mask_arr);
    mask1 = mask0 + 2;
    mask2 = mask0 + 4;
    mask3 = mask0 + 6;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    src_tmp = src;
    dst_tmp = dst;

    LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6);
    src_tmp += (7 * src_stride);
    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);

    /* row 0 row 1 row 2 row 3 */
    VSHF_B4_SB(src0, src0, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
    VSHF_B4_SB(src1, src1, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
    VSHF_B4_SB(src2, src2, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
               vec11);
    VSHF_B4_SB(src3, src3, mask0, mask1, mask2, mask3, vec12, vec13, vec14,
               vec15);
    dst0 = const_vec;
    DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst0, dst0,
                 dst0, dst0);
    dst1 = const_vec;
    DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst1, dst1,
                 dst1, dst1);
    dst2 = const_vec;
    DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3, dst2,
                 dst2, dst2, dst2);
    dst3 = const_vec;
    DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3, dst3,
                 dst3, dst3, dst3);

    /* row 4 row 5 row 6 */
    VSHF_B4_SB(src4, src4, mask0, mask1, mask2, mask3, vec0, vec1, vec2, vec3);
    VSHF_B4_SB(src5, src5, mask0, mask1, mask2, mask3, vec4, vec5, vec6, vec7);
    VSHF_B4_SB(src6, src6, mask0, mask1, mask2, mask3, vec8, vec9, vec10,
               vec11);
    dst4 = const_vec;
    DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst4, dst4,
                 dst4, dst4);
    dst5 = const_vec;
    DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst5, dst5,
                 dst5, dst5);
    dst6 = const_vec;
    DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3, dst6,
                 dst6, dst6, dst6);

    for (loop_cnt = height; loop_cnt--;) {
        src7 = LD_SB(src_tmp);
        src7 = (v16i8) __msa_xori_b((v16u8) src7, 128);
        src_tmp += src_stride;

        VSHF_B4_SB(src7, src7, mask0, mask1, mask2, mask3, vec0, vec1, vec2,
                   vec3);
        dst7 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst7,
                     dst7, dst7, dst7);

        ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
        ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
        ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
        ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
        dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst0_l = HEVC_FILT_8TAP(dst10_l, dst32_l, dst54_l, dst76_l, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst0_r >>= 6;
        dst0_l >>= 6;

        dst0_r = (v4i32) __msa_pckev_h((v8i16) dst0_l, (v8i16) dst0_r);
        ST_SW(dst0_r, dst_tmp);
        dst_tmp += dst_stride;

        dst0 = dst1;
        dst1 = dst2;
        dst2 = dst3;
        dst3 = dst4;
        dst4 = dst5;
        dst5 = dst6;
        dst6 = dst7;
    }

    src += 8;
    dst += 8;

    mask4 = LD_SB(ff_hevc_mask_arr + 16);
    mask5 = mask4 + 2;
    mask6 = mask4 + 4;
    mask7 = mask4 + 6;

    LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
    src += (7 * src_stride);
    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);

    VSHF_B4_SB(src0, src3, mask4, mask5, mask6, mask7, vec0, vec1, vec2, vec3);
    VSHF_B4_SB(src1, src4, mask4, mask5, mask6, mask7, vec4, vec5, vec6, vec7);
    VSHF_B4_SB(src2, src5, mask4, mask5, mask6, mask7, vec8, vec9, vec10,
               vec11);
    VSHF_B4_SB(src3, src6, mask4, mask5, mask6, mask7, vec12, vec13, vec14,
               vec15);
    dst30 = const_vec;
    DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst30,
                 dst30, dst30, dst30);
    dst41 = const_vec;
    DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst41,
                 dst41, dst41, dst41);
    dst52 = const_vec;
    DPADD_SB4_SH(vec8, vec9, vec10, vec11, filt0, filt1, filt2, filt3, dst52,
                 dst52, dst52, dst52);
    dst63 = const_vec;
    DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt0, filt1, filt2, filt3, dst63,
                 dst63, dst63, dst63);

    ILVRL_H2_SH(dst41, dst30, dst10_r, dst43_r);
    ILVRL_H2_SH(dst52, dst41, dst21_r, dst54_r);
    ILVRL_H2_SH(dst63, dst52, dst32_r, dst65_r);

    dst66 = (v8i16) __msa_splati_d((v2i64) dst63, 1);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        LD_SB4(src, src_stride, src7, src8, src9, src10);
        src += (4 * src_stride);
        XORI_B4_128_SB(src7, src8, src9, src10);

        VSHF_B4_SB(src7, src9, mask4, mask5, mask6, mask7, vec0, vec1, vec2,
                   vec3);
        VSHF_B4_SB(src8, src10, mask4, mask5, mask6, mask7, vec4, vec5, vec6,
                   vec7);
        dst97 = const_vec;
        dst108 = const_vec;
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3, dst97,
                     dst97, dst97, dst97);
        DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt0, filt1, filt2, filt3, dst108,
                     dst108, dst108, dst108);

        dst76_r = __msa_ilvr_h(dst97, dst66);
        ILVRL_H2_SH(dst108, dst97, dst87_r, dst109_r);
        dst66 = (v8i16) __msa_splati_d((v2i64) dst97, 1);
        dst98_r = __msa_ilvr_h(dst66, dst108);

        dst0_r = HEVC_FILT_8TAP(dst10_r, dst32_r, dst54_r, dst76_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst1_r = HEVC_FILT_8TAP(dst21_r, dst43_r, dst65_r, dst87_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst2_r = HEVC_FILT_8TAP(dst32_r, dst54_r, dst76_r, dst98_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        dst3_r = HEVC_FILT_8TAP(dst43_r, dst65_r, dst87_r, dst109_r, filt_h0,
                                filt_h1, filt_h2, filt_h3);
        SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
        PCKEV_H2_SW(dst1_r, dst0_r, dst3_r, dst2_r, dst0_r, dst2_r);
        ST8x4_UB(dst0_r, dst2_r, dst, dst_stride_in_bytes);
        dst += (4 * dst_stride);

        dst10_r = dst54_r;
        dst32_r = dst76_r;
        dst54_r = dst98_r;
        dst21_r = dst65_r;
        dst43_r = dst87_r;
        dst65_r = dst109_r;
        dst66 = (v8i16) __msa_splati_d((v2i64) dst108, 1);
    }
}

static void hevc_hv_8t_16w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 16);
}

static void hevc_hv_8t_24w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 24);
}

static void hevc_hv_8t_32w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 32);
}

static void hevc_hv_8t_48w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 48);
}

static void hevc_hv_8t_64w_msa(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 64);
}

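/* 4-tap (chroma) horizontal filters: same layout as the 8-tap versions
 * above, but with only two taps (filt0, filt1) and two shuffle masks. */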
static void hevc_hz_4t_4x2_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter)
{
    v8i16 filt0, filt1;
    v16i8 src0, src1;
    v16i8 mask1, vec0, vec1;
    v8i16 dst0;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB2(src, src_stride, src0, src1);
    XORI_B2_128_SB(src0, src1);

    VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
    dst0 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

    ST8x2_UB(dst0, dst, 2 * dst_stride);
}

static void hevc_hz_4t_4x4_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter)
{
    v8i16 filt0, filt1;
    v16i8 src0, src1, src2, src3;
    v16i8 mask1, vec0, vec1;
    v8i16 dst0, dst1;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB4(src, src_stride, src0, src1, src2, src3);
    XORI_B4_128_SB(src0, src1, src2, src3);

    VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
    dst0 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

    VSHF_B2_SB(src2, src3, src2, src3, mask0, mask1, vec0, vec1);
    dst1 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);

    ST8x4_UB(dst0, dst1, dst, 2 * dst_stride);
}

static void hevc_hz_4t_4x8multiple_msa(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter,
                                       int32_t height)
{
    uint32_t loop_cnt;
    v8i16 filt0, filt1;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16i8 mask1, vec0, vec1;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, const_vec;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = (height >> 3); loop_cnt--;) {
        LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
        src += (8 * src_stride);

        XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);

        VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
        VSHF_B2_SB(src2, src3, src2, src3, mask0, mask1, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);
        VSHF_B2_SB(src4, src5, src4, src5, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
        VSHF_B2_SB(src6, src7, src6, src7, mask0, mask1, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);

        ST8x8_UB(dst0, dst1, dst2, dst3, dst, 2 * dst_stride);
        dst += (8 * dst_stride);
    }
}

static void hevc_hz_4t_4w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter,
                              int32_t height)
{
    if (2 == height) {
        hevc_hz_4t_4x2_msa(src, src_stride, dst, dst_stride, filter);
    } else if (4 == height) {
        hevc_hz_4t_4x4_msa(src, src_stride, dst, dst_stride, filter);
    } else if (0 == height % 8) {
        hevc_hz_4t_4x8multiple_msa(src, src_stride, dst, dst_stride,
                                   filter, height);
    }
}
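
/* 6-wide rows produce 12 bytes of int16 output per row: the function
 * below splits each row's store into an 8-byte SD of the first four
 * results plus a 4-byte SW of word 2 of the same vector (results 4-5). */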
static void hevc_hz_4t_6w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter,
                              int32_t height)
{
    uint32_t loop_cnt;
    uint64_t dst_val0, dst_val1, dst_val2, dst_val3;
    uint32_t dst_val_int0, dst_val_int1, dst_val_int2, dst_val_int3;
    v8i16 filt0, filt1, dst0, dst1, dst2, dst3;
    v16i8 src0, src1, src2, src3;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v16i8 vec0, vec1;
    v8i16 filter_vec, const_vec;

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = 2; loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);

        XORI_B4_128_SB(src0, src1, src2, src3);

        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);
        VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);

        dst_val0 = __msa_copy_u_d((v2i64) dst0, 0);
        dst_val1 = __msa_copy_u_d((v2i64) dst1, 0);
        dst_val2 = __msa_copy_u_d((v2i64) dst2, 0);
        dst_val3 = __msa_copy_u_d((v2i64) dst3, 0);

        dst_val_int0 = __msa_copy_u_w((v4i32) dst0, 2);
        dst_val_int1 = __msa_copy_u_w((v4i32) dst1, 2);
        dst_val_int2 = __msa_copy_u_w((v4i32) dst2, 2);
        dst_val_int3 = __msa_copy_u_w((v4i32) dst3, 2);

        SD(dst_val0, dst);
        SW(dst_val_int0, dst + 4);
        dst += dst_stride;
        SD(dst_val1, dst);
        SW(dst_val_int1, dst + 4);
        dst += dst_stride;
        SD(dst_val2, dst);
        SW(dst_val_int2, dst + 4);
        dst += dst_stride;
        SD(dst_val3, dst);
        SW(dst_val_int3, dst + 4);
        dst += dst_stride;
    }
}
static void hevc_hz_4t_8x2multiple_msa(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter,
                                       int32_t height)
{
    uint32_t loop_cnt;
    v8i16 filt0, filt1, dst0, dst1;
    v16i8 src0, src1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v16i8 vec0, vec1;
    v8i16 filter_vec, const_vec;

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        LD_SB2(src, src_stride, src0, src1);
        src += (2 * src_stride);

        XORI_B2_128_SB(src0, src1);

        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);

        ST_SH2(dst0, dst1, dst, dst_stride);
        dst += (2 * dst_stride);
    }
}
static void hevc_hz_4t_8x4multiple_msa(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter,
                                       int32_t height)
{
    uint32_t loop_cnt;
    v8i16 filt0, filt1;
    v16i8 src0, src1, src2, src3;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v16i8 vec0, vec1;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, const_vec;

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);

        XORI_B4_128_SB(src0, src1, src2, src3);

        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);

        VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);

        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);

        ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}
static void hevc_hz_4t_8w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter,
                              int32_t height)
{
    if (2 == height || 6 == height) {
        hevc_hz_4t_8x2multiple_msa(src, src_stride, dst, dst_stride,
                                   filter, height);
    } else {
        hevc_hz_4t_8x4multiple_msa(src, src_stride, dst, dst_stride,
                                   filter, height);
    }
}
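
/* For the 12-wide case below, mask2/mask3 contain indices >= 16, so
 * VSHF_B2_SB also selects bytes from its second source operand: columns
 * 8..11 of two consecutive rows are filtered out of one register pair. */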
static void hevc_hz_4t_12w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    v8i16 filt0, filt1;
    v16i8 src0, src1, src2, src3;
    v16i8 mask1;
    v16i8 vec0, vec1;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5;
    v8i16 filter_vec, const_vec;
    v16i8 mask3;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask2 = {
        8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
    };

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;
    mask3 = mask2 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src1, src2, src3);
        src += (4 * src_stride);
        XORI_B4_128_SB(src0, src1, src2, src3);

        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);
        VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);
        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
        VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
        dst4 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);
        VSHF_B2_SB(src2, src3, src2, src3, mask2, mask3, vec0, vec1);
        dst5 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);

        ST_SH4(dst0, dst1, dst2, dst3, dst, dst_stride);
        ST8x4_UB(dst4, dst5, dst + 8, 2 * dst_stride);
        dst += (4 * dst_stride);
    }
}
static void hevc_hz_4t_16w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3;
    v16i8 src4, src5, src6, src7;
    v8i16 filt0, filt1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16i8 vec0, vec1;
    v8i16 filter_vec, const_vec;

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src0, src2, src4, src6);
        LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
        src += (4 * src_stride);

        XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);

        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);

        VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);

        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);

        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
        dst4 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst4, dst4);

        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
        dst5 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst5, dst5);

        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
        dst6 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst6, dst6);

        VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
        dst7 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst7, dst7);

        ST_SH4(dst0, dst2, dst4, dst6, dst, dst_stride);
        ST_SH4(dst1, dst3, dst5, dst7, dst + 8, dst_stride);
        dst += (4 * dst_stride);
    }
}
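
/* 24-wide below = a 16-wide strip written through dst plus an 8-wide
 * strip written through dst_tmp (dst + 16); mask00/mask11 straddle the
 * two 16-byte source vectors so pixels 8..23 can be filtered in one go. */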
static void hevc_hz_4t_24w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    int16_t *dst_tmp = dst + 16;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 filt0, filt1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1, mask00, mask11;
    v16i8 vec0, vec1;
    v8i16 dst0, dst1, dst2, dst3;
    v8i16 filter_vec, const_vec;

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    mask1 = mask0 + 2;
    mask00 = mask0 + 8;
    mask11 = mask0 + 10;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        /* 16 width */
        LD_SB4(src, src_stride, src0, src2, src4, src6);
        LD_SB4(src + 16, src_stride, src1, src3, src5, src7);
        src += (4 * src_stride);

        XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7);

        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

        VSHF_B2_SB(src0, src1, src0, src1, mask00, mask11, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);

        VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);

        VSHF_B2_SB(src2, src3, src2, src3, mask00, mask11, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);

        ST_SH2(dst0, dst1, dst, 8);
        dst += dst_stride;
        ST_SH2(dst2, dst3, dst, 8);
        dst += dst_stride;

        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

        VSHF_B2_SB(src4, src5, src4, src5, mask00, mask11, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);

        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);

        VSHF_B2_SB(src6, src7, src6, src7, mask00, mask11, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);

        ST_SH2(dst0, dst1, dst, 8);
        dst += dst_stride;
        ST_SH2(dst2, dst3, dst, 8);
        dst += dst_stride;

        /* 8 width */
        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec0, vec1);
        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);

        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        dst1 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst1, dst1);

        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec0, vec1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst2, dst2);

        VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
        dst3 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);

        ST_SH4(dst0, dst1, dst2, dst3, dst_tmp, dst_stride);
        dst_tmp += (4 * dst_stride);
    }
}
static void hevc_hz_4t_32w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2;
    v8i16 filt0, filt1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1, mask2, mask3;
    v8i16 dst0, dst1, dst2, dst3;
    v16i8 vec0, vec1, vec2, vec3;
    v8i16 filter_vec, const_vec;

    src -= 1;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    mask1 = mask0 + 2;
    mask2 = mask0 + 8;
    mask3 = mask0 + 10;

    for (loop_cnt = height; loop_cnt--;) {
        LD_SB2(src, 16, src0, src1);
        src2 = LD_SB(src + 24);
        src += src_stride;

        XORI_B3_128_SB(src0, src1, src2);

        dst0 = const_vec;
        dst1 = const_vec;
        dst2 = const_vec;
        dst3 = const_vec;
        VSHF_B2_SB(src0, src0, src0, src1, mask0, mask2, vec0, vec1);
        VSHF_B2_SB(src1, src1, src2, src2, mask0, mask0, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, dst0,
                     dst1, dst2, dst3);
        VSHF_B2_SB(src0, src0, src0, src1, mask1, mask3, vec0, vec1);
        VSHF_B2_SB(src1, src1, src2, src2, mask1, mask1, vec2, vec3);
        DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt1, filt1, filt1, filt1, dst0,
                     dst1, dst2, dst3);
        ST_SH4(dst0, dst1, dst2, dst3, dst, 8);
        dst += dst_stride;
    }
}
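
/* Vertical 4-tap filters start here: ILVR_B2_SB interleaves two adjacent
 * rows byte-wise, so a single DPADD_SB2_SH accumulates taps 0/1 from one
 * interleaved vector and taps 2/3 from the next. */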
static void hevc_vt_4t_4x2_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter)
{
    v16i8 src0, src1, src2, src3, src4;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src2110, src4332;
    v8i16 dst10;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
    ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
               src10_r, src21_r, src32_r, src43_r);

    ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
    XORI_B2_128_SB(src2110, src4332);
    dst10 = const_vec;
    DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);

    ST8x2_UB(dst10, dst, 2 * dst_stride);
}
static void hevc_vt_4t_4x4_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    v16i8 src0, src1, src2, src3, src4, src5, src6;
    v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r;
    v16i8 src2110, src4332, src6554;
    v8i16 dst10, dst32;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
    ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
               src10_r, src21_r, src32_r, src43_r);
    ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
    ILVR_D3_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r,
               src2110, src4332, src6554);
    XORI_B3_128_SB(src2110, src4332, src6554);
    dst10 = const_vec;
    DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
    dst32 = const_vec;
    DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);

    ST8x4_UB(dst10, dst32, dst, 2 * dst_stride);
}
static void hevc_vt_4t_4x8_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
    v16i8 src2110, src4332, src6554, src8776, src10998;
    v8i16 dst10, dst32, dst54, dst76;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);

    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
    src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
    src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);

    LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
    src += (8 * src_stride);
    ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5,
               src32_r, src43_r, src54_r, src65_r);
    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9,
               src76_r, src87_r, src98_r, src109_r);
    ILVR_D4_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r, src109_r,
               src98_r, src4332, src6554, src8776, src10998);
    XORI_B4_128_SB(src4332, src6554, src8776, src10998);

    dst10 = const_vec;
    dst32 = const_vec;
    dst54 = const_vec;
    dst76 = const_vec;
    DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
    DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);
    DPADD_SB2_SH(src6554, src8776, filt0, filt1, dst54, dst54);
    DPADD_SB2_SH(src8776, src10998, filt0, filt1, dst76, dst76);
    ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
    dst += (8 * dst_stride);
}
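
/* The 4x16 variant below runs the same 8-row scheme twice; between the
 * two batches only the last loaded row (src2) and the last interleaved
 * pair vector (src2110) need to be carried forward. */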
static void hevc_vt_4t_4x16_msa(uint8_t *src, int32_t src_stride,
                                int16_t *dst, int32_t dst_stride,
                                const int8_t *filter, int32_t height)
{
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r;
    v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776;
    v16i8 src10998;
    v8i16 dst10, dst32, dst54, dst76, filt0, filt1, filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);

    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
    src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_r, (v2i64) src10_r);
    src2110 = (v16i8) __msa_xori_b((v16u8) src2110, 128);

    LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
    src += (8 * src_stride);
    ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_r, src43_r,
               src54_r, src65_r);
    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
               src87_r, src98_r, src109_r);
    ILVR_D4_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r, src109_r,
               src98_r, src4332, src6554, src8776, src10998);
    XORI_B4_128_SB(src4332, src6554, src8776, src10998);

    dst10 = const_vec;
    dst32 = const_vec;
    dst54 = const_vec;
    dst76 = const_vec;
    DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
    DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);
    DPADD_SB2_SH(src6554, src8776, filt0, filt1, dst54, dst54);
    DPADD_SB2_SH(src8776, src10998, filt0, filt1, dst76, dst76);
    ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
    dst += (8 * dst_stride);

    src2 = src10;
    src2110 = src10998;

    LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
    src += (8 * src_stride);

    ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_r, src43_r,
               src54_r, src65_r);
    ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r,
               src87_r, src98_r, src109_r);
    ILVR_D4_SB(src43_r, src32_r, src65_r, src54_r, src87_r, src76_r, src109_r,
               src98_r, src4332, src6554, src8776, src10998);
    XORI_B4_128_SB(src4332, src6554, src8776, src10998);

    dst10 = const_vec;
    dst32 = const_vec;
    dst54 = const_vec;
    dst76 = const_vec;
    DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst10, dst10);
    DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst32, dst32);
    DPADD_SB2_SH(src6554, src8776, filt0, filt1, dst54, dst54);
    DPADD_SB2_SH(src8776, src10998, filt0, filt1, dst76, dst76);
    ST8x8_UB(dst10, dst32, dst54, dst76, dst, 2 * dst_stride);
    dst += (8 * dst_stride);
}
static void hevc_vt_4t_4w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter,
                              int32_t height)
{
    if (2 == height) {
        hevc_vt_4t_4x2_msa(src, src_stride, dst, dst_stride, filter);
    } else if (4 == height) {
        hevc_vt_4t_4x4_msa(src, src_stride, dst, dst_stride, filter, height);
    } else if (8 == height) {
        hevc_vt_4t_4x8_msa(src, src_stride, dst, dst_stride, filter, height);
    } else if (16 == height) {
        hevc_vt_4t_4x16_msa(src, src_stride, dst, dst_stride, filter, height);
    }
}
static void hevc_vt_4t_6w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter,
                              int32_t height)
{
    uint32_t loop_cnt;
    uint32_t dst_val_int0, dst_val_int1, dst_val_int2, dst_val_int3;
    uint64_t dst_val0, dst_val1, dst_val2, dst_val3;
    v16i8 src0, src1, src2, src3, src4;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB2(src, src_stride, src3, src4);
        src += (2 * src_stride);
        XORI_B2_128_SB(src3, src4);
        ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);

        dst0_r = const_vec;
        DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
        dst1_r = const_vec;
        DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);

        LD_SB2(src, src_stride, src1, src2);
        src += (2 * src_stride);
        XORI_B2_128_SB(src1, src2);
        ILVR_B2_SB(src1, src4, src2, src1, src10_r, src21_r);

        dst2_r = const_vec;
        DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst2_r, dst2_r);
        dst3_r = const_vec;
        DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst3_r, dst3_r);

        dst_val0 = __msa_copy_u_d((v2i64) dst0_r, 0);
        dst_val1 = __msa_copy_u_d((v2i64) dst1_r, 0);
        dst_val2 = __msa_copy_u_d((v2i64) dst2_r, 0);
        dst_val3 = __msa_copy_u_d((v2i64) dst3_r, 0);

        dst_val_int0 = __msa_copy_u_w((v4i32) dst0_r, 2);
        dst_val_int1 = __msa_copy_u_w((v4i32) dst1_r, 2);
        dst_val_int2 = __msa_copy_u_w((v4i32) dst2_r, 2);
        dst_val_int3 = __msa_copy_u_w((v4i32) dst3_r, 2);

        SD(dst_val0, dst);
        SW(dst_val_int0, dst + 4);
        dst += dst_stride;
        SD(dst_val1, dst);
        SW(dst_val_int1, dst + 4);
        dst += dst_stride;
        SD(dst_val2, dst);
        SW(dst_val_int2, dst + 4);
        dst += dst_stride;
        SD(dst_val3, dst);
        SW(dst_val_int3, dst + 4);
        dst += dst_stride;
    }
}
static void hevc_vt_4t_8x2_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter)
{
    v16i8 src0, src1, src2, src3, src4;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v8i16 dst0_r, dst1_r;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);

    LD_SB2(src, src_stride, src3, src4);
    XORI_B2_128_SB(src3, src4);
    ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
    dst0_r = const_vec;
    DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
    dst1_r = const_vec;
    DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);

    ST_SH2(dst0_r, dst1_r, dst, dst_stride);
}
static void hevc_vt_4t_8x6_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter)
{
    v16i8 src0, src1, src2, src3, src4;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v8i16 dst0_r, dst1_r;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);

    LD_SB2(src, src_stride, src3, src4);
    src += (2 * src_stride);
    XORI_B2_128_SB(src3, src4);

    ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
    dst0_r = const_vec;
    DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
    dst1_r = const_vec;
    DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);

    ST_SH2(dst0_r, dst1_r, dst, dst_stride);
    dst += (2 * dst_stride);

    LD_SB2(src, src_stride, src1, src2);
    src += (2 * src_stride);
    XORI_B2_128_SB(src1, src2);

    ILVR_B2_SB(src1, src4, src2, src1, src10_r, src21_r);
    dst0_r = const_vec;
    DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
    dst1_r = const_vec;
    DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);

    ST_SH2(dst0_r, dst1_r, dst, dst_stride);
    dst += (2 * dst_stride);

    LD_SB2(src, src_stride, src3, src4);
    XORI_B2_128_SB(src3, src4);

    ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
    dst0_r = const_vec;
    DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
    dst1_r = const_vec;
    DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);

    ST_SH2(dst0_r, dst1_r, dst, dst_stride);
}
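
/* The steady-state loop below keeps the last loaded row (src2) and the
 * last two interleaved pairs (src10_r/src21_r) alive across iterations,
 * so each pass only has to load four new rows. */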
static void hevc_vt_4t_8x4multiple_msa(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter,
                                       int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6;
    v16i8 src10_r, src32_r, src21_r, src43_r, src54_r, src65_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB4(src, src_stride, src3, src4, src5, src6);
        src += (4 * src_stride);
        XORI_B4_128_SB(src3, src4, src5, src6);
        ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
        ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);

        dst0_r = const_vec;
        dst1_r = const_vec;
        dst2_r = const_vec;
        dst3_r = const_vec;
        DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
        DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
        DPADD_SB2_SH(src32_r, src54_r, filt0, filt1, dst2_r, dst2_r);
        DPADD_SB2_SH(src43_r, src65_r, filt0, filt1, dst3_r, dst3_r);
        ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
        dst += (4 * dst_stride);

        src2 = src6;
        src10_r = src54_r;
        src21_r = src65_r;
    }
}
static void hevc_vt_4t_8w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter,
                              int32_t height)
{
    if (2 == height) {
        hevc_vt_4t_8x2_msa(src, src_stride, dst, dst_stride, filter);
    } else if (6 == height) {
        hevc_vt_4t_8x6_msa(src, src_stride, dst, dst_stride, filter);
    } else {
        hevc_vt_4t_8x4multiple_msa(src, src_stride, dst, dst_stride,
                                   filter, height);
    }
}
static void hevc_vt_4t_12w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v16i8 src10_l, src32_l, src54_l, src21_l, src43_l, src65_l;
    v16i8 src2110, src4332;
    v16i8 src54_r, src65_r, src6554;
    v8i16 dst0_l, dst1_l;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= (1 * src_stride);
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
    ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);
    src2110 = (v16i8) __msa_ilvr_d((v2i64) src21_l, (v2i64) src10_l);

    for (loop_cnt = 4; loop_cnt--;) {
        LD_SB2(src, src_stride, src3, src4);
        src += (2 * src_stride);
        LD_SB2(src, src_stride, src5, src6);
        src += (2 * src_stride);
        XORI_B2_128_SB(src3, src4);
        XORI_B2_128_SB(src5, src6);

        ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
        ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
        src4332 = (v16i8) __msa_ilvr_d((v2i64) src43_l, (v2i64) src32_l);
        ILVR_B2_SB(src5, src4, src6, src5, src54_r, src65_r);
        ILVL_B2_SB(src5, src4, src6, src5, src54_l, src65_l);
        src6554 = (v16i8) __msa_ilvr_d((v2i64) src65_l, (v2i64) src54_l);

        dst0_r = const_vec;
        DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
        dst1_r = const_vec;
        DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
        dst2_r = const_vec;
        DPADD_SB2_SH(src32_r, src54_r, filt0, filt1, dst2_r, dst2_r);
        dst3_r = const_vec;
        DPADD_SB2_SH(src43_r, src65_r, filt0, filt1, dst3_r, dst3_r);
        dst0_l = const_vec;
        DPADD_SB2_SH(src2110, src4332, filt0, filt1, dst0_l, dst0_l);
        dst1_l = const_vec;
        DPADD_SB2_SH(src4332, src6554, filt0, filt1, dst1_l, dst1_l);

        ST_SH4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
        ST8x4_UB(dst0_l, dst1_l, dst + 8, (2 * dst_stride));
        dst += (4 * dst_stride);

        src2 = src6;
        src10_r = src54_r;
        src21_r = src65_r;
        src2110 = src6554;
    }
}
static void hevc_vt_4t_16w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5;
    v16i8 src10_r, src32_r, src21_r, src43_r;
    v16i8 src10_l, src32_l, src21_l, src43_l;
    v8i16 dst0_r, dst1_r, dst0_l, dst1_l;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
    ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        LD_SB2(src, src_stride, src3, src4);
        src += (2 * src_stride);
        XORI_B2_128_SB(src3, src4);
        ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
        ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);
        dst0_r = const_vec;
        DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
        dst0_l = const_vec;
        DPADD_SB2_SH(src10_l, src32_l, filt0, filt1, dst0_l, dst0_l);
        dst1_r = const_vec;
        DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
        dst1_l = const_vec;
        DPADD_SB2_SH(src21_l, src43_l, filt0, filt1, dst1_l, dst1_l);
        ST_SH2(dst0_r, dst0_l, dst, 8);
        dst += dst_stride;
        ST_SH2(dst1_r, dst1_l, dst, 8);
        dst += dst_stride;

        LD_SB2(src, src_stride, src5, src2);
        src += (2 * src_stride);
        XORI_B2_128_SB(src5, src2);
        ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
        ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);
        dst0_r = const_vec;
        DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
        dst0_l = const_vec;
        DPADD_SB2_SH(src32_l, src10_l, filt0, filt1, dst0_l, dst0_l);
        dst1_r = const_vec;
        DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);
        dst1_l = const_vec;
        DPADD_SB2_SH(src43_l, src21_l, filt0, filt1, dst1_l, dst1_l);
        ST_SH2(dst0_r, dst0_l, dst, 8);
        dst += dst_stride;
        ST_SH2(dst1_r, dst1_l, dst, 8);
        dst += dst_stride;
    }
}
static void hevc_vt_4t_24w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5;
    v16i8 src6, src7, src8, src9, src10, src11;
    v16i8 src10_r, src32_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src87_r, src109_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v16i8 src10_l, src32_l, src21_l, src43_l;
    v8i16 dst0_l, dst1_l;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    /* 16 width */
    LD_SB3(src, src_stride, src0, src1, src2);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
    ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);

    /* 8 width */
    LD_SB3(src + 16, src_stride, src6, src7, src8);
    src += (3 * src_stride);
    XORI_B3_128_SB(src6, src7, src8);
    ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        /* 16 width */
        LD_SB2(src, src_stride, src3, src4);
        XORI_B2_128_SB(src3, src4);
        ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
        ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);

        /* 8 width */
        LD_SB2(src + 16, src_stride, src9, src10);
        src += (2 * src_stride);
        XORI_B2_128_SB(src9, src10);
        ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);

        /* 16 width */
        dst0_r = const_vec;
        DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
        dst0_l = const_vec;
        DPADD_SB2_SH(src10_l, src32_l, filt0, filt1, dst0_l, dst0_l);
        dst1_r = const_vec;
        DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
        dst1_l = const_vec;
        DPADD_SB2_SH(src21_l, src43_l, filt0, filt1, dst1_l, dst1_l);

        /* 8 width */
        dst2_r = const_vec;
        DPADD_SB2_SH(src76_r, src98_r, filt0, filt1, dst2_r, dst2_r);
        dst3_r = const_vec;
        DPADD_SB2_SH(src87_r, src109_r, filt0, filt1, dst3_r, dst3_r);

        /* 16 + 8 width */
        ST_SH2(dst0_r, dst0_l, dst, 8);
        ST_SH(dst2_r, dst + 16);
        dst += dst_stride;
        ST_SH2(dst1_r, dst1_l, dst, 8);
        ST_SH(dst3_r, dst + 16);
        dst += dst_stride;

        /* 16 width */
        LD_SB2(src, src_stride, src5, src2);
        XORI_B2_128_SB(src5, src2);
        ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
        ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);

        /* 8 width */
        LD_SB2(src + 16, src_stride, src11, src8);
        src += (2 * src_stride);
        XORI_B2_128_SB(src11, src8);
        ILVR_B2_SB(src11, src10, src8, src11, src76_r, src87_r);

        /* 16 width */
        dst0_r = const_vec;
        DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
        dst0_l = const_vec;
        DPADD_SB2_SH(src32_l, src10_l, filt0, filt1, dst0_l, dst0_l);
        dst1_r = const_vec;
        DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);
        dst1_l = const_vec;
        DPADD_SB2_SH(src43_l, src21_l, filt0, filt1, dst1_l, dst1_l);

        /* 8 width */
        dst2_r = const_vec;
        DPADD_SB2_SH(src98_r, src76_r, filt0, filt1, dst2_r, dst2_r);
        dst3_r = const_vec;
        DPADD_SB2_SH(src109_r, src87_r, filt0, filt1, dst3_r, dst3_r);

        /* 16 + 8 width */
        ST_SH2(dst0_r, dst0_l, dst, 8);
        ST_SH(dst2_r, dst + 16);
        dst += dst_stride;
        ST_SH2(dst1_r, dst1_l, dst, 8);
        ST_SH(dst3_r, dst + 16);
        dst += dst_stride;
    }
}
static void hevc_vt_4t_32w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5;
    v16i8 src6, src7, src8, src9, src10, src11;
    v16i8 src10_r, src32_r, src76_r, src98_r;
    v16i8 src21_r, src43_r, src87_r, src109_r;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r;
    v16i8 src10_l, src32_l, src76_l, src98_l;
    v16i8 src21_l, src43_l, src87_l, src109_l;
    v8i16 dst0_l, dst1_l, dst2_l, dst3_l;
    v8i16 filt0, filt1;
    v8i16 filter_vec, const_vec;

    src -= src_stride;
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    filter_vec = LD_SH(filter);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    /* 16 width */
    LD_SB3(src, src_stride, src0, src1, src2);
    XORI_B3_128_SB(src0, src1, src2);
    ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r);
    ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l);

    /* next 16 width */
    LD_SB3(src + 16, src_stride, src6, src7, src8);
    src += (3 * src_stride);
    XORI_B3_128_SB(src6, src7, src8);
    ILVR_B2_SB(src7, src6, src8, src7, src76_r, src87_r);
    ILVL_B2_SB(src7, src6, src8, src7, src76_l, src87_l);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        /* 16 width */
        LD_SB2(src, src_stride, src3, src4);
        XORI_B2_128_SB(src3, src4);
        ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r);
        ILVL_B2_SB(src3, src2, src4, src3, src32_l, src43_l);

        /* next 16 width */
        LD_SB2(src + 16, src_stride, src9, src10);
        src += (2 * src_stride);
        XORI_B2_128_SB(src9, src10);
        ILVR_B2_SB(src9, src8, src10, src9, src98_r, src109_r);
        ILVL_B2_SB(src9, src8, src10, src9, src98_l, src109_l);

        /* 16 width */
        dst0_r = const_vec;
        DPADD_SB2_SH(src10_r, src32_r, filt0, filt1, dst0_r, dst0_r);
        dst0_l = const_vec;
        DPADD_SB2_SH(src10_l, src32_l, filt0, filt1, dst0_l, dst0_l);
        dst1_r = const_vec;
        DPADD_SB2_SH(src21_r, src43_r, filt0, filt1, dst1_r, dst1_r);
        dst1_l = const_vec;
        DPADD_SB2_SH(src21_l, src43_l, filt0, filt1, dst1_l, dst1_l);

        /* next 16 width */
        dst2_r = const_vec;
        DPADD_SB2_SH(src76_r, src98_r, filt0, filt1, dst2_r, dst2_r);
        dst2_l = const_vec;
        DPADD_SB2_SH(src76_l, src98_l, filt0, filt1, dst2_l, dst2_l);
        dst3_r = const_vec;
        DPADD_SB2_SH(src87_r, src109_r, filt0, filt1, dst3_r, dst3_r);
        dst3_l = const_vec;
        DPADD_SB2_SH(src87_l, src109_l, filt0, filt1, dst3_l, dst3_l);

        /* 32 width */
        ST_SH4(dst0_r, dst0_l, dst2_r, dst2_l, dst, 8);
        dst += dst_stride;
        ST_SH4(dst1_r, dst1_l, dst3_r, dst3_l, dst, 8);
        dst += dst_stride;

        /* 16 width */
        LD_SB2(src, src_stride, src5, src2);
        XORI_B2_128_SB(src5, src2);
        ILVR_B2_SB(src5, src4, src2, src5, src10_r, src21_r);
        ILVL_B2_SB(src5, src4, src2, src5, src10_l, src21_l);

        /* next 16 width */
        LD_SB2(src + 16, src_stride, src11, src8);
        src += (2 * src_stride);
        XORI_B2_128_SB(src11, src8);
        ILVR_B2_SB(src11, src10, src8, src11, src76_r, src87_r);
        ILVL_B2_SB(src11, src10, src8, src11, src76_l, src87_l);

        /* 16 width */
        dst0_r = const_vec;
        DPADD_SB2_SH(src32_r, src10_r, filt0, filt1, dst0_r, dst0_r);
        dst0_l = const_vec;
        DPADD_SB2_SH(src32_l, src10_l, filt0, filt1, dst0_l, dst0_l);
        dst1_r = const_vec;
        DPADD_SB2_SH(src43_r, src21_r, filt0, filt1, dst1_r, dst1_r);
        dst1_l = const_vec;
        DPADD_SB2_SH(src43_l, src21_l, filt0, filt1, dst1_l, dst1_l);

        /* next 16 width */
        dst2_r = const_vec;
        DPADD_SB2_SH(src98_r, src76_r, filt0, filt1, dst2_r, dst2_r);
        dst2_l = const_vec;
        DPADD_SB2_SH(src98_l, src76_l, filt0, filt1, dst2_l, dst2_l);
        dst3_r = const_vec;
        DPADD_SB2_SH(src109_r, src87_r, filt0, filt1, dst3_r, dst3_r);
        dst3_l = const_vec;
        DPADD_SB2_SH(src109_l, src87_l, filt0, filt1, dst3_l, dst3_l);

        /* 32 width */
        ST_SH4(dst0_r, dst0_l, dst2_r, dst2_l, dst, 8);
        dst += dst_stride;
        ST_SH4(dst1_r, dst1_l, dst3_r, dst3_l, dst, 8);
        dst += dst_stride;
    }
}
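
/* HV (2-D separable) 4-tap filters start here: the horizontal pass
 * produces 16-bit intermediates, pairs of which are interleaved with
 * ILVRL_H2_SH so the vertical pass can run as 32-bit dot products via
 * HEVC_FILT_4TAP, followed by >> 6 to drop the intermediate headroom. */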
static void hevc_hv_4t_4x2_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y)
{
    int32_t dst_stride_in_bytes = 2 * dst_stride;
    v16i8 src0, src1, src2, src3, src4;
    v8i16 filt0, filt1;
    v8i16 filt_h0, filt_h1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
    v16i8 mask1;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
    v8i16 dst20, dst31, dst42, dst10, dst32, dst21, dst43;
    v4i32 dst0, dst1;

    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
    XORI_B5_128_SB(src0, src1, src2, src3, src4);
    VSHF_B2_SB(src0, src2, src0, src2, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src1, src3, src1, src3, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src2, src4, src2, src4, mask0, mask1, vec4, vec5);

    dst20 = const_vec;
    dst31 = const_vec;
    dst42 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst20, dst20);
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst31, dst31);
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst42, dst42);
    ILVRL_H2_SH(dst31, dst20, dst10, dst32);
    ILVRL_H2_SH(dst42, dst31, dst21, dst43);

    dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
    dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
    dst0 >>= 6;
    dst1 >>= 6;
    dst0 = (v4i32) __msa_pckev_h((v8i16) dst1, (v8i16) dst0);
    ST8x2_UB(dst0, dst, dst_stride_in_bytes);
}
static void hevc_hv_4t_4x4_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y)
{
    int32_t dst_stride_in_bytes = 2 * dst_stride;
    v16i8 src0, src1, src2, src3, src4, src5, src6;
    v8i16 filt0, filt1;
    v8i16 filt_h0, filt_h1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
    v16i8 mask1;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 filter_vec, const_vec;
    v8i16 dst30, dst41, dst52, dst63, dst10, dst32, dst54, dst21, dst43, dst65;
    v4i32 dst0, dst1, dst2, dst3;

    src -= (src_stride + 1);

    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
    XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);

    VSHF_B2_SB(src0, src3, src0, src3, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src1, src4, src1, src4, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src2, src5, src2, src5, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src3, src6, src3, src6, mask0, mask1, vec6, vec7);

    dst30 = const_vec;
    dst41 = const_vec;
    dst52 = const_vec;
    dst63 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst30, dst30);
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst41, dst41);
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst52, dst52);
    DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst63, dst63);

    ILVRL_H2_SH(dst41, dst30, dst10, dst43);
    ILVRL_H2_SH(dst52, dst41, dst21, dst54);
    ILVRL_H2_SH(dst63, dst52, dst32, dst65);

    dst0 = HEVC_FILT_4TAP(dst10, dst32, filt_h0, filt_h1);
    dst1 = HEVC_FILT_4TAP(dst21, dst43, filt_h0, filt_h1);
    dst2 = HEVC_FILT_4TAP(dst32, dst54, filt_h0, filt_h1);
    dst3 = HEVC_FILT_4TAP(dst43, dst65, filt_h0, filt_h1);
    SRA_4V(dst0, dst1, dst2, dst3, 6);
    PCKEV_H2_SW(dst1, dst0, dst3, dst2, dst0, dst2);
    ST8x4_UB(dst0, dst2, dst, dst_stride_in_bytes);
}
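
/* For 4-wide HV below, two rows share one vector (e.g. dst73 holds the
 * horizontal results of rows 7 and 3), and dst22/dst10_r/dst21_r carry
 * the vertical-filter history into the next 8-row batch. */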
static void hevc_hv_4t_4multx8mult_msa(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter_x,
                                       const int8_t *filter_y,
                                       int32_t height)
{
    uint32_t loop_cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6;
    v16i8 src7, src8, src9, src10;
    v8i16 filt0, filt1;
    v8i16 filt_h0, filt_h1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr + 16);
    v16i8 mask1;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst10, dst21, dst22, dst73, dst84, dst95, dst106;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r;
    v8i16 dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
    v4i32 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;

    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src1, src2, src1, src2, mask0, mask1, vec2, vec3);
    dst10 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst10, dst10);
    dst21 = const_vec;
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst21, dst21);
    ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
    dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);

    for (loop_cnt = height >> 3; loop_cnt--;) {
        LD_SB8(src, src_stride,
               src3, src4, src5, src6, src7, src8, src9, src10);
        src += (8 * src_stride);
        XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);

        VSHF_B2_SB(src3, src7, src3, src7, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src4, src8, src4, src8, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src5, src9, src5, src9, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src6, src10, src6, src10, mask0, mask1, vec6, vec7);

        dst73 = const_vec;
        dst84 = const_vec;
        dst95 = const_vec;
        dst106 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst73, dst73);
        DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst84, dst84);
        DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst95, dst95);
        DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst106, dst106);

        dst32_r = __msa_ilvr_h(dst73, dst22);
        ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
        ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
        ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
        dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
        dst76_r = __msa_ilvr_h(dst22, dst106);

        dst0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
        dst1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
        dst2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
        dst3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
        dst4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
        dst5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
        dst6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
        dst7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
        SRA_4V(dst0, dst1, dst2, dst3, 6);
        SRA_4V(dst4, dst5, dst6, dst7, 6);
        PCKEV_H4_SW(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
                    dst0, dst1, dst2, dst3);
        ST8x8_UB(dst0, dst1, dst2, dst3, dst, 2 * dst_stride);
        dst += (8 * dst_stride);

        dst10_r = dst98_r;
        dst21_r = dst109_r;
        dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
    }
}
static void hevc_hv_4t_4w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter_x,
                              const int8_t *filter_y,
                              int32_t height)
{
    if (2 == height) {
        hevc_hv_4t_4x2_msa(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (4 == height) {
        hevc_hv_4t_4x4_msa(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (0 == (height % 8)) {
        hevc_hv_4t_4multx8mult_msa(src, src_stride, dst, dst_stride,
                                   filter_x, filter_y, height);
    }
}
static void hevc_hv_4t_6w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter_x,
                              const int8_t *filter_y,
                              int32_t height)
{
    int32_t dst_stride_in_bytes = 2 * dst_stride;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v8i16 filt0, filt1;
    v8i16 filt_h0, filt_h1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6, dsth7, dsth8, dsth9;
    v8i16 dsth10, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
    v8i16 dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
    v8i16 dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
    v8i16 dst98_l, dst21_l, dst43_l, dst65_l, dst87_l, dst109_l;
    v8i16 dst1021_l, dst3243_l, dst5465_l, dst7687_l, dst98109_l;
    v4i32 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
    v4i32 dst0_l, dst1_l, dst2_l, dst3_l;

    src -= (src_stride + 1);
    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);

    VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);

    dsth0 = const_vec;
    dsth1 = const_vec;
    dsth2 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dsth0, dsth0);
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dsth1, dsth1);
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dsth2, dsth2);

    ILVRL_H2_SH(dsth1, dsth0, dst10_r, dst10_l);
    ILVRL_H2_SH(dsth2, dsth1, dst21_r, dst21_l);

    LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9, src10);
    XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);

    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);

    dsth3 = const_vec;
    dsth4 = const_vec;
    dsth5 = const_vec;
    dsth6 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dsth3, dsth3);
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dsth4, dsth4);
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dsth5, dsth5);
    DPADD_SB2_SH(vec6, vec7, filt0, filt1, dsth6, dsth6);

    VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src9, src9, src9, src9, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src10, src10, src10, src10, mask0, mask1, vec6, vec7);

    dsth7 = const_vec;
    dsth8 = const_vec;
    dsth9 = const_vec;
    dsth10 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dsth7, dsth7);
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dsth8, dsth8);
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dsth9, dsth9);
    DPADD_SB2_SH(vec6, vec7, filt0, filt1, dsth10, dsth10);

    ILVRL_H2_SH(dsth3, dsth2, dst32_r, dst32_l);
    ILVRL_H2_SH(dsth4, dsth3, dst43_r, dst43_l);
    ILVRL_H2_SH(dsth5, dsth4, dst54_r, dst54_l);
    ILVRL_H2_SH(dsth6, dsth5, dst65_r, dst65_l);
    ILVRL_H2_SH(dsth7, dsth6, dst76_r, dst76_l);
    ILVRL_H2_SH(dsth8, dsth7, dst87_r, dst87_l);
    ILVRL_H2_SH(dsth9, dsth8, dst98_r, dst98_l);
    ILVRL_H2_SH(dsth10, dsth9, dst109_r, dst109_l);

    PCKEV_D2_SH(dst21_l, dst10_l, dst43_l, dst32_l, dst1021_l, dst3243_l);
    PCKEV_D2_SH(dst65_l, dst54_l, dst87_l, dst76_l, dst5465_l, dst7687_l);
    dst98109_l = (v8i16) __msa_pckev_d((v2i64) dst109_l, (v2i64) dst98_l);

    dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
    dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
    dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
    dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
    dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
    dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
    dst6_r = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
    dst7_r = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);
    dst0_l = HEVC_FILT_4TAP(dst1021_l, dst3243_l, filt_h0, filt_h1);
    dst1_l = HEVC_FILT_4TAP(dst3243_l, dst5465_l, filt_h0, filt_h1);
    dst2_l = HEVC_FILT_4TAP(dst5465_l, dst7687_l, filt_h0, filt_h1);
    dst3_l = HEVC_FILT_4TAP(dst7687_l, dst98109_l, filt_h0, filt_h1);
    SRA_4V(dst0_r, dst1_r, dst2_r, dst3_r, 6);
    SRA_4V(dst4_r, dst5_r, dst6_r, dst7_r, 6);
    SRA_4V(dst0_l, dst1_l, dst2_l, dst3_l, 6);
    PCKEV_H2_SH(dst1_r, dst0_r, dst3_r, dst2_r, tmp0, tmp1);
    PCKEV_H2_SH(dst5_r, dst4_r, dst7_r, dst6_r, tmp2, tmp3);
    PCKEV_H2_SH(dst1_l, dst0_l, dst3_l, dst2_l, tmp4, tmp5);
    ST8x4_UB(tmp0, tmp1, dst, dst_stride_in_bytes);
    ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, dst + 4, dst_stride_in_bytes);
    dst += 4 * dst_stride;
    ST8x4_UB(tmp2, tmp3, dst, dst_stride_in_bytes);
    ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, dst + 4, dst_stride_in_bytes);
}
static void hevc_hv_4t_8x2_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y)
{
    v16i8 src0, src1, src2, src3, src4;
    v8i16 filt0, filt1;
    v8i16 filt_h0, filt_h1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    v8i16 dst0, dst1, dst2, dst3, dst4;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l;
    v8i16 dst10_r, dst32_r, dst21_r, dst43_r;
    v8i16 dst10_l, dst32_l, dst21_l, dst43_l;

    src -= (src_stride + 1);

    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
    XORI_B5_128_SB(src0, src1, src2, src3, src4);

    VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);

    dst0 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
    dst1 = const_vec;
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
    dst2 = const_vec;
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
    dst3 = const_vec;
    DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst3, dst3);
    dst4 = const_vec;
    DPADD_SB2_SH(vec8, vec9, filt0, filt1, dst4, dst4);

    ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
    ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
    ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
    ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
    dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
    dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
    dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
    dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
    SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
    PCKEV_H2_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r, dst1_r);
    ST_SW2(dst0_r, dst1_r, dst, dst_stride);
}
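
/* width8mult below is the block width divided by 8; each iteration
 * filters one 8-wide column of four output rows and then steps src and
 * dst right by 8. */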
static void hevc_hv_4t_8multx4_msa(uint8_t *src, int32_t src_stride,
                                   int16_t *dst, int32_t dst_stride,
                                   const int8_t *filter_x,
                                   const int8_t *filter_y, int32_t width8mult)
{
    int32_t cnt;
    v16i8 src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, const_vec;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6;
    v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;

    src -= (src_stride + 1);

    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask0 = LD_SB(ff_hevc_mask_arr);
    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (cnt = width8mult; cnt--;) {
        LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
        src += 8;
        XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);

        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);

        dst0 = const_vec;
        dst1 = const_vec;
        dst2 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
        DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
        DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);

        ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
        ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);

        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);

        dst3 = const_vec;
        dst4 = const_vec;
        dst5 = const_vec;
        dst6 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
        DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst4, dst4);
        DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst5, dst5);
        DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst6, dst6);
        ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
        ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
        ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
        ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);
        dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
        dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
        dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
        dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);

        dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
        dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
        dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
        dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
        SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
        SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
        PCKEV_H2_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst0_r, dst1_r);
        PCKEV_H2_SW(dst2_l, dst2_r, dst3_l, dst3_r, dst2_r, dst3_r);

        ST_SW4(dst0_r, dst1_r, dst2_r, dst3_r, dst, dst_stride);
        dst += 8;
    }
}
static void hevc_hv_4t_8x6_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y)
{
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v8i16 filt0, filt1;
    v8i16 filt_h0, filt_h1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    v16i8 vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    v4i32 dst4_r, dst4_l, dst5_r, dst5_l;
    v8i16 dst10_r, dst32_r, dst10_l, dst32_l;
    v8i16 dst21_r, dst43_r, dst21_l, dst43_l;
    v8i16 dst54_r, dst54_l, dst65_r, dst65_l;
    v8i16 dst76_r, dst76_l, dst87_r, dst87_l;

    src -= (src_stride + 1);

    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
    src += (5 * src_stride);
    LD_SB4(src, src_stride, src5, src6, src7, src8);

    XORI_B5_128_SB(src0, src1, src2, src3, src4);
    XORI_B4_128_SB(src5, src6, src7, src8);

    VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);
    VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec6, vec7);
    VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec8, vec9);
    VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec10, vec11);
    VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec12, vec13);
    VSHF_B2_SB(src7, src7, src7, src7, mask0, mask1, vec14, vec15);
    VSHF_B2_SB(src8, src8, src8, src8, mask0, mask1, vec16, vec17);

    dst0 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
    dst1 = const_vec;
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
    dst2 = const_vec;
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);
    dst3 = const_vec;
    DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst3, dst3);
    dst4 = const_vec;
    DPADD_SB2_SH(vec8, vec9, filt0, filt1, dst4, dst4);
    dst5 = const_vec;
    DPADD_SB2_SH(vec10, vec11, filt0, filt1, dst5, dst5);
    dst6 = const_vec;
    DPADD_SB2_SH(vec12, vec13, filt0, filt1, dst6, dst6);
    dst7 = const_vec;
    DPADD_SB2_SH(vec14, vec15, filt0, filt1, dst7, dst7);
    dst8 = const_vec;
    DPADD_SB2_SH(vec16, vec17, filt0, filt1, dst8, dst8);

    ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
    ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);
    ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
    ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
    ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
    ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);
    ILVRL_H2_SH(dst7, dst6, dst76_r, dst76_l);
    ILVRL_H2_SH(dst8, dst7, dst87_r, dst87_l);

    dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
    dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
    dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
    dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
    dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
    dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
    dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
    dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);
    dst4_r = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
    dst4_l = HEVC_FILT_4TAP(dst54_l, dst76_l, filt_h0, filt_h1);
    dst5_r = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
    dst5_l = HEVC_FILT_4TAP(dst65_l, dst87_l, filt_h0, filt_h1);

    SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
    SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
    SRA_4V(dst4_r, dst4_l, dst5_r, dst5_l, 6);

    PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r,
                dst2_l, dst2_r, dst3_l, dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);
    PCKEV_H2_SW(dst4_l, dst4_r, dst5_l, dst5_r, dst4_r, dst5_r);

    ST_SW2(dst0_r, dst1_r, dst, dst_stride);
    dst += (2 * dst_stride);
    ST_SW2(dst2_r, dst3_r, dst, dst_stride);
    dst += (2 * dst_stride);
    ST_SW2(dst4_r, dst5_r, dst, dst_stride);
}
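
/* General 8xN HV case: the outer loop walks 8-wide columns via
 * src_tmp/dst_tmp, while the inner loop emits four rows per pass and
 * rolls the vertical history (dst10_*, dst21_*, dst2) forward. */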
static void hevc_hv_4t_8multx4mult_msa(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter_x,
                                       const int8_t *filter_y,
                                       int32_t height,
                                       int32_t width8mult)
{
    uint32_t loop_cnt, cnt;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    v16i8 src0, src1, src2, src3, src4, src5, src6;
    v8i16 filt0, filt1;
    v8i16 filt_h0, filt_h1;
    v16i8 mask0 = LD_SB(ff_hevc_mask_arr);
    v16i8 mask1;
    v8i16 filter_vec, const_vec;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;

    /* point at the pixel one row up and one column left, so the 4-tap
       window is centred on the block */
    src -= (src_stride + 1);

    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask1 = mask0 + 2;

    /* 128 << 6 compensates the 128 bias removed by XORI_B*_128 below,
       since the 4-tap filter coefficients sum to 64 */
    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    for (cnt = width8mult; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        LD_SB3(src_tmp, src_stride, src0, src1, src2);
        src_tmp += (3 * src_stride);

        XORI_B3_128_SB(src0, src1, src2);

        /* horizontal pass for the first three rows */
        VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);

        dst0 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
        dst1 = const_vec;
        DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
        dst2 = const_vec;
        DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);

        ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
        ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);

        for (loop_cnt = height >> 2; loop_cnt--;) {
            LD_SB4(src_tmp, src_stride, src3, src4, src5, src6);
            src_tmp += (4 * src_stride);
            XORI_B4_128_SB(src3, src4, src5, src6);

            /* horizontal pass for the next four rows */
            VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
            VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
            VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
            VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);

            dst3 = const_vec;
            dst4 = const_vec;
            dst5 = const_vec;
            dst6 = const_vec;
            DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
            DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst4, dst4);
            DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst5, dst5);
            DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst6, dst6);

            ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
            ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
            ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
            ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);

            /* vertical pass: four output rows */
            dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
            dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
            dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
            dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
            dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
            dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
            dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
            dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);

            SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
            SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);

            PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r,
                        dst2_l, dst2_r, dst3_l, dst3_r,
                        dst0_r, dst1_r, dst2_r, dst3_r);

            ST_SW4(dst0_r, dst1_r, dst2_r, dst3_r, dst_tmp, dst_stride);
            dst_tmp += (4 * dst_stride);

            /* slide the vertical window: carry the last rows over */
            dst10_r = dst54_r;
            dst10_l = dst54_l;
            dst21_r = dst65_r;
            dst21_l = dst65_l;
            dst2 = dst6;
        }

        src += 8;
        dst += 8;
    }
}
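
/* 8-column 2-D 4-tap filter: dispatches on height to the fixed-size
 * kernels (8x2, 8x4, 8x6) or to the generic multiple-of-4 loop. */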
static void hevc_hv_4t_8w_msa(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter_x,
                              const int8_t *filter_y,
                              int32_t height)
{
    if (2 == height) {
        hevc_hv_4t_8x2_msa(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (4 == height) {
        hevc_hv_4t_8multx4_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, 1);
    } else if (6 == height) {
        hevc_hv_4t_8x6_msa(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (0 == (height % 4)) {
        hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
                                   filter_x, filter_y, height, 1);
    }
}
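
/* 12-column 2-D 4-tap filter.  The left 8 columns reuse the
 * 8multx4mult scheme; the right 4 columns pack two rows per vector
 * using the two-source shuffle masks at ff_hevc_mask_arr + 16 and are
 * processed eight rows per iteration.  The hard-coded loop counts
 * assume a height of 16 rows. */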
static void hevc_hv_4t_12w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    uint32_t loop_cnt;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v16i8 mask0, mask1, mask2, mask3;
    v8i16 filt0, filt1, filt_h0, filt_h1, filter_vec, const_vec;
    v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst10, dst21, dst22, dst73;
    v8i16 dst84, dst95, dst106, dst76_r, dst98_r, dst87_r, dst109_r;
    v8i16 dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    v8i16 dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    v4i32 dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

    src -= (src_stride + 1);

    filter_vec = LD_SH(filter_x);
    SPLATI_H2_SH(filter_vec, 0, 1, filt0, filt1);

    filter_vec = LD_SH(filter_y);
    UNPCK_R_SB_SH(filter_vec, filter_vec);

    SPLATI_W2_SH(filter_vec, 0, filt_h0, filt_h1);

    mask0 = LD_SB(ff_hevc_mask_arr);
    mask1 = mask0 + 2;

    const_vec = __msa_ldi_h(128);
    const_vec <<= 6;

    /* left 8 columns */
    src_tmp = src;
    dst_tmp = dst;

    LD_SB3(src_tmp, src_stride, src0, src1, src2);
    src_tmp += (3 * src_stride);

    XORI_B3_128_SB(src0, src1, src2);

    VSHF_B2_SB(src0, src0, src0, src0, mask0, mask1, vec0, vec1);
    VSHF_B2_SB(src1, src1, src1, src1, mask0, mask1, vec2, vec3);
    VSHF_B2_SB(src2, src2, src2, src2, mask0, mask1, vec4, vec5);

    dst0 = const_vec;
    dst1 = const_vec;
    dst2 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst0, dst0);
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst1, dst1);
    DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst2, dst2);

    ILVRL_H2_SH(dst1, dst0, dst10_r, dst10_l);
    ILVRL_H2_SH(dst2, dst1, dst21_r, dst21_l);

    for (loop_cnt = 4; loop_cnt--;) {
        LD_SB4(src_tmp, src_stride, src3, src4, src5, src6);
        src_tmp += (4 * src_stride);
        XORI_B4_128_SB(src3, src4, src5, src6);

        VSHF_B2_SB(src3, src3, src3, src3, mask0, mask1, vec0, vec1);
        VSHF_B2_SB(src4, src4, src4, src4, mask0, mask1, vec2, vec3);
        VSHF_B2_SB(src5, src5, src5, src5, mask0, mask1, vec4, vec5);
        VSHF_B2_SB(src6, src6, src6, src6, mask0, mask1, vec6, vec7);

        dst3 = const_vec;
        dst4 = const_vec;
        dst5 = const_vec;
        dst6 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst3, dst3);
        DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst4, dst4);
        DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst5, dst5);
        DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst6, dst6);

        ILVRL_H2_SH(dst3, dst2, dst32_r, dst32_l);
        ILVRL_H2_SH(dst4, dst3, dst43_r, dst43_l);
        ILVRL_H2_SH(dst5, dst4, dst54_r, dst54_l);
        ILVRL_H2_SH(dst6, dst5, dst65_r, dst65_l);

        dst0_r = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
        dst0_l = HEVC_FILT_4TAP(dst10_l, dst32_l, filt_h0, filt_h1);
        dst1_r = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
        dst1_l = HEVC_FILT_4TAP(dst21_l, dst43_l, filt_h0, filt_h1);
        dst2_r = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
        dst2_l = HEVC_FILT_4TAP(dst32_l, dst54_l, filt_h0, filt_h1);
        dst3_r = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
        dst3_l = HEVC_FILT_4TAP(dst43_l, dst65_l, filt_h0, filt_h1);

        SRA_4V(dst0_r, dst0_l, dst1_r, dst1_l, 6);
        SRA_4V(dst2_r, dst2_l, dst3_r, dst3_l, 6);
        PCKEV_H4_SW(dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r, dst3_l,
                    dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);
        ST_SW4(dst0_r, dst1_r, dst2_r, dst3_r, dst_tmp, dst_stride);
        dst_tmp += (4 * dst_stride);

        dst10_r = dst54_r;
        dst10_l = dst54_l;
        dst21_r = dst65_r;
        dst21_l = dst65_l;
        dst2 = dst6;
    }

    /* right 4 columns, two source rows packed per vector */
    src += 8;
    dst += 8;

    mask2 = LD_SB(ff_hevc_mask_arr + 16);
    mask3 = mask2 + 2;

    LD_SB3(src, src_stride, src0, src1, src2);
    src += (3 * src_stride);
    XORI_B3_128_SB(src0, src1, src2);
    VSHF_B2_SB(src0, src1, src0, src1, mask2, mask3, vec0, vec1);
    VSHF_B2_SB(src1, src2, src1, src2, mask2, mask3, vec2, vec3);

    dst10 = const_vec;
    dst21 = const_vec;
    DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst10, dst10);
    DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst21, dst21);
    ILVRL_H2_SH(dst21, dst10, dst10_r, dst21_r);
    dst22 = (v8i16) __msa_splati_d((v2i64) dst21, 1);

    for (loop_cnt = 2; loop_cnt--;) {
        LD_SB8(src, src_stride, src3, src4, src5, src6, src7, src8, src9,
               src10);
        src += (8 * src_stride);
        XORI_B8_128_SB(src3, src4, src5, src6, src7, src8, src9, src10);
        VSHF_B2_SB(src3, src7, src3, src7, mask2, mask3, vec0, vec1);
        VSHF_B2_SB(src4, src8, src4, src8, mask2, mask3, vec2, vec3);
        VSHF_B2_SB(src5, src9, src5, src9, mask2, mask3, vec4, vec5);
        VSHF_B2_SB(src6, src10, src6, src10, mask2, mask3, vec6, vec7);

        dst73 = const_vec;
        dst84 = const_vec;
        dst95 = const_vec;
        dst106 = const_vec;
        DPADD_SB2_SH(vec0, vec1, filt0, filt1, dst73, dst73);
        DPADD_SB2_SH(vec2, vec3, filt0, filt1, dst84, dst84);
        DPADD_SB2_SH(vec4, vec5, filt0, filt1, dst95, dst95);
        DPADD_SB2_SH(vec6, vec7, filt0, filt1, dst106, dst106);

        dst32_r = __msa_ilvr_h(dst73, dst22);
        ILVRL_H2_SH(dst84, dst73, dst43_r, dst87_r);
        ILVRL_H2_SH(dst95, dst84, dst54_r, dst98_r);
        ILVRL_H2_SH(dst106, dst95, dst65_r, dst109_r);
        dst22 = (v8i16) __msa_splati_d((v2i64) dst73, 1);
        dst76_r = __msa_ilvr_h(dst22, dst106);

        tmp0 = HEVC_FILT_4TAP(dst10_r, dst32_r, filt_h0, filt_h1);
        tmp1 = HEVC_FILT_4TAP(dst21_r, dst43_r, filt_h0, filt_h1);
        tmp2 = HEVC_FILT_4TAP(dst32_r, dst54_r, filt_h0, filt_h1);
        tmp3 = HEVC_FILT_4TAP(dst43_r, dst65_r, filt_h0, filt_h1);
        tmp4 = HEVC_FILT_4TAP(dst54_r, dst76_r, filt_h0, filt_h1);
        tmp5 = HEVC_FILT_4TAP(dst65_r, dst87_r, filt_h0, filt_h1);
        tmp6 = HEVC_FILT_4TAP(dst76_r, dst98_r, filt_h0, filt_h1);
        tmp7 = HEVC_FILT_4TAP(dst87_r, dst109_r, filt_h0, filt_h1);

        SRA_4V(tmp0, tmp1, tmp2, tmp3, 6);
        SRA_4V(tmp4, tmp5, tmp6, tmp7, 6);
        PCKEV_H4_SW(tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, tmp0, tmp1,
                    tmp2, tmp3);
        ST8x8_UB(tmp0, tmp1, tmp2, tmp3, dst, 2 * dst_stride);
        dst += (8 * dst_stride);

        dst10_r = dst98_r;
        dst21_r = dst109_r;
        dst22 = (v8i16) __msa_splati_d((v2i64) dst106, 1);
    }
}
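
/* 16-column 2-D 4-tap filter: two 8-column passes (width8mult = 2),
 * with a dedicated kernel for the 16x4 case.  The 24- and 32-column
 * variants below are plain wrappers around the same generic loop. */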
static void hevc_hv_4t_16w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    if (4 == height) {
        hevc_hv_4t_8multx4_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, 2);
    } else {
        hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
                                   filter_x, filter_y, height, 2);
    }
}
static void hevc_hv_4t_24w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 3);
}

static void hevc_hv_4t_32w_msa(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_4t_8multx4mult_msa(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 4);
}
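
/* Public entry points.  The MC_COPY / MC / MC_HV macros below stamp out
 * the ff_hevc_put_hevc_* functions wired up by the MSA init code; all of
 * them write 16-bit intermediates with a MAX_PB_SIZE stride.  As an
 * illustration, MC_COPY(8) expands to
 *
 *     void ff_hevc_put_hevc_pel_pixels8_8_msa(int16_t *dst, uint8_t *src,
 *                                             ptrdiff_t src_stride,
 *                                             int height, intptr_t mx,
 *                                             intptr_t my, int width)
 *     {
 *         hevc_copy_8w_msa(src, src_stride, dst, MAX_PB_SIZE, height);
 *     }
 *
 * i.e. a copy-and-widen of an 8-column block; mx, my and width are
 * unused for plain pel copies. */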
#define MC_COPY(WIDTH)                                                    \
void ff_hevc_put_hevc_pel_pixels##WIDTH##_8_msa(int16_t *dst,             \
                                                uint8_t *src,             \
                                                ptrdiff_t src_stride,     \
                                                int height,               \
                                                intptr_t mx,              \
                                                intptr_t my,              \
                                                int width)                \
{                                                                         \
    hevc_copy_##WIDTH##w_msa(src, src_stride, dst, MAX_PB_SIZE, height);  \
}

MC_COPY(4);
MC_COPY(6);
MC_COPY(8);
MC_COPY(12);
MC_COPY(16);
MC_COPY(24);
MC_COPY(32);
MC_COPY(48);
MC_COPY(64);

#undef MC_COPY
#define MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR)                          \
void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst,          \
                                                   uint8_t *src,          \
                                                   ptrdiff_t src_stride,  \
                                                   int height,            \
                                                   intptr_t mx,           \
                                                   intptr_t my,           \
                                                   int width)             \
{                                                                         \
    const int8_t *filter = ff_hevc_##PEL##_filters[FILT_DIR - 1];         \
                                                                          \
    hevc_##DIR1##_##TAP##t_##WIDTH##w_msa(src, src_stride, dst,           \
                                          MAX_PB_SIZE, filter, height);   \
}
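
/* MC(qpel, h, 4, 8, hz, mx), for instance, defines
 * ff_hevc_put_hevc_qpel_h4_8_msa(): it picks the 8-tap filter for the
 * fractional position from ff_hevc_qpel_filters[mx - 1] and forwards to
 * hevc_hz_8t_4w_msa().  DIR1/FILT_DIR pair h with mx and v with my. */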
MC(qpel, h, 4, 8, hz, mx);
MC(qpel, h, 8, 8, hz, mx);
MC(qpel, h, 12, 8, hz, mx);
MC(qpel, h, 16, 8, hz, mx);
MC(qpel, h, 24, 8, hz, mx);
MC(qpel, h, 32, 8, hz, mx);
MC(qpel, h, 48, 8, hz, mx);
MC(qpel, h, 64, 8, hz, mx);

MC(qpel, v, 4, 8, vt, my);
MC(qpel, v, 8, 8, vt, my);
MC(qpel, v, 12, 8, vt, my);
MC(qpel, v, 16, 8, vt, my);
MC(qpel, v, 24, 8, vt, my);
MC(qpel, v, 32, 8, vt, my);
MC(qpel, v, 48, 8, vt, my);
MC(qpel, v, 64, 8, vt, my);

MC(epel, h, 4, 4, hz, mx);
MC(epel, h, 6, 4, hz, mx);
MC(epel, h, 8, 4, hz, mx);
MC(epel, h, 12, 4, hz, mx);
MC(epel, h, 16, 4, hz, mx);
MC(epel, h, 24, 4, hz, mx);
MC(epel, h, 32, 4, hz, mx);

MC(epel, v, 4, 4, vt, my);
MC(epel, v, 6, 4, vt, my);
MC(epel, v, 8, 4, vt, my);
MC(epel, v, 12, 4, vt, my);
MC(epel, v, 16, 4, vt, my);
MC(epel, v, 24, 4, vt, my);
MC(epel, v, 32, 4, vt, my);

#undef MC
#define MC_HV(PEL, WIDTH, TAP)                                          \
void ff_hevc_put_hevc_##PEL##_hv##WIDTH##_8_msa(int16_t *dst,           \
                                                uint8_t *src,           \
                                                ptrdiff_t src_stride,   \
                                                int height,             \
                                                intptr_t mx,            \
                                                intptr_t my,            \
                                                int width)              \
{                                                                       \
    const int8_t *filter_x = ff_hevc_##PEL##_filters[mx - 1];           \
    const int8_t *filter_y = ff_hevc_##PEL##_filters[my - 1];           \
                                                                        \
    hevc_hv_##TAP##t_##WIDTH##w_msa(src, src_stride, dst, MAX_PB_SIZE,  \
                                    filter_x, filter_y, height);        \
}
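
/* MC_HV(epel, 8, 4), for instance, defines
 * ff_hevc_put_hevc_epel_hv8_8_msa(): the horizontal 4-tap filter is
 * selected by mx, the vertical one by my, and the call lands in
 * hevc_hv_4t_8w_msa() above. */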