/*
 * Copyright (c) 2018 gxw <guxiwei-hf@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "vp3dsp_mips.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/rnd_avg.h"
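
/*
 * The IDCT is computed in two 1-D passes, mirroring the C reference in
 * libavcodec/vp3dsp.c.  The constants below are the Q16 fixed-point
 * cosines used there: 64277 = cos(pi/16), 60547 = cos(2pi/16),
 * 54491 = cos(3pi/16), 46341 = cos(4pi/16), 36410 = cos(5pi/16),
 * 25080 = cos(6pi/16) and 12785 = cos(7pi/16), each scaled by 65536,
 * so every product is renormalized with ">> 16".
 */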

static void idct_msa(uint8_t *dst, int stride, int16_t *input, int type)
{
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7, sign;
    v4i32 r0_r, r0_l, r1_r, r1_l, r2_r, r2_l, r3_r, r3_l,
          r4_r, r4_l, r5_r, r5_l, r6_r, r6_l, r7_r, r7_l;
    v4i32 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;
    v4i32 Ed, Gd, Add, Bdd, Fd, Hd;
    v16u8 sign_l;
    v16i8 d0, d1, d2, d3, d4, d5, d6, d7;
    v4i32 c0, c1, c2, c3, c4, c5, c6, c7;
    v4i32 f0, f1, f2, f3, f4, f5, f6, f7;
    v4i32 sign_t;
    v16i8 zero = {0};
    v16i8 mask = {0, 4, 8, 12, 16, 20, 24, 28, 0, 0, 0, 0, 0, 0, 0, 0};
    v4i32 cnst64277w = {64277, 64277, 64277, 64277};
    v4i32 cnst60547w = {60547, 60547, 60547, 60547};
    v4i32 cnst54491w = {54491, 54491, 54491, 54491};
    v4i32 cnst46341w = {46341, 46341, 46341, 46341};
    v4i32 cnst36410w = {36410, 36410, 36410, 36410};
    v4i32 cnst25080w = {25080, 25080, 25080, 25080};
    v4i32 cnst12785w = {12785, 12785, 12785, 12785};
    v4i32 cnst8w = {8, 8, 8, 8};
    v4i32 cnst2048w = {2048, 2048, 2048, 2048};
    v4i32 cnst128w = {128, 128, 128, 128};

    /* Extended input data */
    LD_SH8(input, 8, r0, r1, r2, r3, r4, r5, r6, r7);
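    /* __msa_clti_s_h() returns all-ones halfwords for negative lanes;
     * interleaving each row with that mask sign-extends the eight
     * 16-bit coefficients into right (_r) and left (_l) 32-bit halves. */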
    sign = __msa_clti_s_h(r0, 0);
    r0_r = (v4i32) __msa_ilvr_h(sign, r0);
    r0_l = (v4i32) __msa_ilvl_h(sign, r0);
    sign = __msa_clti_s_h(r1, 0);
    r1_r = (v4i32) __msa_ilvr_h(sign, r1);
    r1_l = (v4i32) __msa_ilvl_h(sign, r1);
    sign = __msa_clti_s_h(r2, 0);
    r2_r = (v4i32) __msa_ilvr_h(sign, r2);
    r2_l = (v4i32) __msa_ilvl_h(sign, r2);
    sign = __msa_clti_s_h(r3, 0);
    r3_r = (v4i32) __msa_ilvr_h(sign, r3);
    r3_l = (v4i32) __msa_ilvl_h(sign, r3);
    sign = __msa_clti_s_h(r4, 0);
    r4_r = (v4i32) __msa_ilvr_h(sign, r4);
    r4_l = (v4i32) __msa_ilvl_h(sign, r4);
    sign = __msa_clti_s_h(r5, 0);
    r5_r = (v4i32) __msa_ilvr_h(sign, r5);
    r5_l = (v4i32) __msa_ilvl_h(sign, r5);
    sign = __msa_clti_s_h(r6, 0);
    r6_r = (v4i32) __msa_ilvr_h(sign, r6);
    r6_l = (v4i32) __msa_ilvl_h(sign, r6);
    sign = __msa_clti_s_h(r7, 0);
    r7_r = (v4i32) __msa_ilvr_h(sign, r7);
    r7_l = (v4i32) __msa_ilvl_h(sign, r7);
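
    /* Pass 1: 1-D IDCT down the columns, four columns per butterfly;
     * the _r vectors carry columns 0-3, the _l vectors columns 4-7. */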
    A = ((r1_r * cnst64277w) >> 16) + ((r7_r * cnst12785w) >> 16);
    B = ((r1_r * cnst12785w) >> 16) - ((r7_r * cnst64277w) >> 16);
    C = ((r3_r * cnst54491w) >> 16) + ((r5_r * cnst36410w) >> 16);
    D = ((r5_r * cnst54491w) >> 16) - ((r3_r * cnst36410w) >> 16);
    Ad = ((A - C) * cnst46341w) >> 16;
    Bd = ((B - D) * cnst46341w) >> 16;
    Cd = A + C;
    Dd = B + D;
    E = ((r0_r + r4_r) * cnst46341w) >> 16;
    F = ((r0_r - r4_r) * cnst46341w) >> 16;
    G = ((r2_r * cnst60547w) >> 16) + ((r6_r * cnst25080w) >> 16);
    H = ((r2_r * cnst25080w) >> 16) - ((r6_r * cnst60547w) >> 16);
    Ed = E - G;
    Gd = E + G;
    Add = F + Ad;
    Bdd = Bd - H;
    Fd = F - Ad;
    Hd = Bd + H;
    r0_r = Gd + Cd;
    r7_r = Gd - Cd;
    r1_r = Add + Hd;
    r2_r = Add - Hd;
    r3_r = Ed + Dd;
    r4_r = Ed - Dd;
    r5_r = Fd + Bdd;
    r6_r = Fd - Bdd;

    /* Columns 4-7 */
    A = ((r1_l * cnst64277w) >> 16) + ((r7_l * cnst12785w) >> 16);
    B = ((r1_l * cnst12785w) >> 16) - ((r7_l * cnst64277w) >> 16);
    C = ((r3_l * cnst54491w) >> 16) + ((r5_l * cnst36410w) >> 16);
    D = ((r5_l * cnst54491w) >> 16) - ((r3_l * cnst36410w) >> 16);
    Ad = ((A - C) * cnst46341w) >> 16;
    Bd = ((B - D) * cnst46341w) >> 16;
    Cd = A + C;
    Dd = B + D;
    E = ((r0_l + r4_l) * cnst46341w) >> 16;
    F = ((r0_l - r4_l) * cnst46341w) >> 16;
    G = ((r2_l * cnst60547w) >> 16) + ((r6_l * cnst25080w) >> 16);
    H = ((r2_l * cnst25080w) >> 16) - ((r6_l * cnst60547w) >> 16);
    Ed = E - G;
    Gd = E + G;
    Add = F + Ad;
    Bdd = Bd - H;
    Fd = F - Ad;
    Hd = Bd + H;
    r0_l = Gd + Cd;
    r7_l = Gd - Cd;
    r1_l = Add + Hd;
    r2_l = Add - Hd;
    r3_l = Ed + Dd;
    r4_l = Ed - Dd;
    r5_l = Fd + Bdd;
    r6_l = Fd - Bdd;
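
    /* Pass 2: transpose the 4x4 quadrants and apply the same butterfly
     * along the other dimension; this block yields output rows 0-7 for
     * columns 0-3.  cnst8w is the rounding term of the final ">> 4";
     * for idct_put (type == 1) cnst2048w additionally folds the +128
     * output bias into the same shift (2048 >> 4 == 128). */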
    TRANSPOSE4x4_SW_SW(r0_r, r1_r, r2_r, r3_r,
                       r0_r, r1_r, r2_r, r3_r);
    TRANSPOSE4x4_SW_SW(r0_l, r1_l, r2_l, r3_l,
                       r0_l, r1_l, r2_l, r3_l);
    A = ((r1_r * cnst64277w) >> 16) + ((r3_l * cnst12785w) >> 16);
    B = ((r1_r * cnst12785w) >> 16) - ((r3_l * cnst64277w) >> 16);
    C = ((r3_r * cnst54491w) >> 16) + ((r1_l * cnst36410w) >> 16);
    D = ((r1_l * cnst54491w) >> 16) - ((r3_r * cnst36410w) >> 16);
    Ad = ((A - C) * cnst46341w) >> 16;
    Bd = ((B - D) * cnst46341w) >> 16;
    Cd = A + C;
    Dd = B + D;
    E = ((r0_r + r0_l) * cnst46341w) >> 16;
    E += cnst8w;
    F = ((r0_r - r0_l) * cnst46341w) >> 16;
    F += cnst8w;
    if (type == 1) { // HACK
        E += cnst2048w;
        F += cnst2048w;
    }
    G = ((r2_r * cnst60547w) >> 16) + ((r2_l * cnst25080w) >> 16);
    H = ((r2_r * cnst25080w) >> 16) - ((r2_l * cnst60547w) >> 16);
    Ed = E - G;
    Gd = E + G;
    Add = F + Ad;
    Bdd = Bd - H;
    Fd = F - Ad;
    Hd = Bd + H;
    A = (Gd + Cd) >> 4;
    B = (Add + Hd) >> 4;
    C = (Add - Hd) >> 4;
    D = (Ed + Dd) >> 4;
    E = (Ed - Dd) >> 4;
    F = (Fd + Bdd) >> 4;
    G = (Fd - Bdd) >> 4;
    H = (Gd - Cd) >> 4;
    if (type == 2) {
        LD_SB8(dst, stride, d0, d1, d2, d3, d4, d5, d6, d7);
        ILVR_B4_SW(zero, d0, zero, d1, zero, d2, zero, d3,
                   f0, f1, f2, f3);
        ILVR_B4_SW(zero, d4, zero, d5, zero, d6, zero, d7,
                   f4, f5, f6, f7);
        ILVR_H4_SW(zero, f0, zero, f1, zero, f2, zero, f3,
                   c0, c1, c2, c3);
        ILVR_H4_SW(zero, f4, zero, f5, zero, f6, zero, f7,
                   c4, c5, c6, c7);
        A += c0;
        B += c1;
        C += c2;
        D += c3;
        E += c4;
        F += c5;
        G += c6;
        H += c7;
    }
    A = CLIP_SW_0_255(A);
    B = CLIP_SW_0_255(B);
    C = CLIP_SW_0_255(C);
    D = CLIP_SW_0_255(D);
    E = CLIP_SW_0_255(E);
    F = CLIP_SW_0_255(F);
    G = CLIP_SW_0_255(G);
    H = CLIP_SW_0_255(H);
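
    /* Per-lane DC-only shortcut, as in the C reference: sign_t marks
     * lanes where all seven AC inputs of this half were zero. */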
    sign_l = __msa_or_v((v16u8)r1_r, (v16u8)r2_r);
    sign_l = __msa_or_v(sign_l, (v16u8)r3_r);
    sign_l = __msa_or_v(sign_l, (v16u8)r0_l);
    sign_l = __msa_or_v(sign_l, (v16u8)r1_l);
    sign_l = __msa_or_v(sign_l, (v16u8)r2_l);
    sign_l = __msa_or_v(sign_l, (v16u8)r3_l);
    sign_t = __msa_ceqi_w((v4i32)sign_l, 0);
    Add = ((r0_r * cnst46341w) + (8 << 16)) >> 20;
    if (type == 1) {
        Bdd = Add + cnst128w;
        Bdd = CLIP_SW_0_255(Bdd);
        Ad = Bdd;
        Bd = Bdd;
        Cd = Bdd;
        Dd = Bdd;
        Ed = Bdd;
        Fd = Bdd;
        Gd = Bdd;
        Hd = Bdd;
    } else {
        Ad = Add + c0;
        Bd = Add + c1;
        Cd = Add + c2;
        Dd = Add + c3;
        Ed = Add + c4;
        Fd = Add + c5;
        Gd = Add + c6;
        Hd = Add + c7;
        Ad = CLIP_SW_0_255(Ad);
        Bd = CLIP_SW_0_255(Bd);
        Cd = CLIP_SW_0_255(Cd);
        Dd = CLIP_SW_0_255(Dd);
        Ed = CLIP_SW_0_255(Ed);
        Fd = CLIP_SW_0_255(Fd);
        Gd = CLIP_SW_0_255(Gd);
        Hd = CLIP_SW_0_255(Hd);
    }
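
    /* sign_t keeps the DC-only value, its inverse the full result;
     * the two disjoint masked halves are then merged by addition. */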
    Ad = (v4i32)__msa_and_v((v16u8)Ad, (v16u8)sign_t);
    Bd = (v4i32)__msa_and_v((v16u8)Bd, (v16u8)sign_t);
    Cd = (v4i32)__msa_and_v((v16u8)Cd, (v16u8)sign_t);
    Dd = (v4i32)__msa_and_v((v16u8)Dd, (v16u8)sign_t);
    Ed = (v4i32)__msa_and_v((v16u8)Ed, (v16u8)sign_t);
    Fd = (v4i32)__msa_and_v((v16u8)Fd, (v16u8)sign_t);
    Gd = (v4i32)__msa_and_v((v16u8)Gd, (v16u8)sign_t);
    Hd = (v4i32)__msa_and_v((v16u8)Hd, (v16u8)sign_t);
    sign_t = __msa_ceqi_w(sign_t, 0);
    A = (v4i32)__msa_and_v((v16u8)A, (v16u8)sign_t);
    B = (v4i32)__msa_and_v((v16u8)B, (v16u8)sign_t);
    C = (v4i32)__msa_and_v((v16u8)C, (v16u8)sign_t);
    D = (v4i32)__msa_and_v((v16u8)D, (v16u8)sign_t);
    E = (v4i32)__msa_and_v((v16u8)E, (v16u8)sign_t);
    F = (v4i32)__msa_and_v((v16u8)F, (v16u8)sign_t);
    G = (v4i32)__msa_and_v((v16u8)G, (v16u8)sign_t);
    H = (v4i32)__msa_and_v((v16u8)H, (v16u8)sign_t);
    r0_r = Ad + A;
    r1_r = Bd + B;
    r2_r = Cd + C;
    r3_r = Dd + D;
    r0_l = Ed + E;
    r1_l = Fd + F;
    r2_l = Gd + G;
    r3_l = Hd + H;
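
    /* Same transform for the remaining quadrants: output rows 0-7,
     * columns 4-7. */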
    TRANSPOSE4x4_SW_SW(r4_r, r5_r, r6_r, r7_r,
                       r4_r, r5_r, r6_r, r7_r);
    TRANSPOSE4x4_SW_SW(r4_l, r5_l, r6_l, r7_l,
                       r4_l, r5_l, r6_l, r7_l);
    A = ((r5_r * cnst64277w) >> 16) + ((r7_l * cnst12785w) >> 16);
    B = ((r5_r * cnst12785w) >> 16) - ((r7_l * cnst64277w) >> 16);
    C = ((r7_r * cnst54491w) >> 16) + ((r5_l * cnst36410w) >> 16);
    D = ((r5_l * cnst54491w) >> 16) - ((r7_r * cnst36410w) >> 16);
    Ad = ((A - C) * cnst46341w) >> 16;
    Bd = ((B - D) * cnst46341w) >> 16;
    Cd = A + C;
    Dd = B + D;
    E = ((r4_r + r4_l) * cnst46341w) >> 16;
    E += cnst8w;
    F = ((r4_r - r4_l) * cnst46341w) >> 16;
    F += cnst8w;
    if (type == 1) { // HACK
        E += cnst2048w;
        F += cnst2048w;
    }
    G = ((r6_r * cnst60547w) >> 16) + ((r6_l * cnst25080w) >> 16);
    H = ((r6_r * cnst25080w) >> 16) - ((r6_l * cnst60547w) >> 16);
    Ed = E - G;
    Gd = E + G;
    Add = F + Ad;
    Bdd = Bd - H;
    Fd = F - Ad;
    Hd = Bd + H;
    A = (Gd + Cd) >> 4;
    B = (Add + Hd) >> 4;
    C = (Add - Hd) >> 4;
    D = (Ed + Dd) >> 4;
    E = (Ed - Dd) >> 4;
    F = (Fd + Bdd) >> 4;
    G = (Fd - Bdd) >> 4;
    H = (Gd - Cd) >> 4;
    if (type == 2) {
        ILVL_H4_SW(zero, f0, zero, f1, zero, f2, zero, f3,
                   c0, c1, c2, c3);
        ILVL_H4_SW(zero, f4, zero, f5, zero, f6, zero, f7,
                   c4, c5, c6, c7);
        A += c0;
        B += c1;
        C += c2;
        D += c3;
        E += c4;
        F += c5;
        G += c6;
        H += c7;
    }
    A = CLIP_SW_0_255(A);
    B = CLIP_SW_0_255(B);
    C = CLIP_SW_0_255(C);
    D = CLIP_SW_0_255(D);
    E = CLIP_SW_0_255(E);
    F = CLIP_SW_0_255(F);
    G = CLIP_SW_0_255(G);
    H = CLIP_SW_0_255(H);
    sign_l = __msa_or_v((v16u8)r5_r, (v16u8)r6_r);
    sign_l = __msa_or_v(sign_l, (v16u8)r7_r);
    sign_l = __msa_or_v(sign_l, (v16u8)r4_l);
    sign_l = __msa_or_v(sign_l, (v16u8)r5_l);
    sign_l = __msa_or_v(sign_l, (v16u8)r6_l);
    sign_l = __msa_or_v(sign_l, (v16u8)r7_l);
    sign_t = __msa_ceqi_w((v4i32)sign_l, 0);
    Add = ((r4_r * cnst46341w) + (8 << 16)) >> 20;
    if (type == 1) {
        Bdd = Add + cnst128w;
        Bdd = CLIP_SW_0_255(Bdd);
        Ad = Bdd;
        Bd = Bdd;
        Cd = Bdd;
        Dd = Bdd;
        Ed = Bdd;
        Fd = Bdd;
        Gd = Bdd;
        Hd = Bdd;
    } else {
        Ad = Add + c0;
        Bd = Add + c1;
        Cd = Add + c2;
        Dd = Add + c3;
        Ed = Add + c4;
        Fd = Add + c5;
        Gd = Add + c6;
        Hd = Add + c7;
        Ad = CLIP_SW_0_255(Ad);
        Bd = CLIP_SW_0_255(Bd);
        Cd = CLIP_SW_0_255(Cd);
        Dd = CLIP_SW_0_255(Dd);
        Ed = CLIP_SW_0_255(Ed);
        Fd = CLIP_SW_0_255(Fd);
        Gd = CLIP_SW_0_255(Gd);
        Hd = CLIP_SW_0_255(Hd);
    }
    Ad = (v4i32)__msa_and_v((v16u8)Ad, (v16u8)sign_t);
    Bd = (v4i32)__msa_and_v((v16u8)Bd, (v16u8)sign_t);
    Cd = (v4i32)__msa_and_v((v16u8)Cd, (v16u8)sign_t);
    Dd = (v4i32)__msa_and_v((v16u8)Dd, (v16u8)sign_t);
    Ed = (v4i32)__msa_and_v((v16u8)Ed, (v16u8)sign_t);
    Fd = (v4i32)__msa_and_v((v16u8)Fd, (v16u8)sign_t);
    Gd = (v4i32)__msa_and_v((v16u8)Gd, (v16u8)sign_t);
    Hd = (v4i32)__msa_and_v((v16u8)Hd, (v16u8)sign_t);
    sign_t = __msa_ceqi_w(sign_t, 0);
    A = (v4i32)__msa_and_v((v16u8)A, (v16u8)sign_t);
    B = (v4i32)__msa_and_v((v16u8)B, (v16u8)sign_t);
    C = (v4i32)__msa_and_v((v16u8)C, (v16u8)sign_t);
    D = (v4i32)__msa_and_v((v16u8)D, (v16u8)sign_t);
    E = (v4i32)__msa_and_v((v16u8)E, (v16u8)sign_t);
    F = (v4i32)__msa_and_v((v16u8)F, (v16u8)sign_t);
    G = (v4i32)__msa_and_v((v16u8)G, (v16u8)sign_t);
    H = (v4i32)__msa_and_v((v16u8)H, (v16u8)sign_t);
    r4_r = Ad + A;
    r5_r = Bd + B;
    r6_r = Cd + C;
    r7_r = Dd + D;
    r4_l = Ed + E;
    r5_l = Fd + F;
    r6_l = Gd + G;
    r7_l = Hd + H;
    VSHF_B2_SB(r0_r, r4_r, r1_r, r5_r, mask, mask, d0, d1);
    VSHF_B2_SB(r2_r, r6_r, r3_r, r7_r, mask, mask, d2, d3);
    VSHF_B2_SB(r0_l, r4_l, r1_l, r5_l, mask, mask, d4, d5);
    VSHF_B2_SB(r2_l, r6_l, r3_l, r7_l, mask, mask, d6, d7);

    /* Final sequence of operations overwrites the original dst */
    ST_D1(d0, 0, dst);
    ST_D1(d1, 0, dst + stride);
    ST_D1(d2, 0, dst + 2 * stride);
    ST_D1(d3, 0, dst + 3 * stride);
    ST_D1(d4, 0, dst + 4 * stride);
    ST_D1(d5, 0, dst + 5 * stride);
    ST_D1(d6, 0, dst + 6 * stride);
    ST_D1(d7, 0, dst + 7 * stride);
}

void ff_vp3_idct_put_msa(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    idct_msa(dest, line_size, block, 1);
    memset(block, 0, sizeof(*block) * 64);
}

void ff_vp3_idct_add_msa(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    idct_msa(dest, line_size, block, 2);
    memset(block, 0, sizeof(*block) * 64);
}

void ff_vp3_idct_dc_add_msa(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    int i = (block[0] + 15) >> 5;
    v4i32 dc = {i, i, i, i};
    v16i8 d0, d1, d2, d3, d4, d5, d6, d7;
    v4i32 c0, c1, c2, c3, c4, c5, c6, c7;
    v4i32 e0, e1, e2, e3, e4, e5, e6, e7;
    v4i32 r0, r1, r2, r3, r4, r5, r6, r7;
    v16i8 mask = {0, 4, 8, 12, 16, 20, 24, 28, 0, 0, 0, 0, 0, 0, 0, 0};
    v16i8 zero = {0};
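
    /* (block[0] + 15) >> 5 is the DC term the full IDCT would produce
     * (the C reference uses the same shortcut); it is splatted into dc
     * and added to all 64 pixels. */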
    LD_SB8(dest, line_size, d0, d1, d2, d3, d4, d5, d6, d7);
    ILVR_B4_SW(zero, d0, zero, d1, zero, d2, zero, d3,
               c0, c1, c2, c3);
    ILVR_B4_SW(zero, d4, zero, d5, zero, d6, zero, d7,
               c4, c5, c6, c7);
    ILVR_H4_SW(zero, c0, zero, c1, zero, c2, zero, c3,
               e0, e1, e2, e3);
    ILVR_H4_SW(zero, c4, zero, c5, zero, c6, zero, c7,
               e4, e5, e6, e7);
    e0 += dc;
    e1 += dc;
    e2 += dc;
    e3 += dc;
    e4 += dc;
    e5 += dc;
    e6 += dc;
    e7 += dc;
    e0 = CLIP_SW_0_255(e0);
    e1 = CLIP_SW_0_255(e1);
    e2 = CLIP_SW_0_255(e2);
    e3 = CLIP_SW_0_255(e3);
    e4 = CLIP_SW_0_255(e4);
    e5 = CLIP_SW_0_255(e5);
    e6 = CLIP_SW_0_255(e6);
    e7 = CLIP_SW_0_255(e7);
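
    /* Pixels 4-7 of each row */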
    ILVL_H4_SW(zero, c0, zero, c1, zero, c2, zero, c3,
               r0, r1, r2, r3);
    ILVL_H4_SW(zero, c4, zero, c5, zero, c6, zero, c7,
               r4, r5, r6, r7);
    r0 += dc;
    r1 += dc;
    r2 += dc;
    r3 += dc;
    r4 += dc;
    r5 += dc;
    r6 += dc;
    r7 += dc;
    r0 = CLIP_SW_0_255(r0);
    r1 = CLIP_SW_0_255(r1);
    r2 = CLIP_SW_0_255(r2);
    r3 = CLIP_SW_0_255(r3);
    r4 = CLIP_SW_0_255(r4);
    r5 = CLIP_SW_0_255(r5);
    r6 = CLIP_SW_0_255(r6);
    r7 = CLIP_SW_0_255(r7);
    VSHF_B2_SB(e0, r0, e1, r1, mask, mask, d0, d1);
    VSHF_B2_SB(e2, r2, e3, r3, mask, mask, d2, d3);
    VSHF_B2_SB(e4, r4, e5, r5, mask, mask, d4, d5);
    VSHF_B2_SB(e6, r6, e7, r7, mask, mask, d6, d7);

    /* Final sequence of operations overwrites the original dest */
    ST_D1(d0, 0, dest);
    ST_D1(d1, 0, dest + line_size);
    ST_D1(d2, 0, dest + 2 * line_size);
    ST_D1(d3, 0, dest + 3 * line_size);
    ST_D1(d4, 0, dest + 4 * line_size);
    ST_D1(d5, 0, dest + 5 * line_size);
    ST_D1(d6, 0, dest + 6 * line_size);
    ST_D1(d7, 0, dest + 7 * line_size);

    block[0] = 0;
}

void ff_vp3_v_loop_filter_msa(uint8_t *first_pixel, ptrdiff_t stride,
                              int *bounding_values)
{
    int nstride = -stride;
    v4i32 e0, e1, f0, f1, g0, g1;
    v8i16 r0;
    v16i8 d0, d1, d2, d3;
    v8i16 c0, c1, c2, c3;
    v16i8 zero = {0};
    v8i16 cnst3h = {3, 3, 3, 3, 3, 3, 3, 3},
          cnst4h = {4, 4, 4, 4, 4, 4, 4, 4};
    v16i8 mask = {0, 4, 8, 12, 16, 20, 24, 28, 0, 0, 0, 0, 0, 0, 0, 0};
    int16_t temp_16[8];
    int temp_32[8];

    LD_SB4(first_pixel + nstride * 2, stride, d0, d1, d2, d3);
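
    /* For each of the 8 columns: filter = (c0 - c3) + 3 * (c2 - c1),
     * where c0..c3 are the two rows above and below the block edge;
     * (filter + 4) >> 3 then indexes bounding_values, exactly as in
     * the C loop filter. */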
    ILVR_B4_SH(zero, d0, zero, d1, zero, d2, zero, d3,
               c0, c1, c2, c3);
    r0 = (c0 - c3) + (c2 - c1) * cnst3h;
    r0 = (r0 + cnst4h) >> 3;

    /* Get filter_value from bounding_values one by one */
    ST_SH(r0, temp_16);
    for (int i = 0; i < 8; i++)
        temp_32[i] = bounding_values[temp_16[i]];
    LD_SW2(temp_32, 4, e0, e1);
    ILVR_H2_SW(zero, c1, zero, c2, f0, g0);
    ILVL_H2_SW(zero, c1, zero, c2, f1, g1);
    f0 += e0;
    f1 += e1;
    g0 -= e0;
    g1 -= e1;
    f0 = CLIP_SW_0_255(f0);
    f1 = CLIP_SW_0_255(f1);
    g0 = CLIP_SW_0_255(g0);
    g1 = CLIP_SW_0_255(g1);
    VSHF_B2_SB(f0, f1, g0, g1, mask, mask, d1, d2);

    /* Final move to first_pixel */
    ST_D1(d1, 0, first_pixel + nstride);
    ST_D1(d2, 0, first_pixel);
}

void ff_vp3_h_loop_filter_msa(uint8_t *first_pixel, ptrdiff_t stride,
                              int *bounding_values)
{
    v16i8 d0, d1, d2, d3, d4, d5, d6, d7;
    v8i16 c0, c1, c2, c3, c4, c5, c6, c7;
    v8i16 r0;
    v4i32 e0, e1, f0, f1, g0, g1;
    v16i8 zero = {0};
    v8i16 cnst3h = {3, 3, 3, 3, 3, 3, 3, 3},
          cnst4h = {4, 4, 4, 4, 4, 4, 4, 4};
    v16i8 mask = {0, 16, 4, 20, 8, 24, 12, 28, 0, 0, 0, 0, 0, 0, 0, 0};
    int16_t temp_16[8];
    int temp_32[8];

    LD_SB8(first_pixel - 2, stride, d0, d1, d2, d3, d4, d5, d6, d7);
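
    /* Same filter across a vertical edge: the 8x4 pixel patch is
     * transposed first so the arithmetic stays column-wise. */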
    ILVR_B4_SH(zero, d0, zero, d1, zero, d2, zero, d3,
               c0, c1, c2, c3);
    ILVR_B4_SH(zero, d4, zero, d5, zero, d6, zero, d7,
               c4, c5, c6, c7);
    TRANSPOSE8x8_SH_SH(c0, c1, c2, c3, c4, c5, c6, c7,
                       c0, c1, c2, c3, c4, c5, c6, c7);
    r0 = (c0 - c3) + (c2 - c1) * cnst3h;
    r0 = (r0 + cnst4h) >> 3;

    /* Get filter_value from bounding_values one by one */
    ST_SH(r0, temp_16);
    for (int i = 0; i < 8; i++)
        temp_32[i] = bounding_values[temp_16[i]];
    LD_SW2(temp_32, 4, e0, e1);
    ILVR_H2_SW(zero, c1, zero, c2, f0, g0);
    ILVL_H2_SW(zero, c1, zero, c2, f1, g1);
    f0 += e0;
    f1 += e1;
    g0 -= e0;
    g1 -= e1;
    f0 = CLIP_SW_0_255(f0);
    f1 = CLIP_SW_0_255(f1);
    g0 = CLIP_SW_0_255(g0);
    g1 = CLIP_SW_0_255(g1);
    VSHF_B2_SB(f0, g0, f1, g1, mask, mask, d1, d2);

    /* Final move to first_pixel */
    ST_H4(d1, 0, 1, 2, 3, first_pixel - 1, stride);
    ST_H4(d2, 0, 1, 2, 3, first_pixel - 1 + 4 * stride, stride);
}

void ff_put_no_rnd_pixels_l2_msa(uint8_t *dst, const uint8_t *src1,
                                 const uint8_t *src2, ptrdiff_t stride, int h)
{
    v16i8 d0, d1, d2, d3, d4, d5, d6, d7;
    v16i8 c0, c1, c2, c3;
    v4i32 a0, a1, a2, a3, b0, b1, b2, b3;
    v4i32 e0, e1, e2;
    v4i32 f0, f1, f2;
    v4u32 t0, t1, t2, t3;
    v16i8 mask = {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23};
    int32_t value = 0xfefefefe;
    v4i32 fmask = {value, value, value, value};

    if (h == 8) {
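        /* Vectorized no-rounding average, 32 bits at a time:
         * avg = (a & b) + (((a ^ b) & 0xfefefefe) >> 1), the vector
         * counterpart of no_rnd_avg32() from rnd_avg.h. */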
        LD_SB8(src1, stride, d0, d1, d2, d3, d4, d5, d6, d7);
        VSHF_B2_SB(d0, d1, d2, d3, mask, mask, c0, c1);
        VSHF_B2_SB(d4, d5, d6, d7, mask, mask, c2, c3);
        a0 = (v4i32) __msa_pckev_d((v2i64)c1, (v2i64)c0);
        a2 = (v4i32) __msa_pckod_d((v2i64)c1, (v2i64)c0);
        a1 = (v4i32) __msa_pckev_d((v2i64)c3, (v2i64)c2);
        a3 = (v4i32) __msa_pckod_d((v2i64)c3, (v2i64)c2);
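        /* a0/a1 now hold rows 0-3/4-7 of the left four columns, and
         * a2/a3 the right four, gathered by the shuffle mask above. */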

        LD_SB8(src2, stride, d0, d1, d2, d3, d4, d5, d6, d7);
        VSHF_B2_SB(d0, d1, d2, d3, mask, mask, c0, c1);
        VSHF_B2_SB(d4, d5, d6, d7, mask, mask, c2, c3);
        b0 = (v4i32) __msa_pckev_d((v2i64)c1, (v2i64)c0);
        b2 = (v4i32) __msa_pckod_d((v2i64)c1, (v2i64)c0);
        b1 = (v4i32) __msa_pckev_d((v2i64)c3, (v2i64)c2);
        b3 = (v4i32) __msa_pckod_d((v2i64)c3, (v2i64)c2);

        e0 = (v4i32) __msa_xor_v((v16u8)a0, (v16u8)b0);
        e0 = (v4i32) __msa_and_v((v16u8)e0, (v16u8)fmask);
        t0 = ((v4u32)e0) >> 1;
        e2 = (v4i32) __msa_and_v((v16u8)a0, (v16u8)b0);
        t0 += (v4u32)e2;

        e1 = (v4i32) __msa_xor_v((v16u8)a1, (v16u8)b1);
        e1 = (v4i32) __msa_and_v((v16u8)e1, (v16u8)fmask);
        t1 = ((v4u32)e1) >> 1;
        e2 = (v4i32) __msa_and_v((v16u8)a1, (v16u8)b1);
        t1 += (v4u32)e2;

        f0 = (v4i32) __msa_xor_v((v16u8)a2, (v16u8)b2);
        f0 = (v4i32) __msa_and_v((v16u8)f0, (v16u8)fmask);
        t2 = ((v4u32)f0) >> 1;
        f2 = (v4i32) __msa_and_v((v16u8)a2, (v16u8)b2);
        t2 += (v4u32)f2;

        f1 = (v4i32) __msa_xor_v((v16u8)a3, (v16u8)b3);
        f1 = (v4i32) __msa_and_v((v16u8)f1, (v16u8)fmask);
        t3 = ((v4u32)f1) >> 1;
        f2 = (v4i32) __msa_and_v((v16u8)a3, (v16u8)b3);
        t3 += (v4u32)f2;

        ST_W8(t0, t1, 0, 1, 2, 3, 0, 1, 2, 3, dst, stride);
        ST_W8(t2, t3, 0, 1, 2, 3, 0, 1, 2, 3, dst + 4, stride);
    } else {
        int i;

        for (i = 0; i < h; i++) {
            uint32_t a, b;

            a = AV_RN32(&src1[i * stride]);
            b = AV_RN32(&src2[i * stride]);
            AV_WN32A(&dst[i * stride], no_rnd_avg32(a, b));
            a = AV_RN32(&src1[i * stride + 4]);
            b = AV_RN32(&src2[i * stride + 4]);
            AV_WN32A(&dst[i * stride + 4], no_rnd_avg32(a, b));
        }
    }
}