/*
 * Copyright (c) 2015 - 2017 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/mips/generic_macros_msa.h"
#include "h264dsp_mips.h"
#include "libavcodec/bit_depth_template.c"
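
/* One row/column pass of the H.264 4x4 inverse transform: butterflies on
 * in0..in3, results written to out0..out3. */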
#define AVC_ITRANS_H(in0, in1, in2, in3, out0, out1, out2, out3)          \
{                                                                         \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
                                                                          \
    tmp0_m = in0 + in2;                                                   \
    tmp1_m = in0 - in2;                                                   \
    tmp2_m = in1 >> 1;                                                    \
    tmp2_m = tmp2_m - in3;                                                \
    tmp3_m = in3 >> 1;                                                    \
    tmp3_m = in1 + tmp3_m;                                                \
                                                                          \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, out0, out1, out2, out3);  \
}
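
/* Dequantize and inverse-transform the 4x4 block of luma DC coefficients,
 * then scatter the results to the DC slot of each 4x4 block in dst. */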
static void avc_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
                                     int32_t de_q_val)
{
#define DC_DEST_STRIDE 16
    int16_t out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 src1, src3;
    v8i16 vec0, vec1, vec2, vec3;
    v8i16 tmp0, tmp1, tmp2, tmp3;
    v8i16 hres0, hres1, hres2, hres3;
    v8i16 vres0, vres1, vres2, vres3;
    v4i32 vres0_r, vres1_r, vres2_r, vres3_r;
    const v4i32 de_q_vec = __msa_fill_w(de_q_val);
    const v8i16 src0 = LD_SH(src);
    const v8i16 src2 = LD_SH(src + 8);
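
    /* Two 4-point transform passes (horizontal, transpose, vertical) on the
     * 4x4 block of luma DC coefficients, done entirely in registers. */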
    ILVL_D2_SH(src0, src0, src2, src2, src1, src3);
    TRANSPOSE4x4_SH_SH(src0, src1, src2, src3, tmp0, tmp1, tmp2, tmp3);
    BUTTERFLY_4(tmp0, tmp2, tmp3, tmp1, vec0, vec3, vec2, vec1);
    BUTTERFLY_4(vec0, vec1, vec2, vec3, hres0, hres3, hres2, hres1);
    TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3);
    BUTTERFLY_4(hres0, hres1, hres3, hres2, vec0, vec3, vec2, vec1);
    BUTTERFLY_4(vec0, vec1, vec2, vec3, vres0, vres1, vres2, vres3);
    UNPCK_R_SH_SW(vres0, vres0_r);
    UNPCK_R_SH_SW(vres1, vres1_r);
    UNPCK_R_SH_SW(vres2, vres2_r);
    UNPCK_R_SH_SW(vres3, vres3_r);

    /* Dequantize in 32-bit precision, then round with a shift of 8. */
    vres0_r *= de_q_vec;
    vres1_r *= de_q_vec;
    vres2_r *= de_q_vec;
    vres3_r *= de_q_vec;

    SRARI_W4_SW(vres0_r, vres1_r, vres2_r, vres3_r, 8);
    PCKEV_H2_SH(vres1_r, vres0_r, vres3_r, vres2_r, vec0, vec1);
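
    /* Write each 16-bit result to the DC position (element 0) of one of the
     * sixteen 4x4 coefficient blocks in dst. */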
    out0 = __msa_copy_s_h(vec0, 0);
    out1 = __msa_copy_s_h(vec0, 1);
    out2 = __msa_copy_s_h(vec0, 2);
    out3 = __msa_copy_s_h(vec0, 3);
    out4 = __msa_copy_s_h(vec0, 4);
    out5 = __msa_copy_s_h(vec0, 5);
    out6 = __msa_copy_s_h(vec0, 6);
    out7 = __msa_copy_s_h(vec0, 7);
    SH(out0, (dst + 0 * DC_DEST_STRIDE));
    SH(out1, (dst + 2 * DC_DEST_STRIDE));
    SH(out2, (dst + 8 * DC_DEST_STRIDE));
    SH(out3, (dst + 10 * DC_DEST_STRIDE));
    SH(out4, (dst + 1 * DC_DEST_STRIDE));
    SH(out5, (dst + 3 * DC_DEST_STRIDE));
    SH(out6, (dst + 9 * DC_DEST_STRIDE));
    SH(out7, (dst + 11 * DC_DEST_STRIDE));

    out0 = __msa_copy_s_h(vec1, 0);
    out1 = __msa_copy_s_h(vec1, 1);
    out2 = __msa_copy_s_h(vec1, 2);
    out3 = __msa_copy_s_h(vec1, 3);
    out4 = __msa_copy_s_h(vec1, 4);
    out5 = __msa_copy_s_h(vec1, 5);
    out6 = __msa_copy_s_h(vec1, 6);
    out7 = __msa_copy_s_h(vec1, 7);
    SH(out0, (dst + 4 * DC_DEST_STRIDE));
    SH(out1, (dst + 6 * DC_DEST_STRIDE));
    SH(out2, (dst + 12 * DC_DEST_STRIDE));
    SH(out3, (dst + 14 * DC_DEST_STRIDE));
    SH(out4, (dst + 5 * DC_DEST_STRIDE));
    SH(out5, (dst + 7 * DC_DEST_STRIDE));
    SH(out6, (dst + 13 * DC_DEST_STRIDE));
    SH(out7, (dst + 15 * DC_DEST_STRIDE));

#undef DC_DEST_STRIDE
}
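
/* 8x8 inverse transform and add: applies the H.264 8x8 IDCT to the block of
 * coefficients in src, clears src, and adds the clipped result to the 8x8
 * pixel block at dst. */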
static void avc_idct8_addblk_msa(uint8_t *dst, int16_t *src, int32_t dst_stride)
{
    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 vec0, vec1, vec2, vec3;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v4i32 tmp0_r, tmp1_r, tmp2_r, tmp3_r, tmp4_r, tmp5_r, tmp6_r, tmp7_r;
    v4i32 tmp0_l, tmp1_l, tmp2_l, tmp3_l, tmp4_l, tmp5_l, tmp6_l, tmp7_l;
    v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec0_l, vec1_l, vec2_l, vec3_l;
    v4i32 res0_r, res1_r, res2_r, res3_r, res4_r, res5_r, res6_r, res7_r;
    v4i32 res0_l, res1_l, res2_l, res3_l, res4_l, res5_l, res6_l, res7_l;
    v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 zeros = { 0 };

    /* Rounding bias folded into the DC term so the final >> 6 rounds. */
    src[0] += 32;

    /* Load the 8x8 coefficient block and clear it for the next use. */
    LD_SH8(src, 8, src0, src1, src2, src3, src4, src5, src6, src7);
    ST_SH8(zeros, zeros, zeros, zeros, zeros, zeros, zeros, zeros, src, 8);
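
    /* First 1-D pass of the 8x8 inverse transform: even part, a 4-point
     * butterfly, then the odd part. */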
    vec0 = src0 + src4;
    vec1 = src0 - src4;
    vec2 = src2 >> 1;
    vec2 = vec2 - src6;
    vec3 = src6 >> 1;
    vec3 = src2 + vec3;

    BUTTERFLY_4(vec0, vec1, vec2, vec3, tmp0, tmp1, tmp2, tmp3);

    vec0 = src7 >> 1;
    vec0 = src5 - vec0 - src3 - src7;
    vec1 = src3 >> 1;
    vec1 = src1 - vec1 + src7 - src3;
    vec2 = src5 >> 1;
    vec2 = vec2 - src1 + src7 + src5;
    vec3 = src1 >> 1;
    vec3 = vec3 + src3 + src5 + src1;
    tmp4 = vec3 >> 2;
    tmp4 += vec0;
    tmp5 = vec2 >> 2;
    tmp5 += vec1;
    tmp6 = vec1 >> 2;
    tmp6 -= vec2;
    tmp7 = vec0 >> 2;
    tmp7 = vec3 - tmp7;

    BUTTERFLY_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
                res0, res1, res2, res3, res4, res5, res6, res7);
    TRANSPOSE8x8_SH_SH(res0, res1, res2, res3, res4, res5, res6, res7,
                       res0, res1, res2, res3, res4, res5, res6, res7);
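
    /* Sign-extend each halfword row into right/left word halves so the
     * second pass runs in 32-bit precision. */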
    UNPCK_SH_SW(res0, tmp0_r, tmp0_l);
    UNPCK_SH_SW(res1, tmp1_r, tmp1_l);
    UNPCK_SH_SW(res2, tmp2_r, tmp2_l);
    UNPCK_SH_SW(res3, tmp3_r, tmp3_l);
    UNPCK_SH_SW(res4, tmp4_r, tmp4_l);
    UNPCK_SH_SW(res5, tmp5_r, tmp5_l);
    UNPCK_SH_SW(res6, tmp6_r, tmp6_l);
    UNPCK_SH_SW(res7, tmp7_r, tmp7_l);
    BUTTERFLY_4(tmp0_r, tmp0_l, tmp4_l, tmp4_r, vec0_r, vec0_l, vec1_l, vec1_r);

    vec2_r = tmp2_r >> 1;
    vec2_l = tmp2_l >> 1;
    vec2_r -= tmp6_r;
    vec2_l -= tmp6_l;
    vec3_r = tmp6_r >> 1;
    vec3_l = tmp6_l >> 1;
    vec3_r += tmp2_r;
    vec3_l += tmp2_l;

    BUTTERFLY_4(vec0_r, vec1_r, vec2_r, vec3_r, tmp0_r, tmp2_r, tmp4_r, tmp6_r);
    BUTTERFLY_4(vec0_l, vec1_l, vec2_l, vec3_l, tmp0_l, tmp2_l, tmp4_l, tmp6_l);

    vec0_r = tmp7_r >> 1;
    vec0_l = tmp7_l >> 1;
    vec0_r = tmp5_r - vec0_r - tmp3_r - tmp7_r;
    vec0_l = tmp5_l - vec0_l - tmp3_l - tmp7_l;
    vec1_r = tmp3_r >> 1;
    vec1_l = tmp3_l >> 1;
    vec1_r = tmp1_r - vec1_r + tmp7_r - tmp3_r;
    vec1_l = tmp1_l - vec1_l + tmp7_l - tmp3_l;
    vec2_r = tmp5_r >> 1;
    vec2_l = tmp5_l >> 1;
    vec2_r = vec2_r - tmp1_r + tmp7_r + tmp5_r;
    vec2_l = vec2_l - tmp1_l + tmp7_l + tmp5_l;
    vec3_r = tmp1_r >> 1;
    vec3_l = tmp1_l >> 1;
    vec3_r = vec3_r + tmp3_r + tmp5_r + tmp1_r;
    vec3_l = vec3_l + tmp3_l + tmp5_l + tmp1_l;
    tmp1_r = vec3_r >> 2;
    tmp1_l = vec3_l >> 2;
    tmp1_r += vec0_r;
    tmp1_l += vec0_l;
    tmp3_r = vec2_r >> 2;
    tmp3_l = vec2_l >> 2;
    tmp3_r += vec1_r;
    tmp3_l += vec1_l;
    tmp5_r = vec1_r >> 2;
    tmp5_l = vec1_l >> 2;
    tmp5_r -= vec2_r;
    tmp5_l -= vec2_l;
    tmp7_r = vec0_r >> 2;
    tmp7_l = vec0_l >> 2;
    tmp7_r = vec3_r - tmp7_r;
    tmp7_l = vec3_l - tmp7_l;
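
    /* Final butterflies, scale down by >> 6, then add the residual to the
     * prediction with clipping to [0, 255]. */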
    BUTTERFLY_4(tmp0_r, tmp0_l, tmp7_l, tmp7_r, res0_r, res0_l, res7_l, res7_r);
    BUTTERFLY_4(tmp2_r, tmp2_l, tmp5_l, tmp5_r, res1_r, res1_l, res6_l, res6_r);
    BUTTERFLY_4(tmp4_r, tmp4_l, tmp3_l, tmp3_r, res2_r, res2_l, res5_l, res5_r);
    BUTTERFLY_4(tmp6_r, tmp6_l, tmp1_l, tmp1_r, res3_r, res3_l, res4_l, res4_r);
    SRA_4V(res0_r, res0_l, res1_r, res1_l, 6);
    SRA_4V(res2_r, res2_l, res3_r, res3_l, 6);
    SRA_4V(res4_r, res4_l, res5_r, res5_l, 6);
    SRA_4V(res6_r, res6_l, res7_r, res7_l, 6);
    PCKEV_H4_SH(res0_l, res0_r, res1_l, res1_r, res2_l, res2_r, res3_l, res3_r,
                res0, res1, res2, res3);
    PCKEV_H4_SH(res4_l, res4_r, res5_l, res5_r, res6_l, res6_r, res7_l, res7_r,
                res4, res5, res6, res7);
    LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
    ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
               tmp0, tmp1, tmp2, tmp3);
    ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
               tmp4, tmp5, tmp6, tmp7);
    ADD4(res0, tmp0, res1, tmp1, res2, tmp2, res3, tmp3,
         res0, res1, res2, res3);
    ADD4(res4, tmp4, res5, tmp5, res6, tmp6, res7, tmp7,
         res4, res5, res6, res7);
    CLIP_SH4_0_255(res0, res1, res2, res3);
    CLIP_SH4_0_255(res4, res5, res6, res7);
    PCKEV_B4_SB(res1, res0, res3, res2, res5, res4, res7, res6,
                dst0, dst1, dst2, dst3);
    ST8x4_UB(dst0, dst1, dst, dst_stride);
    dst += (4 * dst_stride);
    ST8x4_UB(dst2, dst3, dst, dst_stride);
}
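
/* DC-only 8x8 path: reconstruct the single DC value, clear the coefficient,
 * and add it to all 64 pixels of the destination block with clipping. */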
static void avc_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
                                    int32_t dst_stride)
{
    int32_t dc_val;
    v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
    v8i16 dc;
    v16i8 zeros = { 0 };

    dc_val = (src[0] + 32) >> 6;
    dc = __msa_fill_h(dc_val);

    src[0] = 0;

    LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
    ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
               dst0_r, dst1_r, dst2_r, dst3_r);
    ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
               dst4_r, dst5_r, dst6_r, dst7_r);
    ADD4(dst0_r, dc, dst1_r, dc, dst2_r, dc, dst3_r, dc,
         dst0_r, dst1_r, dst2_r, dst3_r);
    ADD4(dst4_r, dc, dst5_r, dc, dst6_r, dc, dst7_r, dc,
         dst4_r, dst5_r, dst6_r, dst7_r);
    CLIP_SH4_0_255(dst0_r, dst1_r, dst2_r, dst3_r);
    CLIP_SH4_0_255(dst4_r, dst5_r, dst6_r, dst7_r);
    PCKEV_B4_SB(dst1_r, dst0_r, dst3_r, dst2_r, dst5_r, dst4_r, dst7_r, dst6_r,
                dst0, dst1, dst2, dst3);
    ST8x4_UB(dst0, dst1, dst, dst_stride);
    dst += (4 * dst_stride);
    ST8x4_UB(dst2, dst3, dst, dst_stride);
}
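
/* 4x4 inverse transform and add for one block: two AVC_ITRANS_H passes with
 * a transpose in between, rounding by >> 6, then a clipped add to four rows
 * of dst. The coefficient block is cleared. */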
void ff_h264_idct_add_msa(uint8_t *dst, int16_t *src, int32_t dst_stride)
{
    uint32_t src0_m, src1_m, src2_m, src3_m, out0_m, out1_m, out2_m, out3_m;
    v16i8 dst0_m = { 0 };
    v16i8 dst1_m = { 0 };
    v8i16 hres0, hres1, hres2, hres3, vres0, vres1, vres2, vres3;
    v8i16 inp0_m, inp1_m, res0_m, res1_m, src1, src3;
    const v8i16 src0 = LD_SH(src);
    const v8i16 src2 = LD_SH(src + 8);
    const v8i16 zero = { 0 };
    const uint8_t *dst1 = dst + dst_stride;
    const uint8_t *dst2 = dst + 2 * dst_stride;
    const uint8_t *dst3 = dst + 3 * dst_stride;

    ILVL_D2_SH(src0, src0, src2, src2, src1, src3);
    ST_SH2(zero, zero, src, 8);
    AVC_ITRANS_H(src0, src1, src2, src3, hres0, hres1, hres2, hres3);
    TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3);
    AVC_ITRANS_H(hres0, hres1, hres2, hres3, vres0, vres1, vres2, vres3);
    src0_m = LW(dst);
    src1_m = LW(dst1);
    SRARI_H4_SH(vres0, vres1, vres2, vres3, 6);
    src2_m = LW(dst2);
    src3_m = LW(dst3);
    ILVR_D2_SH(vres1, vres0, vres3, vres2, inp0_m, inp1_m);
    INSERT_W2_SB(src0_m, src1_m, dst0_m);
    INSERT_W2_SB(src2_m, src3_m, dst1_m);
    ILVR_B2_SH(zero, dst0_m, zero, dst1_m, res0_m, res1_m);
    ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);
    CLIP_SH2_0_255(res0_m, res1_m);
    PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m);
    out0_m = __msa_copy_u_w((v4i32) dst0_m, 0);
    out1_m = __msa_copy_u_w((v4i32) dst0_m, 1);
    out2_m = __msa_copy_u_w((v4i32) dst1_m, 0);
    out3_m = __msa_copy_u_w((v4i32) dst1_m, 1);
    SW(out0_m, dst);
    SW(out1_m, dst1);
    SW(out2_m, dst2);
    SW(out3_m, dst3);
}
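
/* Public 8x8 IDCT + add entry point; forwards to the MSA implementation
 * above. */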
void ff_h264_idct8_addblk_msa(uint8_t *dst, int16_t *src,
                              int32_t dst_stride)
{
    avc_idct8_addblk_msa(dst, src, dst_stride);
}
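
/* DC-only 4x4 path: reconstruct the single DC value, clear the coefficient,
 * and add it to the 4x4 pixel block at dst with clipping. */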
void ff_h264_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
                                   int32_t dst_stride)
{
    v16u8 pred = { 0 };
    v16i8 out;
    v8i16 pred_r, pred_l;
    const uint32_t src0 = LW(dst);
    const uint32_t src1 = LW(dst + dst_stride);
    const uint32_t src2 = LW(dst + 2 * dst_stride);
    const uint32_t src3 = LW(dst + 3 * dst_stride);
    const int16_t dc = (src[0] + 32) >> 6;
    const v8i16 input_dc = __msa_fill_h(dc);

    src[0] = 0;
    INSERT_W4_UB(src0, src1, src2, src3, pred);
    UNPCK_UB_SH(pred, pred_r, pred_l);
    ADD2(pred_r, input_dc, pred_l, input_dc, pred_r, pred_l);
    CLIP_SH2_0_255(pred_r, pred_l);
    out = __msa_pckev_b((v16i8) pred_l, (v16i8) pred_r);
    ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}

void ff_h264_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
                                 int32_t dst_stride)
{
    avc_idct8_dc_addblk_msa(dst, src, dst_stride);
}
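
/* Add the residual for all 16 luma 4x4 blocks of a macroblock, picking the
 * DC-only or full IDCT path per block from the non-zero-coefficient counts. */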
void ff_h264_idct_add16_msa(uint8_t *dst,
                            const int32_t *blk_offset,
                            int16_t *block, int32_t dst_stride,
                            const uint8_t nzc[15 * 8])
{
    int32_t i;

    for (i = 0; i < 16; i++) {
        int32_t nnz = nzc[scan8[i]];

        if (nnz) {
            if (nnz == 1 && ((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
            else
                ff_h264_idct_add_msa(dst + blk_offset[i],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
        }
    }
}
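
/* Same selection logic for 8x8 transform blocks, four per macroblock. */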
void ff_h264_idct8_add4_msa(uint8_t *dst, const int32_t *blk_offset,
                            int16_t *block, int32_t dst_stride,
                            const uint8_t nzc[15 * 8])
{
    int32_t cnt;

    for (cnt = 0; cnt < 16; cnt += 4) {
        int32_t nnz = nzc[scan8[cnt]];

        if (nnz) {
            if (nnz == 1 && ((dctcoef *) block)[cnt * 16])
                ff_h264_idct8_dc_addblk_msa(dst + blk_offset[cnt],
                                            block + cnt * 16 * sizeof(pixel),
                                            dst_stride);
            else
                ff_h264_idct8_addblk_msa(dst + blk_offset[cnt],
                                         block + cnt * 16 * sizeof(pixel),
                                         dst_stride);
        }
    }
}
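
/* Chroma residuals (4:2:0): four 4x4 blocks for each of the two chroma
 * planes in dst[0] and dst[1]. */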
void ff_h264_idct_add8_msa(uint8_t **dst,
                           const int32_t *blk_offset,
                           int16_t *block, int32_t dst_stride,
                           const uint8_t nzc[15 * 8])
{
    int32_t i, j;

    for (j = 1; j < 3; j++) {
        for (i = (j * 16); i < (j * 16 + 4); i++) {
            if (nzc[scan8[i]])
                ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
            else if (((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
        }
    }
}
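
/* Chroma residuals for 4:2:2: eight 4x4 blocks per chroma plane; the second
 * loop covers the extra four blocks of each plane via blk_offset[i + 4] and
 * scan8[i + 4]. */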
void ff_h264_idct_add8_422_msa(uint8_t **dst,
                               const int32_t *blk_offset,
                               int16_t *block, int32_t dst_stride,
                               const uint8_t nzc[15 * 8])
{
    int32_t i, j;

    for (j = 1; j < 3; j++) {
        for (i = (j * 16); i < (j * 16 + 4); i++) {
            if (nzc[scan8[i]])
                ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
            else if (((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
        }
    }

    for (j = 1; j < 3; j++) {
        for (i = (j * 16 + 4); i < (j * 16 + 8); i++) {
            if (nzc[scan8[i + 4]])
                ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i + 4],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
            else if (((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i + 4],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
        }
    }
}
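
/* Per 4x4 luma block: full IDCT when coefficients are coded, otherwise a
 * DC-only add when the DC coefficient is non-zero. */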
void ff_h264_idct_add16_intra_msa(uint8_t *dst,
                                  const int32_t *blk_offset,
                                  int16_t *block,
                                  int32_t dst_stride,
                                  const uint8_t nzc[15 * 8])
{
    int32_t i;

    for (i = 0; i < 16; i++) {
        if (nzc[scan8[i]])
            ff_h264_idct_add_msa(dst + blk_offset[i],
                                 block + i * 16 * sizeof(pixel), dst_stride);
        else if (((dctcoef *) block)[i * 16])
            ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i],
                                          block + i * 16 * sizeof(pixel),
                                          dst_stride);
    }
}
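
/* Public luma DC dequant + IDCT entry point; forwards to the implementation
 * above. */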
void ff_h264_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
                                  int32_t de_qval)
{
    avc_deq_idct_luma_dc_msa(dst, src, de_qval);
}