/*
 * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/mips/generic_macros_msa.h"
#include "h264dsp_mips.h"
#include "libavcodec/bit_depth_template.c"
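/* One pass (horizontal or vertical) of the 4-point H.264 inverse
 * transform: shift-and-add butterflies only, no multiplications. */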
#define AVC_ITRANS_H(in0, in1, in2, in3, out0, out1, out2, out3)          \
{                                                                         \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                 \
                                                                          \
    tmp0_m = in0 + in2;                                                   \
    tmp1_m = in0 - in2;                                                   \
    tmp2_m = in1 >> 1;                                                    \
    tmp2_m = tmp2_m - in3;                                                \
    tmp3_m = in3 >> 1;                                                    \
    tmp3_m = in1 + tmp3_m;                                                \
                                                                          \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp2_m, tmp3_m, out0, out1, out2, out3);  \
}
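/* Inverse-transform a 4x4 coefficient block (row pass, transpose,
 * column pass, rounding shift by 6), add the residual to the predicted
 * pixels in dst with clipping, and zero the coefficient buffer. */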
static void avc_idct4x4_addblk_msa(uint8_t *dst, int16_t *src,
                                   int32_t dst_stride)
{
    v8i16 src0, src1, src2, src3;
    v8i16 hres0, hres1, hres2, hres3;
    v8i16 vres0, vres1, vres2, vres3;
    v8i16 zeros = { 0 };

    LD4x4_SH(src, src0, src1, src2, src3);
    AVC_ITRANS_H(src0, src1, src2, src3, hres0, hres1, hres2, hres3);
    TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3);
    AVC_ITRANS_H(hres0, hres1, hres2, hres3, vres0, vres1, vres2, vres3);
    SRARI_H4_SH(vres0, vres1, vres2, vres3, 6);
    ADDBLK_ST4x4_UB(vres0, vres1, vres2, vres3, dst, dst_stride);
    ST_SH2(zeros, zeros, src, 8);
}
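/* DC-only fast path for a 4x4 block: round the lone DC coefficient,
 * broadcast it, and add it to all 16 predicted pixels with clipping. */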
static void avc_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
                                      int32_t dst_stride)
{
    int16_t dc;
    uint32_t src0, src1, src2, src3;
    v16u8 pred = { 0 };
    v16i8 out;
    v8i16 input_dc, pred_r, pred_l;

    dc = (src[0] + 32) >> 6;
    input_dc = __msa_fill_h(dc);
    src[0] = 0;

    LW4(dst, dst_stride, src0, src1, src2, src3);
    INSERT_W4_UB(src0, src1, src2, src3, pred);
    UNPCK_UB_SH(pred, pred_r, pred_l);

    pred_r += input_dc;
    pred_l += input_dc;

    CLIP_SH2_0_255(pred_r, pred_l);
    out = __msa_pckev_b((v16i8) pred_l, (v16i8) pred_r);
    ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride);
}
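/* Dequantize and inverse-transform the 4x4 luma DC plane (inverse
 * Hadamard), then scatter each result to the DC slot of its 4x4 block
 * in the destination coefficient array (stride of 16 coefficients). */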
static void avc_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
                                     int32_t de_q_val)
{
#define DC_DEST_STRIDE 16
    int16_t out0, out1, out2, out3;
    v8i16 src0, src1, src2, src3;
    v8i16 vec0, vec1, vec2, vec3;
    v8i16 hres0, hres1, hres2, hres3;
    v8i16 vres0, vres1, vres2, vres3;
    v4i32 vres0_r, vres1_r, vres2_r, vres3_r;
    v4i32 de_q_vec = __msa_fill_w(de_q_val);

    LD4x4_SH(src, src0, src1, src2, src3);
    TRANSPOSE4x4_SH_SH(src0, src1, src2, src3, src0, src1, src2, src3);
    BUTTERFLY_4(src0, src2, src3, src1, vec0, vec3, vec2, vec1);
    BUTTERFLY_4(vec0, vec1, vec2, vec3, hres0, hres3, hres2, hres1);
    TRANSPOSE4x4_SH_SH(hres0, hres1, hres2, hres3, hres0, hres1, hres2, hres3);
    BUTTERFLY_4(hres0, hres1, hres3, hres2, vec0, vec3, vec2, vec1);
    BUTTERFLY_4(vec0, vec1, vec2, vec3, vres0, vres1, vres2, vres3);
    UNPCK_R_SH_SW(vres0, vres0_r);
    UNPCK_R_SH_SW(vres1, vres1_r);
    UNPCK_R_SH_SW(vres2, vres2_r);
    UNPCK_R_SH_SW(vres3, vres3_r);

    vres0_r *= de_q_vec;
    vres1_r *= de_q_vec;
    vres2_r *= de_q_vec;
    vres3_r *= de_q_vec;

    SRARI_W4_SW(vres0_r, vres1_r, vres2_r, vres3_r, 8);
    PCKEV_H2_SH(vres1_r, vres0_r, vres3_r, vres2_r, vec0, vec1);

    out0 = __msa_copy_s_h(vec0, 0);
    out1 = __msa_copy_s_h(vec0, 1);
    out2 = __msa_copy_s_h(vec0, 2);
    out3 = __msa_copy_s_h(vec0, 3);
    SH(out0, dst);
    SH(out1, (dst + 2 * DC_DEST_STRIDE));
    SH(out2, (dst + 8 * DC_DEST_STRIDE));
    SH(out3, (dst + 10 * DC_DEST_STRIDE));
    dst += DC_DEST_STRIDE;

    out0 = __msa_copy_s_h(vec0, 4);
    out1 = __msa_copy_s_h(vec0, 5);
    out2 = __msa_copy_s_h(vec0, 6);
    out3 = __msa_copy_s_h(vec0, 7);
    SH(out0, dst);
    SH(out1, (dst + 2 * DC_DEST_STRIDE));
    SH(out2, (dst + 8 * DC_DEST_STRIDE));
    SH(out3, (dst + 10 * DC_DEST_STRIDE));
    dst += (3 * DC_DEST_STRIDE);

    out0 = __msa_copy_s_h(vec1, 0);
    out1 = __msa_copy_s_h(vec1, 1);
    out2 = __msa_copy_s_h(vec1, 2);
    out3 = __msa_copy_s_h(vec1, 3);
    SH(out0, dst);
    SH(out1, (dst + 2 * DC_DEST_STRIDE));
    SH(out2, (dst + 8 * DC_DEST_STRIDE));
    SH(out3, (dst + 10 * DC_DEST_STRIDE));
    dst += DC_DEST_STRIDE;

    out0 = __msa_copy_s_h(vec1, 4);
    out1 = __msa_copy_s_h(vec1, 5);
    out2 = __msa_copy_s_h(vec1, 6);
    out3 = __msa_copy_s_h(vec1, 7);
    SH(out0, dst);
    SH(out1, (dst + 2 * DC_DEST_STRIDE));
    SH(out2, (dst + 8 * DC_DEST_STRIDE));
    SH(out3, (dst + 10 * DC_DEST_STRIDE));
#undef DC_DEST_STRIDE
}
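/* Full 8x8 inverse transform and reconstruction: horizontal pass on
 * 16-bit lanes, transpose, vertical pass widened to 32-bit lanes, then
 * a rounding shift by 6, addition of the prediction, and clipping. */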
static void avc_idct8_addblk_msa(uint8_t *dst, int16_t *src, int32_t dst_stride)
{
    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 vec0, vec1, vec2, vec3;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v4i32 tmp0_r, tmp1_r, tmp2_r, tmp3_r, tmp4_r, tmp5_r, tmp6_r, tmp7_r;
    v4i32 tmp0_l, tmp1_l, tmp2_l, tmp3_l, tmp4_l, tmp5_l, tmp6_l, tmp7_l;
    v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec0_l, vec1_l, vec2_l, vec3_l;
    v4i32 res0_r, res1_r, res2_r, res3_r, res4_r, res5_r, res6_r, res7_r;
    v4i32 res0_l, res1_l, res2_l, res3_l, res4_l, res5_l, res6_l, res7_l;
    v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16i8 zeros = { 0 };

    src[0] += 32;

    LD_SH8(src, 8, src0, src1, src2, src3, src4, src5, src6, src7);

    vec0 = src0 + src4;
    vec1 = src0 - src4;
    vec2 = src2 >> 1;
    vec2 = vec2 - src6;
    vec3 = src6 >> 1;
    vec3 = src2 + vec3;

    BUTTERFLY_4(vec0, vec1, vec2, vec3, tmp0, tmp1, tmp2, tmp3);

    vec0 = src7 >> 1;
    vec0 = src5 - vec0 - src3 - src7;
    vec1 = src3 >> 1;
    vec1 = src1 - vec1 + src7 - src3;
    vec2 = src5 >> 1;
    vec2 = vec2 - src1 + src7 + src5;
    vec3 = src1 >> 1;
    vec3 = vec3 + src3 + src5 + src1;
    tmp4 = vec3 >> 2;
    tmp4 += vec0;
    tmp5 = vec2 >> 2;
    tmp5 += vec1;
    tmp6 = vec1 >> 2;
    tmp6 -= vec2;
    tmp7 = vec0 >> 2;
    tmp7 = vec3 - tmp7;

    BUTTERFLY_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
                res0, res1, res2, res3, res4, res5, res6, res7);
    TRANSPOSE8x8_SH_SH(res0, res1, res2, res3, res4, res5, res6, res7,
                       res0, res1, res2, res3, res4, res5, res6, res7);
    UNPCK_SH_SW(res0, tmp0_r, tmp0_l);
    UNPCK_SH_SW(res1, tmp1_r, tmp1_l);
    UNPCK_SH_SW(res2, tmp2_r, tmp2_l);
    UNPCK_SH_SW(res3, tmp3_r, tmp3_l);
    UNPCK_SH_SW(res4, tmp4_r, tmp4_l);
    UNPCK_SH_SW(res5, tmp5_r, tmp5_l);
    UNPCK_SH_SW(res6, tmp6_r, tmp6_l);
    UNPCK_SH_SW(res7, tmp7_r, tmp7_l);
    BUTTERFLY_4(tmp0_r, tmp0_l, tmp4_l, tmp4_r, vec0_r, vec0_l, vec1_l, vec1_r);

    vec2_r = tmp2_r >> 1;
    vec2_l = tmp2_l >> 1;
    vec2_r -= tmp6_r;
    vec2_l -= tmp6_l;
    vec3_r = tmp6_r >> 1;
    vec3_l = tmp6_l >> 1;
    vec3_r += tmp2_r;
    vec3_l += tmp2_l;

    BUTTERFLY_4(vec0_r, vec1_r, vec2_r, vec3_r, tmp0_r, tmp2_r, tmp4_r, tmp6_r);
    BUTTERFLY_4(vec0_l, vec1_l, vec2_l, vec3_l, tmp0_l, tmp2_l, tmp4_l, tmp6_l);

    vec0_r = tmp7_r >> 1;
    vec0_l = tmp7_l >> 1;
    vec0_r = tmp5_r - vec0_r - tmp3_r - tmp7_r;
    vec0_l = tmp5_l - vec0_l - tmp3_l - tmp7_l;
    vec1_r = tmp3_r >> 1;
    vec1_l = tmp3_l >> 1;
    vec1_r = tmp1_r - vec1_r + tmp7_r - tmp3_r;
    vec1_l = tmp1_l - vec1_l + tmp7_l - tmp3_l;
    vec2_r = tmp5_r >> 1;
    vec2_l = tmp5_l >> 1;
    vec2_r = vec2_r - tmp1_r + tmp7_r + tmp5_r;
    vec2_l = vec2_l - tmp1_l + tmp7_l + tmp5_l;
    vec3_r = tmp1_r >> 1;
    vec3_l = tmp1_l >> 1;
    vec3_r = vec3_r + tmp3_r + tmp5_r + tmp1_r;
    vec3_l = vec3_l + tmp3_l + tmp5_l + tmp1_l;
    tmp1_r = vec3_r >> 2;
    tmp1_l = vec3_l >> 2;
    tmp1_r += vec0_r;
    tmp1_l += vec0_l;
    tmp3_r = vec2_r >> 2;
    tmp3_l = vec2_l >> 2;
    tmp3_r += vec1_r;
    tmp3_l += vec1_l;
    tmp5_r = vec1_r >> 2;
    tmp5_l = vec1_l >> 2;
    tmp5_r -= vec2_r;
    tmp5_l -= vec2_l;
    tmp7_r = vec0_r >> 2;
    tmp7_l = vec0_l >> 2;
    tmp7_r = vec3_r - tmp7_r;
    tmp7_l = vec3_l - tmp7_l;

    BUTTERFLY_4(tmp0_r, tmp0_l, tmp7_l, tmp7_r, res0_r, res0_l, res7_l, res7_r);
    BUTTERFLY_4(tmp2_r, tmp2_l, tmp5_l, tmp5_r, res1_r, res1_l, res6_l, res6_r);
    BUTTERFLY_4(tmp4_r, tmp4_l, tmp3_l, tmp3_r, res2_r, res2_l, res5_l, res5_r);
    BUTTERFLY_4(tmp6_r, tmp6_l, tmp1_l, tmp1_r, res3_r, res3_l, res4_l, res4_r);
    SRA_4V(res0_r, res0_l, res1_r, res1_l, 6);
    SRA_4V(res2_r, res2_l, res3_r, res3_l, 6);
    SRA_4V(res4_r, res4_l, res5_r, res5_l, 6);
    SRA_4V(res6_r, res6_l, res7_r, res7_l, 6);
    PCKEV_H4_SH(res0_l, res0_r, res1_l, res1_r, res2_l, res2_r, res3_l, res3_r,
                res0, res1, res2, res3);
    PCKEV_H4_SH(res4_l, res4_r, res5_l, res5_r, res6_l, res6_r, res7_l, res7_r,
                res4, res5, res6, res7);
    LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
    ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
               tmp0, tmp1, tmp2, tmp3);
    ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
               tmp4, tmp5, tmp6, tmp7);
    ADD4(res0, tmp0, res1, tmp1, res2, tmp2, res3, tmp3,
         res0, res1, res2, res3);
    ADD4(res4, tmp4, res5, tmp5, res6, tmp6, res7, tmp7,
         res4, res5, res6, res7);
    CLIP_SH4_0_255(res0, res1, res2, res3);
    CLIP_SH4_0_255(res4, res5, res6, res7);
    PCKEV_B4_SB(res1, res0, res3, res2, res5, res4, res7, res6,
                dst0, dst1, dst2, dst3);
    ST8x4_UB(dst0, dst1, dst, dst_stride);
    dst += (4 * dst_stride);
    ST8x4_UB(dst2, dst3, dst, dst_stride);
}
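/* DC-only fast path for an 8x8 block, mirroring the 4x4 DC case. */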
static void avc_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
                                    int32_t dst_stride)
{
    int32_t dc_val;
    v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 dst0_r, dst1_r, dst2_r, dst3_r, dst4_r, dst5_r, dst6_r, dst7_r;
    v8i16 dc;
    v16i8 zeros = { 0 };

    dc_val = (src[0] + 32) >> 6;
    dc = __msa_fill_h(dc_val);

    src[0] = 0;

    LD_SB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
    ILVR_B4_SH(zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
               dst0_r, dst1_r, dst2_r, dst3_r);
    ILVR_B4_SH(zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
               dst4_r, dst5_r, dst6_r, dst7_r);
    ADD4(dst0_r, dc, dst1_r, dc, dst2_r, dc, dst3_r, dc,
         dst0_r, dst1_r, dst2_r, dst3_r);
    ADD4(dst4_r, dc, dst5_r, dc, dst6_r, dc, dst7_r, dc,
         dst4_r, dst5_r, dst6_r, dst7_r);
    CLIP_SH4_0_255(dst0_r, dst1_r, dst2_r, dst3_r);
    CLIP_SH4_0_255(dst4_r, dst5_r, dst6_r, dst7_r);
    PCKEV_B4_SB(dst1_r, dst0_r, dst3_r, dst2_r, dst5_r, dst4_r, dst7_r, dst6_r,
                dst0, dst1, dst2, dst3);
    ST8x4_UB(dst0, dst1, dst, dst_stride);
    dst += (4 * dst_stride);
    ST8x4_UB(dst2, dst3, dst, dst_stride);
}
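/* Exported wrappers. The 4x4/8x8 add functions also clear the
 * coefficient block afterwards, matching the behaviour of the generic
 * C implementations in h264idct_template.c. */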
void ff_h264_idct_add_msa(uint8_t *dst, int16_t *src,
                          int32_t dst_stride)
{
    avc_idct4x4_addblk_msa(dst, src, dst_stride);
    memset(src, 0, 16 * sizeof(dctcoef));
}
void ff_h264_idct8_addblk_msa(uint8_t *dst, int16_t *src,
                              int32_t dst_stride)
{
    avc_idct8_addblk_msa(dst, src, dst_stride);
    memset(src, 0, 64 * sizeof(dctcoef));
}
void ff_h264_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
                                   int32_t dst_stride)
{
    avc_idct4x4_addblk_dc_msa(dst, src, dst_stride);
}
void ff_h264_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
                                 int32_t dst_stride)
{
    avc_idct8_dc_addblk_msa(dst, src, dst_stride);
}
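/* nzc holds per-block non-zero coefficient counts, indexed via scan8[].
 * A count of 1 with a non-zero DC coefficient selects the DC-only fast
 * path. sizeof(pixel) is 1 here (BIT_DEPTH defaults to 8 in
 * bit_depth_template.c), so block advances by 16 coefficients per
 * 4x4 block. */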
void ff_h264_idct_add16_msa(uint8_t *dst,
                            const int32_t *blk_offset,
                            int16_t *block, int32_t dst_stride,
                            const uint8_t nzc[15 * 8])
{
    int32_t i;

    for (i = 0; i < 16; i++) {
        int32_t nnz = nzc[scan8[i]];

        if (nnz) {
            if (nnz == 1 && ((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
            else
                ff_h264_idct_add_msa(dst + blk_offset[i],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
        }
    }
}
void ff_h264_idct8_add4_msa(uint8_t *dst, const int32_t *blk_offset,
                            int16_t *block, int32_t dst_stride,
                            const uint8_t nzc[15 * 8])
{
    int32_t cnt;

    for (cnt = 0; cnt < 16; cnt += 4) {
        int32_t nnz = nzc[scan8[cnt]];

        if (nnz) {
            if (nnz == 1 && ((dctcoef *) block)[cnt * 16])
                ff_h264_idct8_dc_addblk_msa(dst + blk_offset[cnt],
                                            block + cnt * 16 * sizeof(pixel),
                                            dst_stride);
            else
                ff_h264_idct8_addblk_msa(dst + blk_offset[cnt],
                                         block + cnt * 16 * sizeof(pixel),
                                         dst_stride);
        }
    }
}
void ff_h264_idct_add8_msa(uint8_t **dst,
                           const int32_t *blk_offset,
                           int16_t *block, int32_t dst_stride,
                           const uint8_t nzc[15 * 8])
{
    int32_t i, j;

    for (j = 1; j < 3; j++) {
        for (i = (j * 16); i < (j * 16 + 4); i++) {
            if (nzc[scan8[i]])
                ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
            else if (((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
        }
    }
}
void ff_h264_idct_add8_422_msa(uint8_t **dst,
                               const int32_t *blk_offset,
                               int16_t *block, int32_t dst_stride,
                               const uint8_t nzc[15 * 8])
{
    int32_t i, j;

    for (j = 1; j < 3; j++) {
        for (i = (j * 16); i < (j * 16 + 4); i++) {
            if (nzc[scan8[i]])
                ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
            else if (((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
        }
    }

    for (j = 1; j < 3; j++) {
        for (i = (j * 16 + 4); i < (j * 16 + 8); i++) {
            if (nzc[scan8[i + 4]])
                ff_h264_idct_add_msa(dst[j - 1] + blk_offset[i + 4],
                                     block + i * 16 * sizeof(pixel),
                                     dst_stride);
            else if (((dctcoef *) block)[i * 16])
                ff_h264_idct4x4_addblk_dc_msa(dst[j - 1] + blk_offset[i + 4],
                                              block + i * 16 * sizeof(pixel),
                                              dst_stride);
        }
    }
}
void ff_h264_idct_add16_intra_msa(uint8_t *dst,
                                  const int32_t *blk_offset,
                                  int16_t *block,
                                  int32_t dst_stride,
                                  const uint8_t nzc[15 * 8])
{
    int32_t i;

    for (i = 0; i < 16; i++) {
        if (nzc[scan8[i]])
            ff_h264_idct_add_msa(dst + blk_offset[i],
                                 block + i * 16 * sizeof(pixel), dst_stride);
        else if (((dctcoef *) block)[i * 16])
            ff_h264_idct4x4_addblk_dc_msa(dst + blk_offset[i],
                                          block + i * 16 * sizeof(pixel),
                                          dst_stride);
    }
}
void ff_h264_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
                                  int32_t de_qval)
{
    avc_deq_idct_luma_dc_msa(dst, src, de_qval);
}