/*****************************************************************************
 * dct-c.c: msa transform and zigzag
 *****************************************************************************
 * Copyright (C) 2015 x264 project
 *
 * Authors: Rishikesh More <rishikesh.more@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "macros.h"
#include "dct.h"

#if !HIGH_BIT_DEPTH
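/* One 1-D pass of the AVC 4x4 inverse transform: even part from in0/in2,
 * odd part from in1/in3 with the spec's >>1 scaling, combined by a final
 * butterfly into the four outputs. */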
#define AVC_ITRANS_H( in0, in1, in2, in3, out0, out1, out2, out3 )          \
{                                                                           \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                                   \
                                                                            \
    tmp0_m = in0 + in2;                                                     \
    tmp1_m = in0 - in2;                                                     \
    tmp2_m = in1 >> 1;                                                      \
    tmp2_m = tmp2_m - in3;                                                  \
    tmp3_m = in3 >> 1;                                                      \
    tmp3_m = in1 + tmp3_m;                                                  \
                                                                            \
    BUTTERFLY_4( tmp0_m, tmp1_m, tmp2_m, tmp3_m, out0, out1, out2, out3 );  \
}
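
/* Hadamard transform of the 4x4 luma DC block: row butterflies, transpose,
 * column butterflies, then a rounding shift ( x + 1 ) >> 1. */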
static void avc_dct4x4dc_msa( int16_t *p_src, int16_t *p_dst,
                              int32_t i_src_stride )
{
    v8i16 src0, src1, src2, src3, ver_res0, ver_res1, ver_res2, ver_res3;
    v4i32 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;
    v4i32 hor_res0, hor_res1, hor_res2, hor_res3;
    v4i32 ver_res0_r, ver_res1_r, ver_res2_r, ver_res3_r;

    LD_SH4( p_src, i_src_stride, src0, src1, src2, src3 );
    UNPCK_R_SH_SW( src0, src0_r );
    UNPCK_R_SH_SW( src1, src1_r );
    UNPCK_R_SH_SW( src2, src2_r );
    UNPCK_R_SH_SW( src3, src3_r );
    BUTTERFLY_4( src0_r, src2_r, src3_r, src1_r,
                 tmp0, tmp3, tmp2, tmp1 );
    BUTTERFLY_4( tmp0, tmp1, tmp2, tmp3,
                 hor_res0, hor_res3, hor_res2, hor_res1 );
    TRANSPOSE4x4_SW_SW( hor_res0, hor_res1, hor_res2, hor_res3,
                        hor_res0, hor_res1, hor_res2, hor_res3 );
    BUTTERFLY_4( hor_res0, hor_res2, hor_res3, hor_res1,
                 tmp0, tmp3, tmp2, tmp1 );
    BUTTERFLY_4( tmp0, tmp1, tmp2, tmp3,
                 ver_res0_r, ver_res3_r, ver_res2_r, ver_res1_r );
    SRARI_W4_SW( ver_res0_r, ver_res1_r, ver_res2_r, ver_res3_r, 1 );
    PCKEV_H4_SH( ver_res0_r, ver_res0_r, ver_res1_r, ver_res1_r,
                 ver_res2_r, ver_res2_r, ver_res3_r, ver_res3_r,
                 ver_res0, ver_res1, ver_res2, ver_res3 );
    PCKOD_D2_SH( ver_res1, ver_res0, ver_res3, ver_res2, ver_res0, ver_res2 );
    ST_SH2( ver_res0, ver_res2, p_dst, 8 );
}
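
/* Forward 4x4 transform of src - pred: the residual is computed with a
 * horizontal unsigned subtract, then the AVC core transform is applied to
 * rows and columns around a transpose. */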
static void avc_sub4x4_dct_msa( uint8_t *p_src, int32_t i_src_stride,
                                uint8_t *p_ref, int32_t i_dst_stride,
                                int16_t *p_dst )
{
    uint32_t i_src0, i_src1, i_src2, i_src3;
    uint32_t i_ref0, i_ref1, i_ref2, i_ref3;
    v16i8 src = { 0 };
    v16i8 ref = { 0 };
    v16u8 inp0, inp1;
    v8i16 diff0, diff1, diff2, diff3;
    v8i16 temp0, temp1, temp2, temp3;

    LW4( p_src, i_src_stride, i_src0, i_src1, i_src2, i_src3 );
    LW4( p_ref, i_dst_stride, i_ref0, i_ref1, i_ref2, i_ref3 );

    INSERT_W4_SB( i_src0, i_src1, i_src2, i_src3, src );
    INSERT_W4_SB( i_ref0, i_ref1, i_ref2, i_ref3, ref );

    ILVRL_B2_UB( src, ref, inp0, inp1 );

    HSUB_UB2_SH( inp0, inp1, diff0, diff2 );

    diff1 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) diff0, ( v2i64 ) diff0 );
    diff3 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) diff2, ( v2i64 ) diff2 );

    BUTTERFLY_4( diff0, diff1, diff2, diff3, temp0, temp1, temp2, temp3 );

    diff0 = temp0 + temp1;
    diff1 = ( temp3 << 1 ) + temp2;
    diff2 = temp0 - temp1;
    diff3 = temp3 - ( temp2 << 1 );

    TRANSPOSE4x4_SH_SH( diff0, diff1, diff2, diff3,
                        temp0, temp1, temp2, temp3 );
    BUTTERFLY_4( temp0, temp1, temp2, temp3, diff0, diff1, diff2, diff3 );

    temp0 = diff0 + diff1;
    temp1 = ( diff3 << 1 ) + diff2;
    temp2 = diff0 - diff1;
    temp3 = diff3 - ( diff2 << 1 );

    ILVR_D2_UB( temp1, temp0, temp3, temp2, inp0, inp1 );
    ST_UB2( inp0, inp1, p_dst, 8 );
}
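
/* Frame zigzag scan of a 4x4 block done as two 8-element shuffles; mask0 and
 * mask1 hold the scan order and are reused as the shuffle outputs. */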
static void avc_zigzag_scan_4x4_frame_msa( int16_t pi_dct[16],
                                           int16_t pi_level[16] )
{
    v8i16 src0, src1;
    v8i16 mask0 = { 0, 4, 1, 2, 5, 8, 12, 9 };
    v8i16 mask1 = { 6, 3, 7, 10, 13, 14, 11, 15 };

    LD_SH2( pi_dct, 8, src0, src1 );
    VSHF_H2_SH( src0, src1, src0, src1, mask0, mask1, mask0, mask1 );
    ST_SH2( mask0, mask1, pi_level, 8 );
}
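
/* 4x4 inverse transform and reconstruction: horizontal pass, transpose,
 * vertical pass, ( x + 32 ) >> 6 rounding, then add to the prediction with
 * clipping; the coefficient block is cleared afterwards. */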
static void avc_idct4x4_addblk_msa( uint8_t *p_dst, int16_t *p_src,
                                    int32_t i_dst_stride )
{
    v8i16 src0, src1, src2, src3;
    v8i16 hres0, hres1, hres2, hres3;
    v8i16 vres0, vres1, vres2, vres3;
    v8i16 zeros = { 0 };

    LD4x4_SH( p_src, src0, src1, src2, src3 );
    AVC_ITRANS_H( src0, src1, src2, src3, hres0, hres1, hres2, hres3 );
    TRANSPOSE4x4_SH_SH( hres0, hres1, hres2, hres3,
                        hres0, hres1, hres2, hres3 );
    AVC_ITRANS_H( hres0, hres1, hres2, hres3, vres0, vres1, vres2, vres3 );
    SRARI_H4_SH( vres0, vres1, vres2, vres3, 6 );
    ADDBLK_ST4x4_UB( vres0, vres1, vres2, vres3, p_dst, i_dst_stride );
    ST_SH2( zeros, zeros, p_src, 8 );
}
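
/* DC-only 4x4 reconstruction: round the lone DC coefficient, broadcast it,
 * add it to the 4x4 prediction with clipping, and clear it after use. */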
static void avc_idct4x4_addblk_dc_msa( uint8_t *p_dst, int16_t *p_src,
                                       int32_t i_dst_stride )
{
    int16_t i_dc;
    uint32_t i_src0, i_src1, i_src2, i_src3;
    v16u8 pred = { 0 };
    v16i8 out;
    v8i16 input_dc, pred_r, pred_l;

    i_dc = ( p_src[0] + 32 ) >> 6;
    input_dc = __msa_fill_h( i_dc );
    p_src[0] = 0;

    LW4( p_dst, i_dst_stride, i_src0, i_src1, i_src2, i_src3 );
    INSERT_W4_UB( i_src0, i_src1, i_src2, i_src3, pred );
    UNPCK_UB_SH( pred, pred_r, pred_l );

    pred_r += input_dc;
    pred_l += input_dc;

    CLIP_SH2_0_255( pred_r, pred_l );
    out = __msa_pckev_b( ( v16i8 ) pred_l, ( v16i8 ) pred_r );
    ST4x4_UB( out, out, 0, 1, 2, 3, p_dst, i_dst_stride );
}
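
/* 8x8 inverse transform and reconstruction. The horizontal pass runs in
 * 16-bit; the vertical pass is widened to 32-bit halves (_r/_l) to avoid
 * overflow. The +32 rounding bias is folded into the DC term up front, so a
 * plain >> 6 suffices before clipping and adding to the prediction. */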
static void avc_idct8_addblk_msa( uint8_t *p_dst, int16_t *p_src,
                                  int32_t i_dst_stride )
{
    v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
    v8i16 vec0, vec1, vec2, vec3;
    v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v4i32 tmp0_r, tmp1_r, tmp2_r, tmp3_r, tmp4_r, tmp5_r, tmp6_r, tmp7_r;
    v4i32 tmp0_l, tmp1_l, tmp2_l, tmp3_l, tmp4_l, tmp5_l, tmp6_l, tmp7_l;
    v4i32 vec0_r, vec1_r, vec2_r, vec3_r, vec0_l, vec1_l, vec2_l, vec3_l;
    v4i32 res0_r, res1_r, res2_r, res3_r, res4_r, res5_r, res6_r, res7_r;
    v4i32 res0_l, res1_l, res2_l, res3_l, res4_l, res5_l, res6_l, res7_l;
    v16i8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 zeros = { 0 };

    p_src[0] += 32;

    LD_SH8( p_src, 8, src0, src1, src2, src3, src4, src5, src6, src7 );

    vec0 = src0 + src4;
    vec1 = src0 - src4;
    vec2 = src2 >> 1;
    vec2 = vec2 - src6;
    vec3 = src6 >> 1;
    vec3 = src2 + vec3;

    BUTTERFLY_4( vec0, vec1, vec2, vec3, tmp0, tmp1, tmp2, tmp3 );

    vec0 = src7 >> 1;
    vec0 = src5 - vec0 - src3 - src7;
    vec1 = src3 >> 1;
    vec1 = src1 - vec1 + src7 - src3;
    vec2 = src5 >> 1;
    vec2 = vec2 - src1 + src7 + src5;
    vec3 = src1 >> 1;
    vec3 = vec3 + src3 + src5 + src1;

    tmp4 = vec3 >> 2;
    tmp4 += vec0;
    tmp5 = vec2 >> 2;
    tmp5 += vec1;
    tmp6 = vec1 >> 2;
    tmp6 -= vec2;
    tmp7 = vec0 >> 2;
    tmp7 = vec3 - tmp7;

    BUTTERFLY_8( tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
                 res0, res1, res2, res3, res4, res5, res6, res7 );
    TRANSPOSE8x8_SH_SH( res0, res1, res2, res3, res4, res5, res6, res7,
                        res0, res1, res2, res3, res4, res5, res6, res7 );
    UNPCK_SH_SW( res0, tmp0_r, tmp0_l );
    UNPCK_SH_SW( res1, tmp1_r, tmp1_l );
    UNPCK_SH_SW( res2, tmp2_r, tmp2_l );
    UNPCK_SH_SW( res3, tmp3_r, tmp3_l );
    UNPCK_SH_SW( res4, tmp4_r, tmp4_l );
    UNPCK_SH_SW( res5, tmp5_r, tmp5_l );
    UNPCK_SH_SW( res6, tmp6_r, tmp6_l );
    UNPCK_SH_SW( res7, tmp7_r, tmp7_l );
    BUTTERFLY_4( tmp0_r, tmp0_l, tmp4_l, tmp4_r,
                 vec0_r, vec0_l, vec1_l, vec1_r );

    vec2_r = tmp2_r >> 1;
    vec2_l = tmp2_l >> 1;
    vec2_r -= tmp6_r;
    vec2_l -= tmp6_l;
    vec3_r = tmp6_r >> 1;
    vec3_l = tmp6_l >> 1;
    vec3_r += tmp2_r;
    vec3_l += tmp2_l;

    BUTTERFLY_4( vec0_r, vec1_r, vec2_r, vec3_r,
                 tmp0_r, tmp2_r, tmp4_r, tmp6_r );
    BUTTERFLY_4( vec0_l, vec1_l, vec2_l, vec3_l,
                 tmp0_l, tmp2_l, tmp4_l, tmp6_l );

    vec0_r = tmp7_r >> 1;
    vec0_l = tmp7_l >> 1;
    vec0_r = tmp5_r - vec0_r - tmp3_r - tmp7_r;
    vec0_l = tmp5_l - vec0_l - tmp3_l - tmp7_l;
    vec1_r = tmp3_r >> 1;
    vec1_l = tmp3_l >> 1;
    vec1_r = tmp1_r - vec1_r + tmp7_r - tmp3_r;
    vec1_l = tmp1_l - vec1_l + tmp7_l - tmp3_l;
    vec2_r = tmp5_r >> 1;
    vec2_l = tmp5_l >> 1;
    vec2_r = vec2_r - tmp1_r + tmp7_r + tmp5_r;
    vec2_l = vec2_l - tmp1_l + tmp7_l + tmp5_l;
    vec3_r = tmp1_r >> 1;
    vec3_l = tmp1_l >> 1;
    vec3_r = vec3_r + tmp3_r + tmp5_r + tmp1_r;
    vec3_l = vec3_l + tmp3_l + tmp5_l + tmp1_l;
    tmp1_r = vec3_r >> 2;
    tmp1_l = vec3_l >> 2;
    tmp1_r += vec0_r;
    tmp1_l += vec0_l;
    tmp3_r = vec2_r >> 2;
    tmp3_l = vec2_l >> 2;
    tmp3_r += vec1_r;
    tmp3_l += vec1_l;
    tmp5_r = vec1_r >> 2;
    tmp5_l = vec1_l >> 2;
    tmp5_r -= vec2_r;
    tmp5_l -= vec2_l;
    tmp7_r = vec0_r >> 2;
    tmp7_l = vec0_l >> 2;
    tmp7_r = vec3_r - tmp7_r;
    tmp7_l = vec3_l - tmp7_l;

    BUTTERFLY_4( tmp0_r, tmp0_l, tmp7_l, tmp7_r,
                 res0_r, res0_l, res7_l, res7_r );
    BUTTERFLY_4( tmp2_r, tmp2_l, tmp5_l, tmp5_r,
                 res1_r, res1_l, res6_l, res6_r );
    BUTTERFLY_4( tmp4_r, tmp4_l, tmp3_l, tmp3_r,
                 res2_r, res2_l, res5_l, res5_r );
    BUTTERFLY_4( tmp6_r, tmp6_l, tmp1_l, tmp1_r,
                 res3_r, res3_l, res4_l, res4_r );
    SRA_4V( res0_r, res0_l, res1_r, res1_l, 6 );
    SRA_4V( res2_r, res2_l, res3_r, res3_l, 6 );
    SRA_4V( res4_r, res4_l, res5_r, res5_l, 6 );
    SRA_4V( res6_r, res6_l, res7_r, res7_l, 6 );
    PCKEV_H4_SH( res0_l, res0_r, res1_l, res1_r, res2_l, res2_r, res3_l, res3_r,
                 res0, res1, res2, res3 );
    PCKEV_H4_SH( res4_l, res4_r, res5_l, res5_r, res6_l, res6_r, res7_l, res7_r,
                 res4, res5, res6, res7 );
    LD_SB8( p_dst, i_dst_stride,
            dst0, dst1, dst2, dst3,
            dst4, dst5, dst6, dst7 );
    ILVR_B4_SH( zeros, dst0, zeros, dst1, zeros, dst2, zeros, dst3,
                tmp0, tmp1, tmp2, tmp3 );
    ILVR_B4_SH( zeros, dst4, zeros, dst5, zeros, dst6, zeros, dst7,
                tmp4, tmp5, tmp6, tmp7 );
    ADD4( res0, tmp0, res1, tmp1, res2, tmp2, res3, tmp3,
          res0, res1, res2, res3 );
    ADD4( res4, tmp4, res5, tmp5, res6, tmp6, res7, tmp7,
          res4, res5, res6, res7 );
    CLIP_SH4_0_255( res0, res1, res2, res3 );
    CLIP_SH4_0_255( res4, res5, res6, res7 );
    PCKEV_B4_SB( res1, res0, res3, res2, res5, res4, res7, res6,
                 dst0, dst1, dst2, dst3 );
    ST8x4_UB( dst0, dst1, p_dst, i_dst_stride );
    p_dst += ( 4 * i_dst_stride );
    ST8x4_UB( dst2, dst3, p_dst, i_dst_stride );
}
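
/* Inverse 4x4 DC Hadamard: row and column butterflies around a transpose.
 * No rounding shift is applied in this pass; the scaling is handled during
 * dequantization. */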
static void avc_idct4x4dc_msa( int16_t *p_src, int32_t i_src_stride,
                               int16_t *p_dst, int32_t i_dst_stride )
{
    v8i16 src0, src1, src2, src3;
    v4i32 src0_r, src1_r, src2_r, src3_r;
    v4i32 hres0, hres1, hres2, hres3;
    v8i16 vres0, vres1, vres2, vres3;
    v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    v2i64 res0, res1;

    LD_SH4( p_src, i_src_stride, src0, src1, src2, src3 );
    UNPCK_R_SH_SW( src0, src0_r );
    UNPCK_R_SH_SW( src1, src1_r );
    UNPCK_R_SH_SW( src2, src2_r );
    UNPCK_R_SH_SW( src3, src3_r );
    BUTTERFLY_4( src0_r, src2_r, src3_r, src1_r, vec0, vec3, vec2, vec1 );
    BUTTERFLY_4( vec0, vec1, vec2, vec3, hres0, hres3, hres2, hres1 );
    TRANSPOSE4x4_SW_SW( hres0, hres1, hres2, hres3,
                        hres0, hres1, hres2, hres3 );
    BUTTERFLY_4( hres0, hres2, hres3, hres1, vec0, vec3, vec2, vec1 );
    BUTTERFLY_4( vec0, vec1, vec2, vec3, vec4, vec7, vec6, vec5 );
    PCKEV_H4_SH( vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7,
                 vres0, vres1, vres2, vres3 );
    PCKOD_D2_SD( vres1, vres0, vres3, vres2, res0, res1 );
    ST8x4_UB( res0, res1, p_dst, i_dst_stride * 2 );
}
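
/* Sum of the src - pred differences over one 4x4 block; the DC-only forward
 * transforms below are built from these per-block sums. */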
static int32_t subtract_sum4x4_msa( uint8_t *p_src, int32_t i_src_stride,
                                    uint8_t *pred_ptr, int32_t i_pred_stride )
{
    int16_t i_sum;
    uint32_t i_src0, i_src1, i_src2, i_src3;
    uint32_t i_pred0, i_pred1, i_pred2, i_pred3;
    v16i8 src = { 0 };
    v16i8 pred = { 0 };
    v16u8 src_l0, src_l1;
    v8i16 diff0, diff1;

    LW4( p_src, i_src_stride, i_src0, i_src1, i_src2, i_src3 );
    LW4( pred_ptr, i_pred_stride, i_pred0, i_pred1, i_pred2, i_pred3 );
    INSERT_W4_SB( i_src0, i_src1, i_src2, i_src3, src );
    INSERT_W4_SB( i_pred0, i_pred1, i_pred2, i_pred3, pred );
    ILVRL_B2_UB( src, pred, src_l0, src_l1 );
    HSUB_UB2_SH( src_l0, src_l1, diff0, diff1 );
    i_sum = HADD_UH_U32( diff0 + diff1 );

    return i_sum;
}
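
/* x264 entry points: the wrappers below map x264's fixed-stride encode
 * (FENC_STRIDE) and decode (FDEC_STRIDE) planes onto the kernels above. */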
void x264_dct4x4dc_msa( int16_t d[16] )
{
    avc_dct4x4dc_msa( d, d, 4 );
}

void x264_idct4x4dc_msa( int16_t d[16] )
{
    avc_idct4x4dc_msa( d, 4, d, 4 );
}

void x264_add4x4_idct_msa( uint8_t *p_dst, int16_t pi_dct[16] )
{
    avc_idct4x4_addblk_msa( p_dst, pi_dct, FDEC_STRIDE );
}

void x264_add8x8_idct_msa( uint8_t *p_dst, int16_t pi_dct[4][16] )
{
    avc_idct4x4_addblk_msa( &p_dst[0], &pi_dct[0][0], FDEC_STRIDE );
    avc_idct4x4_addblk_msa( &p_dst[4], &pi_dct[1][0], FDEC_STRIDE );
    avc_idct4x4_addblk_msa( &p_dst[4 * FDEC_STRIDE + 0],
                            &pi_dct[2][0], FDEC_STRIDE );
    avc_idct4x4_addblk_msa( &p_dst[4 * FDEC_STRIDE + 4],
                            &pi_dct[3][0], FDEC_STRIDE );
}

void x264_add16x16_idct_msa( uint8_t *p_dst, int16_t pi_dct[16][16] )
{
    x264_add8x8_idct_msa( &p_dst[0], &pi_dct[0] );
    x264_add8x8_idct_msa( &p_dst[8], &pi_dct[4] );
    x264_add8x8_idct_msa( &p_dst[8 * FDEC_STRIDE + 0], &pi_dct[8] );
    x264_add8x8_idct_msa( &p_dst[8 * FDEC_STRIDE + 8], &pi_dct[12] );
}

void x264_add8x8_idct8_msa( uint8_t *p_dst, int16_t pi_dct[64] )
{
    avc_idct8_addblk_msa( p_dst, pi_dct, FDEC_STRIDE );
}

void x264_add16x16_idct8_msa( uint8_t *p_dst, int16_t pi_dct[4][64] )
{
    avc_idct8_addblk_msa( &p_dst[0], &pi_dct[0][0], FDEC_STRIDE );
    avc_idct8_addblk_msa( &p_dst[8], &pi_dct[1][0], FDEC_STRIDE );
    avc_idct8_addblk_msa( &p_dst[8 * FDEC_STRIDE + 0],
                          &pi_dct[2][0], FDEC_STRIDE );
    avc_idct8_addblk_msa( &p_dst[8 * FDEC_STRIDE + 8],
                          &pi_dct[3][0], FDEC_STRIDE );
}

void x264_add8x8_idct_dc_msa( uint8_t *p_dst, int16_t pi_dct[4] )
{
    avc_idct4x4_addblk_dc_msa( &p_dst[0], &pi_dct[0], FDEC_STRIDE );
    avc_idct4x4_addblk_dc_msa( &p_dst[4], &pi_dct[1], FDEC_STRIDE );
    avc_idct4x4_addblk_dc_msa( &p_dst[4 * FDEC_STRIDE + 0],
                               &pi_dct[2], FDEC_STRIDE );
    avc_idct4x4_addblk_dc_msa( &p_dst[4 * FDEC_STRIDE + 4],
                               &pi_dct[3], FDEC_STRIDE );
}

void x264_add16x16_idct_dc_msa( uint8_t *p_dst, int16_t pi_dct[16] )
{
    for( int32_t i = 0; i < 4; i++, pi_dct += 4, p_dst += 4 * FDEC_STRIDE )
    {
        avc_idct4x4_addblk_dc_msa( &p_dst[ 0], &pi_dct[0], FDEC_STRIDE );
        avc_idct4x4_addblk_dc_msa( &p_dst[ 4], &pi_dct[1], FDEC_STRIDE );
        avc_idct4x4_addblk_dc_msa( &p_dst[ 8], &pi_dct[2], FDEC_STRIDE );
        avc_idct4x4_addblk_dc_msa( &p_dst[12], &pi_dct[3], FDEC_STRIDE );
    }
}

void x264_sub4x4_dct_msa( int16_t p_dst[16], uint8_t *p_src,
                          uint8_t *p_ref )
{
    avc_sub4x4_dct_msa( p_src, FENC_STRIDE, p_ref, FDEC_STRIDE, p_dst );
}

void x264_sub8x8_dct_msa( int16_t p_dst[4][16], uint8_t *p_src,
                          uint8_t *p_ref )
{
    avc_sub4x4_dct_msa( &p_src[0], FENC_STRIDE,
                        &p_ref[0], FDEC_STRIDE, p_dst[0] );
    avc_sub4x4_dct_msa( &p_src[4], FENC_STRIDE,
                        &p_ref[4], FDEC_STRIDE, p_dst[1] );
    avc_sub4x4_dct_msa( &p_src[4 * FENC_STRIDE + 0], FENC_STRIDE,
                        &p_ref[4 * FDEC_STRIDE + 0], FDEC_STRIDE, p_dst[2] );
    avc_sub4x4_dct_msa( &p_src[4 * FENC_STRIDE + 4], FENC_STRIDE,
                        &p_ref[4 * FDEC_STRIDE + 4], FDEC_STRIDE, p_dst[3] );
}

void x264_sub16x16_dct_msa( int16_t p_dst[16][16],
                            uint8_t *p_src, uint8_t *p_ref )
{
    x264_sub8x8_dct_msa( &p_dst[ 0], &p_src[0], &p_ref[0] );
    x264_sub8x8_dct_msa( &p_dst[ 4], &p_src[8], &p_ref[8] );
    x264_sub8x8_dct_msa( &p_dst[ 8], &p_src[8 * FENC_STRIDE + 0],
                         &p_ref[8 * FDEC_STRIDE + 0] );
    x264_sub8x8_dct_msa( &p_dst[12], &p_src[8 * FENC_STRIDE + 8],
                         &p_ref[8 * FDEC_STRIDE + 8] );
}
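
/* 8x8 DC-only forward transform: sum each 4x4 sub-block, then apply a 2x2
 * Hadamard to the four sums with two scalar butterfly passes. */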
void x264_sub8x8_dct_dc_msa( int16_t pi_dct[4],
                             uint8_t *p_pix1, uint8_t *p_pix2 )
{
    int32_t d0, d1, d2, d3;

    pi_dct[0] = subtract_sum4x4_msa( &p_pix1[0], FENC_STRIDE,
                                     &p_pix2[0], FDEC_STRIDE );
    pi_dct[1] = subtract_sum4x4_msa( &p_pix1[4], FENC_STRIDE,
                                     &p_pix2[4], FDEC_STRIDE );
    pi_dct[2] = subtract_sum4x4_msa( &p_pix1[4 * FENC_STRIDE + 0], FENC_STRIDE,
                                     &p_pix2[4 * FDEC_STRIDE + 0],
                                     FDEC_STRIDE );
    pi_dct[3] = subtract_sum4x4_msa( &p_pix1[4 * FENC_STRIDE + 4], FENC_STRIDE,
                                     &p_pix2[4 * FDEC_STRIDE + 4],
                                     FDEC_STRIDE );

    BUTTERFLY_4( pi_dct[0], pi_dct[2], pi_dct[3], pi_dct[1], d0, d1, d3, d2 );
    BUTTERFLY_4( d0, d2, d3, d1, pi_dct[0], pi_dct[2], pi_dct[3], pi_dct[1] );
}
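
/* 8x16 (chroma 4:2:2) DC-only forward transform: eight 4x4 sub-block sums
 * followed by a 2x4 transform computed as three scalar butterfly passes. */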
void x264_sub8x16_dct_dc_msa( int16_t pi_dct[8],
                              uint8_t *p_pix1, uint8_t *p_pix2 )
{
    int32_t a0, a1, a2, a3, a4, a5, a6, a7;
    int32_t b0, b1, b2, b3, b4, b5, b6, b7;

    a0 = subtract_sum4x4_msa( &p_pix1[ 0 * FENC_STRIDE + 0], FENC_STRIDE,
                              &p_pix2[ 0 * FDEC_STRIDE + 0], FDEC_STRIDE );
    a1 = subtract_sum4x4_msa( &p_pix1[ 0 * FENC_STRIDE + 4], FENC_STRIDE,
                              &p_pix2[ 0 * FDEC_STRIDE + 4], FDEC_STRIDE );
    a2 = subtract_sum4x4_msa( &p_pix1[ 4 * FENC_STRIDE + 0], FENC_STRIDE,
                              &p_pix2[ 4 * FDEC_STRIDE + 0], FDEC_STRIDE );
    a3 = subtract_sum4x4_msa( &p_pix1[ 4 * FENC_STRIDE + 4], FENC_STRIDE,
                              &p_pix2[ 4 * FDEC_STRIDE + 4], FDEC_STRIDE );
    a4 = subtract_sum4x4_msa( &p_pix1[ 8 * FENC_STRIDE + 0], FENC_STRIDE,
                              &p_pix2[ 8 * FDEC_STRIDE + 0], FDEC_STRIDE );
    a5 = subtract_sum4x4_msa( &p_pix1[ 8 * FENC_STRIDE + 4], FENC_STRIDE,
                              &p_pix2[ 8 * FDEC_STRIDE + 4], FDEC_STRIDE );
    a6 = subtract_sum4x4_msa( &p_pix1[12 * FENC_STRIDE + 0], FENC_STRIDE,
                              &p_pix2[12 * FDEC_STRIDE + 0], FDEC_STRIDE );
    a7 = subtract_sum4x4_msa( &p_pix1[12 * FENC_STRIDE + 4], FENC_STRIDE,
                              &p_pix2[12 * FDEC_STRIDE + 4], FDEC_STRIDE );

    BUTTERFLY_8( a0, a2, a4, a6, a7, a5, a3, a1,
                 b0, b1, b2, b3, b7, b6, b5, b4 );
    BUTTERFLY_8( b0, b2, b4, b6, b7, b5, b3, b1,
                 a0, a1, a2, a3, a7, a6, a5, a4 );
    BUTTERFLY_8( a0, a2, a4, a6, a7, a5, a3, a1,
                 pi_dct[0], pi_dct[1], pi_dct[6], pi_dct[7],
                 pi_dct[5], pi_dct[4], pi_dct[3], pi_dct[2] );
}

void x264_zigzag_scan_4x4_frame_msa( int16_t pi_level[16], int16_t pi_dct[16] )
{
    avc_zigzag_scan_4x4_frame_msa( pi_dct, pi_level );
}
#endif