4 * Copyright (c) 2016 Loongson Technology Corporation Limited
5 * Copyright (c) 2016 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
7 * This file is part of FFmpeg.
9 * FFmpeg is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * FFmpeg is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with FFmpeg; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #ifndef AVUTIL_MIPS_MMIUTILS_H
25 #define AVUTIL_MIPS_MMIUTILS_H
28 #include "libavutil/mips/asmdefs.h"
/*
 * Scratch operands for the MMI_* macros below: each DECLARE_VAR_* declares
 * a dummy C variable and each RESTRICT_ASM_* emits the matching inline-asm
 * output-operand entry (note the trailing comma, so it can be pasted into
 * an operand list), letting the macros use %[low32], %[all64] and %[addrt]
 * as scratch registers.
 */
32 #define DECLARE_VAR_LOW32 int32_t low32
33 #define RESTRICT_ASM_LOW32 [low32]"=&r"(low32),
34 #define DECLARE_VAR_ALL64 int64_t all64
35 #define RESTRICT_ASM_ALL64 [all64]"=&r"(all64),
36 #define DECLARE_VAR_ADDRT mips_reg addrt
37 #define RESTRICT_ASM_ADDRT [addrt]"=&r"(addrt),
/* Indexed GPR load/store: reg <-> mem[addr + stride + bias].  This branch
 * has no single-instruction indexed forms, so the effective address is
 * computed into %[addrt] first (PTR_ADDU expands to addu/daddu per ABI,
 * from asmdefs.h). */
39 #define MMI_LWX(reg, addr, stride, bias) \
40 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
41 "lw "#reg", "#bias"(%[addrt]) \n\t"
43 #define MMI_SWX(reg, addr, stride, bias) \
44 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
45 "sw "#reg", "#bias"(%[addrt]) \n\t"
47 #define MMI_LDX(reg, addr, stride, bias) \
48 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
49 "ld "#reg", "#bias"(%[addrt]) \n\t"
51 #define MMI_SDX(reg, addr, stride, bias) \
52 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
53 "sd "#reg", "#bias"(%[addrt]) \n\t"
/* 32-bit FP load/store for this branch.  Aligned forms are plain
 * lwc1/swc1; unaligned forms (U prefix) bounce the value through the
 * %[low32] GPR scratch using the ulw/usw unaligned pseudo-instructions
 * plus mtc1/mfc1; indexed forms (X suffix) compute addr + stride into
 * %[addrt] first and reuse the aligned macro. */
55 #define MMI_LWC1(fp, addr, bias) \
56 "lwc1 "#fp", "#bias"("#addr") \n\t"
58 #define MMI_ULWC1(fp, addr, bias) \
59 "ulw %[low32], "#bias"("#addr") \n\t" \
60 "mtc1 %[low32], "#fp" \n\t"
62 #define MMI_LWXC1(fp, addr, stride, bias) \
63 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
64 MMI_LWC1(fp, %[addrt], bias)
66 #define MMI_SWC1(fp, addr, bias) \
67 "swc1 "#fp", "#bias"("#addr") \n\t"
69 #define MMI_USWC1(fp, addr, bias) \
70 "mfc1 %[low32], "#fp" \n\t" \
71 "usw %[low32], "#bias"("#addr") \n\t"
73 #define MMI_SWXC1(fp, addr, stride, bias) \
74 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
75 MMI_SWC1(fp, %[addrt], bias)
/* 64-bit FP equivalents: same pattern, through the %[all64] scratch with
 * uld/usd and dmtc1/dmfc1. */
77 #define MMI_LDC1(fp, addr, bias) \
78 "ldc1 "#fp", "#bias"("#addr") \n\t"
80 #define MMI_ULDC1(fp, addr, bias) \
81 "uld %[all64], "#bias"("#addr") \n\t" \
82 "dmtc1 %[all64], "#fp" \n\t"
84 #define MMI_LDXC1(fp, addr, stride, bias) \
85 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
86 MMI_LDC1(fp, %[addrt], bias)
88 #define MMI_SDC1(fp, addr, bias) \
89 "sdc1 "#fp", "#bias"("#addr") \n\t"
91 #define MMI_USDC1(fp, addr, bias) \
92 "dmfc1 %[all64], "#fp" \n\t" \
93 "usd %[all64], "#bias"("#addr") \n\t"
95 #define MMI_SDXC1(fp, addr, stride, bias) \
96 PTR_ADDU "%[addrt], "#addr", "#stride" \n\t" \
97 MMI_SDC1(fp, %[addrt], bias)
/* 128-bit load/store of a register pair, emulated here as two 64-bit
 * accesses at bias and bias+8 (this branch avoids the single-instruction
 * gslq/gssq forms used in the Loongson-3 path below). */
99 #define MMI_LQ(reg1, reg2, addr, bias) \
100 "ld "#reg1", "#bias"("#addr") \n\t" \
101 "ld "#reg2", 8+"#bias"("#addr") \n\t"
103 #define MMI_SQ(reg1, reg2, addr, bias) \
104 "sd "#reg1", "#bias"("#addr") \n\t" \
105 "sd "#reg2", 8+"#bias"("#addr") \n\t"
/* Same for an FP register pair, via ldc1/sdc1. */
107 #define MMI_LQC1(fp1, fp2, addr, bias) \
108 "ldc1 "#fp1", "#bias"("#addr") \n\t" \
109 "ldc1 "#fp2", 8+"#bias"("#addr") \n\t"
111 #define MMI_SQC1(fp1, fp2, addr, bias) \
112 "sdc1 "#fp1", "#bias"("#addr") \n\t" \
113 "sdc1 "#fp2", 8+"#bias"("#addr") \n\t"
/*
 * Loongson-3 path: the gs* extended instructions perform indexed and
 * unaligned accesses in a single op, so the ALL64/ADDRT scratch-operand
 * declarations expand to nothing.  (The matching "#if HAVE_LOONGSON2"
 * opener is not visible in this view; see the "#endif" comment below.)
 */
115 #elif HAVE_LOONGSON3 /* !HAVE_LOONGSON2 */
117 #define DECLARE_VAR_ALL64
118 #define RESTRICT_ASM_ALL64
119 #define DECLARE_VAR_ADDRT
120 #define RESTRICT_ASM_ADDRT
/* Indexed GPR load/store: reg <-> mem[addr + stride + bias] in one
 * gs*-extension instruction. */
122 #define MMI_LWX(reg, addr, stride, bias) \
123 "gslwx "#reg", "#bias"("#addr", "#stride") \n\t"
125 #define MMI_SWX(reg, addr, stride, bias) \
126 "gsswx "#reg", "#bias"("#addr", "#stride") \n\t"
128 #define MMI_LDX(reg, addr, stride, bias) \
129 "gsldx "#reg", "#bias"("#addr", "#stride") \n\t"
131 #define MMI_SDX(reg, addr, stride, bias) \
132 "gssdx "#reg", "#bias"("#addr", "#stride") \n\t"
/* Aligned 32-bit FP load: plain MIPS lwc1. */
134 #define MMI_LWC1(fp, addr, bias) \
135 "lwc1 "#fp", "#bias"("#addr") \n\t"
137 #if _MIPS_SIM == _ABIO32 /* workaround for 3A2000 gslwlc1 bug */
/* o32 builds: perform the unaligned 32-bit load through a GPR (ulw) and
 * move it into the FPR with mtc1, so the buggy gslwlc1 is never emitted;
 * this variant needs the %[low32] scratch operand. */
139 #define DECLARE_VAR_LOW32 int32_t low32
140 #define RESTRICT_ASM_LOW32 [low32]"=&r"(low32),
142 #define MMI_ULWC1(fp, addr, bias) \
143 "ulw %[low32], "#bias"("#addr") \n\t" \
144 "mtc1 %[low32], "#fp" \n\t"
146 #else /* _MIPS_SIM != _ABIO32 */
/* n32/n64 builds: unaligned 32-bit load straight into the FPR with the
 * left/right pair (left part at bias+3, right part at bias); no GPR
 * scratch needed. */
148 #define DECLARE_VAR_LOW32
149 #define RESTRICT_ASM_LOW32
151 #define MMI_ULWC1(fp, addr, bias) \
152 "gslwlc1 "#fp", 3+"#bias"("#addr") \n\t" \
153 "gslwrc1 "#fp", "#bias"("#addr") \n\t"
155 #endif /* _MIPS_SIM != _ABIO32 */
/* 32-bit FP: indexed load, aligned/unaligned/indexed store.  Unaligned
 * forms use the gs*lc1/gs*rc1 left-right pair: left part at bias+3,
 * right part at bias. */
157 #define MMI_LWXC1(fp, addr, stride, bias) \
158 "gslwxc1 "#fp", "#bias"("#addr", "#stride") \n\t"
160 #define MMI_SWC1(fp, addr, bias) \
161 "swc1 "#fp", "#bias"("#addr") \n\t"
163 #define MMI_USWC1(fp, addr, bias) \
164 "gsswlc1 "#fp", 3+"#bias"("#addr") \n\t" \
165 "gsswrc1 "#fp", "#bias"("#addr") \n\t"
167 #define MMI_SWXC1(fp, addr, stride, bias) \
168 "gsswxc1 "#fp", "#bias"("#addr", "#stride") \n\t"
/* 64-bit FP: same set; the unaligned left part sits at bias+7. */
170 #define MMI_LDC1(fp, addr, bias) \
171 "ldc1 "#fp", "#bias"("#addr") \n\t"
173 #define MMI_ULDC1(fp, addr, bias) \
174 "gsldlc1 "#fp", 7+"#bias"("#addr") \n\t" \
175 "gsldrc1 "#fp", "#bias"("#addr") \n\t"
177 #define MMI_LDXC1(fp, addr, stride, bias) \
178 "gsldxc1 "#fp", "#bias"("#addr", "#stride") \n\t"
180 #define MMI_SDC1(fp, addr, bias) \
181 "sdc1 "#fp", "#bias"("#addr") \n\t"
183 #define MMI_USDC1(fp, addr, bias) \
184 "gssdlc1 "#fp", 7+"#bias"("#addr") \n\t" \
185 "gssdrc1 "#fp", "#bias"("#addr") \n\t"
187 #define MMI_SDXC1(fp, addr, stride, bias) \
188 "gssdxc1 "#fp", "#bias"("#addr", "#stride") \n\t"
/* 128-bit load/store of a GPR or FPR pair in a single instruction.
 * NOTE(review): gslq/gssq presumably require a 16-byte-aligned address —
 * confirm against the Loongson ISA manual before using on arbitrary
 * pointers. */
190 #define MMI_LQ(reg1, reg2, addr, bias) \
191 "gslq "#reg1", "#reg2", "#bias"("#addr") \n\t"
193 #define MMI_SQ(reg1, reg2, addr, bias) \
194 "gssq "#reg1", "#reg2", "#bias"("#addr") \n\t"
196 #define MMI_LQC1(fp1, fp2, addr, bias) \
197 "gslqc1 "#fp1", "#fp2", "#bias"("#addr") \n\t"
199 #define MMI_SQC1(fp1, fp2, addr, bias) \
200 "gssqc1 "#fp1", "#fp2", "#bias"("#addr") \n\t"
202 #endif /* HAVE_LOONGSON2 */
/*
 * NOTE(review): the BACKUP_REG definition below appears truncated in this
 * view — the "#define BACKUP_REG" header line, the __asm__ statement
 * wrappers, the o32 #else/#endif lines and the clobber lists are missing.
 * What is visible allocates a 16-byte-aligned buffer and saves callee-saved
 * FP registers into it with gssqc1: $f24..$f31 when _MIPS_SIM == _ABI64,
 * otherwise the even registers $f20..$f30.  Reconstruct from the upstream
 * file before editing.
 */
208 LOCAL_ALIGNED_16(double, temp_backup_reg, [8]); \
209 if (_MIPS_SIM == _ABI64) \
211 "gssqc1 $f25, $f24, 0x00(%[temp]) \n\t" \
212 "gssqc1 $f27, $f26, 0x10(%[temp]) \n\t" \
213 "gssqc1 $f29, $f28, 0x20(%[temp]) \n\t" \
214 "gssqc1 $f31, $f30, 0x30(%[temp]) \n\t" \
216 : [temp]"r"(temp_backup_reg) \
221 "gssqc1 $f22, $f20, 0x00(%[temp]) \n\t" \
222 "gssqc1 $f26, $f24, 0x10(%[temp]) \n\t" \
223 "gssqc1 $f30, $f28, 0x20(%[temp]) \n\t" \
225 : [temp]"r"(temp_backup_reg) \
/*
 * NOTE(review): RECOVER_REG reloads (gslqc1) the registers saved by
 * BACKUP_REG from the same temp_backup_reg buffer; its body is likewise
 * truncated in this view (missing __asm__ wrappers, the o32 #else header
 * and the final #endif).
 */
232 #define RECOVER_REG \
233 if (_MIPS_SIM == _ABI64) \
235 "gslqc1 $f25, $f24, 0x00(%[temp]) \n\t" \
236 "gslqc1 $f27, $f26, 0x10(%[temp]) \n\t" \
237 "gslqc1 $f29, $f28, 0x20(%[temp]) \n\t" \
238 "gslqc1 $f31, $f30, 0x30(%[temp]) \n\t" \
240 : [temp]"r"(temp_backup_reg) \
245 "gslqc1 $f22, $f20, 0x00(%[temp]) \n\t" \
246 "gslqc1 $f26, $f24, 0x10(%[temp]) \n\t" \
247 "gslqc1 $f30, $f28, 0x20(%[temp]) \n\t" \
249 : [temp]"r"(temp_backup_reg) \
/*
 * brief: Transpose 2X2 word packaged data.
 * fr_i0, fr_i1: src rows; fr_o0, fr_o1: dst rows.
 * fr_o0 collects the low 32-bit words of i0/i1, fr_o1 the high words,
 * so rows become columns.
 */
258 #define TRANSPOSE_2W(fr_i0, fr_i1, fr_o0, fr_o1) \
259 "punpcklwd "#fr_o0", "#fr_i0", "#fr_i1" \n\t" \
260 "punpckhwd "#fr_o1", "#fr_i0", "#fr_i1" \n\t"
/*
 * brief: Transpose 4X4 half word packaged data.
 * fr_i0, fr_i1, fr_i2, fr_i3: src & dst
 * fr_t0, fr_t1, fr_t2, fr_t3: temporary register
 * Two passes: interleave the 16-bit halfwords of row pairs (i0,i1) and
 * (i2,i3) into the temps, then interleave the temps' 32-bit words back
 * into the i-registers, completing the transpose.
 */
267 #define TRANSPOSE_4H(fr_i0, fr_i1, fr_i2, fr_i3, \
268 fr_t0, fr_t1, fr_t2, fr_t3) \
269 "punpcklhw "#fr_t0", "#fr_i0", "#fr_i1" \n\t" \
270 "punpckhhw "#fr_t1", "#fr_i0", "#fr_i1" \n\t" \
271 "punpcklhw "#fr_t2", "#fr_i2", "#fr_i3" \n\t" \
272 "punpckhhw "#fr_t3", "#fr_i2", "#fr_i3" \n\t" \
273 "punpcklwd "#fr_i0", "#fr_t0", "#fr_t2" \n\t" \
274 "punpckhwd "#fr_i1", "#fr_t0", "#fr_t2" \n\t" \
275 "punpcklwd "#fr_i2", "#fr_t1", "#fr_t3" \n\t" \
276 "punpckhwd "#fr_i3", "#fr_t1", "#fr_t3" \n\t"
/*
 * brief: Transpose 8x8 byte packaged data.
 * fr_i0~i7: src & dst
 * fr_t0~t3: temporary register
 * Three interleave passes (bytes -> halfwords -> words).  Intermediate
 * results are shuffled through both the t- and i-registers, so all eight
 * i-registers are rewritten; only four temps are needed.
 */
283 #define TRANSPOSE_8B(fr_i0, fr_i1, fr_i2, fr_i3, fr_i4, fr_i5, \
284 fr_i6, fr_i7, fr_t0, fr_t1, fr_t2, fr_t3) \
285 "punpcklbh "#fr_t0", "#fr_i0", "#fr_i1" \n\t" \
286 "punpckhbh "#fr_t1", "#fr_i0", "#fr_i1" \n\t" \
287 "punpcklbh "#fr_t2", "#fr_i2", "#fr_i3" \n\t" \
288 "punpckhbh "#fr_t3", "#fr_i2", "#fr_i3" \n\t" \
289 "punpcklbh "#fr_i0", "#fr_i4", "#fr_i5" \n\t" \
290 "punpckhbh "#fr_i1", "#fr_i4", "#fr_i5" \n\t" \
291 "punpcklbh "#fr_i2", "#fr_i6", "#fr_i7" \n\t" \
292 "punpckhbh "#fr_i3", "#fr_i6", "#fr_i7" \n\t" \
293 "punpcklhw "#fr_i4", "#fr_t0", "#fr_t2" \n\t" \
294 "punpckhhw "#fr_i5", "#fr_t0", "#fr_t2" \n\t" \
295 "punpcklhw "#fr_i6", "#fr_t1", "#fr_t3" \n\t" \
296 "punpckhhw "#fr_i7", "#fr_t1", "#fr_t3" \n\t" \
297 "punpcklhw "#fr_t0", "#fr_i0", "#fr_i2" \n\t" \
298 "punpckhhw "#fr_t1", "#fr_i0", "#fr_i2" \n\t" \
299 "punpcklhw "#fr_t2", "#fr_i1", "#fr_i3" \n\t" \
300 "punpckhhw "#fr_t3", "#fr_i1", "#fr_i3" \n\t" \
301 "punpcklwd "#fr_i0", "#fr_i4", "#fr_t0" \n\t" \
302 "punpckhwd "#fr_i1", "#fr_i4", "#fr_t0" \n\t" \
303 "punpcklwd "#fr_i2", "#fr_i5", "#fr_t1" \n\t" \
304 "punpckhwd "#fr_i3", "#fr_i5", "#fr_t1" \n\t" \
305 "punpcklwd "#fr_i4", "#fr_i6", "#fr_t2" \n\t" \
306 "punpckhwd "#fr_i5", "#fr_i6", "#fr_t2" \n\t" \
307 "punpcklwd "#fr_i6", "#fr_i7", "#fr_t3" \n\t" \
308 "punpckhwd "#fr_i7", "#fr_i7", "#fr_t3" \n\t"
/*
 * brief: Parallel SRA for 8 byte packaged data.
 * fr_i0: src
 * fr_i1: SRA number (SRAB number + 8)
 * fr_t0, fr_t1: temporary register
 * fr_d0: dst
 * Widens bytes to halfwords: punpck[lh]bh places each fr_i0 byte in the
 * high byte of a halfword (the stale low byte from fr_t0/fr_t1 is shifted
 * out, since the shift amount is >= 8), arithmetic-shifts each halfword,
 * then re-packs with signed saturation (packsshb).
 */
317 #define PSRAB_MMI(fr_i0, fr_i1, fr_t0, fr_t1, fr_d0) \
318 "punpcklbh "#fr_t0", "#fr_t0", "#fr_i0" \n\t" \
319 "punpckhbh "#fr_t1", "#fr_t1", "#fr_i0" \n\t" \
320 "psrah "#fr_t0", "#fr_t0", "#fr_i1" \n\t" \
321 "psrah "#fr_t1", "#fr_t1", "#fr_i1" \n\t" \
322 "packsshb "#fr_d0", "#fr_t0", "#fr_t1" \n\t"
/*
 * brief: Parallel SRL for 8 byte packaged data.
 * fr_i0: src
 * fr_i1: SRL number (SRLB number + 8)
 * fr_t0, fr_t1: temporary register
 * fr_d0: dst
 * Same widen/shift/pack scheme as PSRAB_MMI, but with a logical halfword
 * shift (psrlh).
 * NOTE(review): the final pack is packsshb (signed saturation); results
 * are only exact when the shifted bytes fit in a signed byte, i.e. the
 * effective byte shift is >= 1 — confirm callers never use a shift of 0.
 */
331 #define PSRLB_MMI(fr_i0, fr_i1, fr_t0, fr_t1, fr_d0) \
332 "punpcklbh "#fr_t0", "#fr_t0", "#fr_i0" \n\t" \
333 "punpckhbh "#fr_t1", "#fr_t1", "#fr_i0" \n\t" \
334 "psrlh "#fr_t0", "#fr_t0", "#fr_i1" \n\t" \
335 "psrlh "#fr_t1", "#fr_t1", "#fr_i1" \n\t" \
336 "packsshb "#fr_d0", "#fr_t0", "#fr_t1" \n\t"
/* In-place arithmetic shift right, per 16-bit element, of four packed
 * registers by the same shift operand. */
338 #define PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift) \
339 "psrah "#fp1", "#fp1", "#shift" \n\t" \
340 "psrah "#fp2", "#fp2", "#shift" \n\t" \
341 "psrah "#fp3", "#fp3", "#shift" \n\t" \
342 "psrah "#fp4", "#fp4", "#shift" \n\t"
/* Same for eight registers, expressed as two PSRAH_4_MMI groups. */
344 #define PSRAH_8_MMI(fp1, fp2, fp3, fp4, fp5, fp6, fp7, fp8, shift) \
345 PSRAH_4_MMI(fp1, fp2, fp3, fp4, shift) \
346 PSRAH_4_MMI(fp5, fp6, fp7, fp8, shift)
/*
 * brief: per-32-bit-word rounding shift: (((value) + (1 << ((n) - 1))) >> (n))
 * fr_i0: value, src & dst (two packed words)
 * fr_i1: operand number n (packed words)
 * fr_t0, fr_t1: temporary FPR
 * gr_t0: temporary GPR
 * Steps: load 1 and duplicate it into both word lanes (li/dmtc1/
 * punpcklwd); fr_t1 = n - 1 (psubw); fr_t1 = 1 << (n - 1) (psllw);
 * fr_i0 += fr_t1 (paddw); fr_i0 >>= n arithmetically (psraw).
 */
355 #define ROUND_POWER_OF_TWO_MMI(fr_i0, fr_i1, fr_t0, fr_t1, gr_t0) \
356 "li "#gr_t0", 0x01 \n\t" \
357 "dmtc1 "#gr_t0", "#fr_t0" \n\t" \
358 "punpcklwd "#fr_t0", "#fr_t0", "#fr_t0" \n\t" \
359 "psubw "#fr_t1", "#fr_i1", "#fr_t0" \n\t" \
360 "psllw "#fr_t1", "#fr_t0", "#fr_t1" \n\t" \
361 "paddw "#fr_i0", "#fr_i0", "#fr_t1" \n\t" \
362 "psraw "#fr_i0", "#fr_i0", "#fr_i1" \n\t"
364 #endif /* AVUTIL_MIPS_MMIUTILS_H */