/*
 * Loongson SIMD optimized h264pred
 *
 * Copyright (c) 2015 Loongson Technology Corporation Limited
 * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 *                    Zhang Shuangshuang <zhangshuangshuang@ict.ac.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "h264pred_mips.h"
#include "constants.h"

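/* 16x16 vertical prediction: load the 16 pixels above the block with
 * unaligned ldl/ldr pairs and copy them into all 16 rows. */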
void ff_pred16x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dsubu $2, %0, %1                   \r\n"
        "daddu $3, %0, $0                   \r\n"
        "ldl $4, 7($2)                      \r\n"
        "ldr $4, 0($2)                      \r\n"
        "ldl $5, 15($2)                     \r\n"
        "ldr $5, 8($2)                      \r\n"
        "dli $6, 0x10                       \r\n"
        "1:                                 \r\n"
        "sdl $4, 7($3)                      \r\n"
        "sdr $4, 0($3)                      \r\n"
        "sdl $5, 15($3)                     \r\n"
        "sdr $5, 8($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "daddiu $6, -1                      \r\n"
        "bnez $6, 1b                        \r\n"
        ::"r"(src),"r"(stride)
        : "$2","$3","$4","$5","$6","memory"
    );
}

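/* 16x16 horizontal prediction: fill each row with its left neighbour;
 * multiplying the byte by ff_pb_1 (eight bytes of 0x01) splats it across
 * a 64-bit word. */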
void ff_pred16x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "daddiu $2, %0, -1                  \r\n"
        "daddu $3, %0, $0                   \r\n"
        "dli $6, 0x10                       \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
        "dmul $5, $4, %2                    \r\n"
        "sdl $5, 7($3)                      \r\n"
        "sdr $5, 0($3)                      \r\n"
        "sdl $5, 15($3)                     \r\n"
        "sdr $5, 8($3)                      \r\n"
        "daddu $2, %1                       \r\n"
        "daddu $3, %1                       \r\n"
        "daddiu $6, -1                      \r\n"
        "bnez $6, 1b                        \r\n"
        ::"r"(src),"r"(stride),"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6","memory"
    );
}

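/* 16x16 DC prediction: sum the 16 left and 16 top neighbours, round with
 * (sum + 16) >> 5 and fill the block with the splatted average. */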
void ff_pred16x16_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "daddiu $2, %0, -1                  \r\n"
        "dli $6, 0x10                       \r\n"
        "xor $8, $8, $8                     \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
        "daddu $8, $8, $4                   \r\n"
        "daddu $2, $2, %1                   \r\n"
        "daddiu $6, $6, -1                  \r\n"
        "bnez $6, 1b                        \r\n"
        "dli $6, 0x10                       \r\n"
        "negu $3, %1                        \r\n"
        "daddu $2, %0, $3                   \r\n"
        "2:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
        "daddu $8, $8, $4                   \r\n"
        "daddiu $2, $2, 1                   \r\n"
        "daddiu $6, $6, -1                  \r\n"
        "bnez $6, 2b                        \r\n"
        "daddiu $8, $8, 0x10                \r\n"
        "dsra $8, 5                         \r\n"
        "dmul $5, $8, %2                    \r\n"
        "daddu $2, %0, $0                   \r\n"
        "dli $6, 0x10                       \r\n"
        "3:                                 \r\n"
        "sdl $5, 7($2)                      \r\n"
        "sdr $5, 0($2)                      \r\n"
        "sdl $5, 15($2)                     \r\n"
        "sdr $5, 8($2)                      \r\n"
        "daddu $2, $2, %1                   \r\n"
        "daddiu $6, $6, -1                  \r\n"
        "bnez $6, 3b                        \r\n"
        ::"r"(src),"r"(stride),"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6","$8","memory"
    );
}

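/* 8x8 (luma 8x8 transform) top-DC prediction: low-pass filter the top row
 * with (tl + 2*t + tr + 2) >> 2, substituting edge pixels when
 * has_topleft/has_topright is 0, then splat (sum + 4) >> 3 over the block.
 * Multiplying the DC by 0x01010101 produces the 32-bit splat. */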
void ff_pred8x8l_top_dc_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    int y;
    uint32_t dc;

    __asm__ volatile (
        "ldl $8, 7(%1)                      \r\n"
        "ldr $8, 0(%1)                      \r\n"
        "ldl $9, 7(%2)                      \r\n"
        "ldr $9, 0(%2)                      \r\n"
        "ldl $10, 7(%3)                     \r\n"
        "ldr $10, 0(%3)                     \r\n"
        "dmtc1 $8, $f2                      \r\n"
        "dmtc1 $9, $f4                      \r\n"
        "dmtc1 $10, $f6                     \r\n"
        "dmtc1 $0, $f0                      \r\n"
        "punpcklbh $f8, $f2, $f0            \r\n"
        "punpckhbh $f10, $f2, $f0           \r\n"
        "punpcklbh $f12, $f4, $f0           \r\n"
        "punpckhbh $f14, $f4, $f0           \r\n"
        "punpcklbh $f16, $f6, $f0           \r\n"
        "punpckhbh $f18, $f6, $f0           \r\n"
        "bnez %4, 1f                        \r\n"
        "pinsrh_0 $f8, $f8, $f12            \r\n"
        "1:                                 \r\n"
        "bnez %5, 2f                        \r\n"
        "pinsrh_3 $f18, $f18, $f14          \r\n"
        "2:                                 \r\n"
        "daddiu $8, $0, 2                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f22, $f20, $f0             \r\n"
        "pmullh $f12, $f12, $f22            \r\n"
        "pmullh $f14, $f14, $f22            \r\n"
        "paddh $f8, $f8, $f12               \r\n"
        "paddh $f10, $f10, $f14             \r\n"
        "paddh $f8, $f8, $f16               \r\n"
        "paddh $f10, $f10, $f18             \r\n"
        "paddh $f8, $f8, $f22               \r\n"
        "paddh $f10, $f10, $f22             \r\n"
        "psrah $f8, $f8, $f20               \r\n"
        "psrah $f10, $f10, $f20             \r\n"
        "packushb $f4, $f8, $f10            \r\n"
        "biadd $f2, $f4                     \r\n"
        "mfc1 $9, $f2                       \r\n"
        "addiu $9, $9, 4                    \r\n"
        "dsrl $9, $9, 3                     \r\n"
        "li $8, 0x01010101                  \r\n"
        "mul %0, $9, $8                     \r\n"
        : "=r"(dc)
        : "r"(src-stride-1),"r"(src-stride),"r"(src-stride+1),
          "r"(has_topleft),"r"(has_topright)
        : "$8","$9","$10"
    );

    for (y=0; y<8; y++) {
        AV_WN4PA(((uint32_t*)src)+0, dc);
        AV_WN4PA(((uint32_t*)src)+1, dc);
        src += stride;
    }
}

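/* 8x8 DC prediction: dc1 sums the filtered left column (l0..l7, computed
 * in C below), the MMI block computes dc2 as the sum of the filtered top
 * row, and the block is filled with (dc1 + dc2 + 8) >> 4. */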
void ff_pred8x8l_dc_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    int y;
    uint32_t dc, dc1, dc2;

    const int l0 = ((has_topleft ? src[-1+-1*stride] : src[-1+0*stride]) + 2*src[-1+0*stride] + src[-1+1*stride] + 2) >> 2;
    const int l1 = (src[-1+0*stride] + 2*src[-1+1*stride] + src[-1+2*stride] + 2) >> 2;
    const int l2 = (src[-1+1*stride] + 2*src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
    const int l3 = (src[-1+2*stride] + 2*src[-1+3*stride] + src[-1+4*stride] + 2) >> 2;
    const int l4 = (src[-1+3*stride] + 2*src[-1+4*stride] + src[-1+5*stride] + 2) >> 2;
    const int l5 = (src[-1+4*stride] + 2*src[-1+5*stride] + src[-1+6*stride] + 2) >> 2;
    const int l6 = (src[-1+5*stride] + 2*src[-1+6*stride] + src[-1+7*stride] + 2) >> 2;
    const int l7 = (src[-1+6*stride] + 2*src[-1+7*stride] + src[-1+7*stride] + 2) >> 2;

    __asm__ volatile (
        "ldl $8, 7(%1)                      \r\n"
        "ldr $8, 0(%1)                      \r\n"
        "ldl $9, 7(%2)                      \r\n"
        "ldr $9, 0(%2)                      \r\n"
        "ldl $10, 7(%3)                     \r\n"
        "ldr $10, 0(%3)                     \r\n"
        "dmtc1 $8, $f2                      \r\n"
        "dmtc1 $9, $f4                      \r\n"
        "dmtc1 $10, $f6                     \r\n"
        "dmtc1 $0, $f0                      \r\n"
        "punpcklbh $f8, $f2, $f0            \r\n"
        "punpckhbh $f10, $f2, $f0           \r\n"
        "punpcklbh $f12, $f4, $f0           \r\n"
        "punpckhbh $f14, $f4, $f0           \r\n"
        "punpcklbh $f16, $f6, $f0           \r\n"
        "punpckhbh $f18, $f6, $f0           \r\n"
        "daddiu $8, $0, 3                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f28, $f10, $f20            \r\n"
        "pshufh $f30, $f18, $f20            \r\n"
        "pinsrh_3 $f10, $f10, $f30          \r\n"
        "pinsrh_3 $f18, $f18, $f28          \r\n"
        "bnez %4, 1f                        \r\n"
        "pinsrh_0 $f8, $f8, $f12            \r\n"
        "1:                                 \r\n"
        "bnez %5, 2f                        \r\n"
        "pshufh $f30, $f14, $f20            \r\n"
        "pinsrh_3 $f10, $f10, $f30          \r\n"
        "2:                                 \r\n"
        "daddiu $8, $0, 2                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f22, $f20, $f0             \r\n"
        "pmullh $f12, $f12, $f22            \r\n"
        "pmullh $f14, $f14, $f22            \r\n"
        "paddh $f8, $f8, $f12               \r\n"
        "paddh $f10, $f10, $f14             \r\n"
        "paddh $f8, $f8, $f16               \r\n"
        "paddh $f10, $f10, $f18             \r\n"
        "paddh $f8, $f8, $f22               \r\n"
        "paddh $f10, $f10, $f22             \r\n"
        "psrah $f8, $f8, $f20               \r\n"
        "psrah $f10, $f10, $f20             \r\n"
        "packushb $f4, $f8, $f10            \r\n"
        "biadd $f2, $f4                     \r\n"
        "mfc1 %0, $f2                       \r\n"
        : "=r"(dc2)
        : "r"(src-stride-1),"r"(src-stride),"r"(src-stride+1),
          "r"(has_topleft),"r"(has_topright)
        : "$8","$9","$10"
    );

    dc1 = l0+l1+l2+l3+l4+l5+l6+l7;
    dc = PIXEL_SPLAT_X4((dc1+dc2+8)>>4);

    for (y=0; y<8; y++) {
        AV_WN4PA(((uint32_t*)src)+0, dc);
        AV_WN4PA(((uint32_t*)src)+1, dc);
        src += stride;
    }
}

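/* 8x8 horizontal prediction: each row is filled with its low-pass
 * filtered left neighbour. */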
void ff_pred8x8l_horizontal_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    const int l0 = ((has_topleft ? src[-1+-1*stride] : src[-1+0*stride]) + 2*src[-1+0*stride] + src[-1+1*stride] + 2) >> 2;
    const int l1 = (src[-1+0*stride] + 2*src[-1+1*stride] + src[-1+2*stride] + 2) >> 2;
    const int l2 = (src[-1+1*stride] + 2*src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
    const int l3 = (src[-1+2*stride] + 2*src[-1+3*stride] + src[-1+4*stride] + 2) >> 2;
    const int l4 = (src[-1+3*stride] + 2*src[-1+4*stride] + src[-1+5*stride] + 2) >> 2;
    const int l5 = (src[-1+4*stride] + 2*src[-1+5*stride] + src[-1+6*stride] + 2) >> 2;
    const int l6 = (src[-1+5*stride] + 2*src[-1+6*stride] + src[-1+7*stride] + 2) >> 2;
    const int l7 = (src[-1+6*stride] + 2*src[-1+7*stride] + src[-1+7*stride] + 2) >> 2;

    AV_WN4PA(src+0*stride, PIXEL_SPLAT_X4(l0));
    AV_WN4PA(src+0*stride+4, PIXEL_SPLAT_X4(l0));
    AV_WN4PA(src+1*stride, PIXEL_SPLAT_X4(l1));
    AV_WN4PA(src+1*stride+4, PIXEL_SPLAT_X4(l1));
    AV_WN4PA(src+2*stride, PIXEL_SPLAT_X4(l2));
    AV_WN4PA(src+2*stride+4, PIXEL_SPLAT_X4(l2));
    AV_WN4PA(src+3*stride, PIXEL_SPLAT_X4(l3));
    AV_WN4PA(src+3*stride+4, PIXEL_SPLAT_X4(l3));
    AV_WN4PA(src+4*stride, PIXEL_SPLAT_X4(l4));
    AV_WN4PA(src+4*stride+4, PIXEL_SPLAT_X4(l4));
    AV_WN4PA(src+5*stride, PIXEL_SPLAT_X4(l5));
    AV_WN4PA(src+5*stride+4, PIXEL_SPLAT_X4(l5));
    AV_WN4PA(src+6*stride, PIXEL_SPLAT_X4(l6));
    AV_WN4PA(src+6*stride+4, PIXEL_SPLAT_X4(l6));
    AV_WN4PA(src+7*stride, PIXEL_SPLAT_X4(l7));
    AV_WN4PA(src+7*stride+4, PIXEL_SPLAT_X4(l7));
}

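/* 8x8 vertical prediction: the MMI block filters the top row with
 * (tl + 2*t + tr + 2) >> 2 and stores it to the first row; the C loop
 * replicates it to the remaining seven rows. */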
void ff_pred8x8l_vertical_8_mmi(uint8_t *src, int has_topleft,
        int has_topright, ptrdiff_t stride)
{
    int y;
    uint32_t a, b;

    __asm__ volatile (
        "ldl $8, 7(%1)                      \r\n"
        "ldr $8, 0(%1)                      \r\n"
        "ldl $9, 7(%2)                      \r\n"
        "ldr $9, 0(%2)                      \r\n"
        "ldl $10, 7(%3)                     \r\n"
        "ldr $10, 0(%3)                     \r\n"
        "dmtc1 $8, $f2                      \r\n"
        "dmtc1 $9, $f4                      \r\n"
        "dmtc1 $10, $f6                     \r\n"
        "dmtc1 $0, $f0                      \r\n"
        "punpcklbh $f8, $f2, $f0            \r\n"
        "punpckhbh $f10, $f2, $f0           \r\n"
        "punpcklbh $f12, $f4, $f0           \r\n"
        "punpckhbh $f14, $f4, $f0           \r\n"
        "punpcklbh $f16, $f6, $f0           \r\n"
        "punpckhbh $f18, $f6, $f0           \r\n"
        "bnez %4, 1f                        \r\n"
        "pinsrh_0 $f8, $f8, $f12            \r\n"
        "1:                                 \r\n"
        "bnez %5, 2f                        \r\n"
        "pinsrh_3 $f18, $f18, $f14          \r\n"
        "2:                                 \r\n"
        "daddiu $8, $0, 2                   \r\n"
        "dmtc1 $8, $f20                     \r\n"
        "pshufh $f22, $f20, $f0             \r\n"
        "pmullh $f12, $f12, $f22            \r\n"
        "pmullh $f14, $f14, $f22            \r\n"
        "paddh $f8, $f8, $f12               \r\n"
        "paddh $f10, $f10, $f14             \r\n"
        "paddh $f8, $f8, $f16               \r\n"
        "paddh $f10, $f10, $f18             \r\n"
        "paddh $f8, $f8, $f22               \r\n"
        "paddh $f10, $f10, $f22             \r\n"
        "psrah $f8, $f8, $f20               \r\n"
        "psrah $f10, $f10, $f20             \r\n"
        "packushb $f4, $f8, $f10            \r\n"
        "sdc1 $f4, %0                       \r\n"
        : "=m"(*src)
        : "r"(src-stride-1),"r"(src-stride),"r"(src-stride+1),
          "r"(has_topleft),"r"(has_topright)
        : "$8","$9","$10"
    );

    a = AV_RN4PA(((uint32_t*)src)+0);
    b = AV_RN4PA(((uint32_t*)src)+1);

    for (y=1; y<8; y++) {
        AV_WN4PA(((uint32_t*)(src+y*stride))+0, a);
        AV_WN4PA(((uint32_t*)(src+y*stride))+1, b);
    }
}

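/* 4x4 DC prediction: average the 4 top and 4 left neighbours, splat the
 * DC across a word via ff_pb_1 and store it to the 4 rows with indexed
 * word stores (gsswx). */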
void ff_pred4x4_dc_8_mmi(uint8_t *src, const uint8_t *topright,
        ptrdiff_t stride)
{
    const int dc = (src[-stride] + src[1-stride] + src[2-stride]
                 + src[3-stride] + src[-1+0*stride] + src[-1+1*stride]
                 + src[-1+2*stride] + src[-1+3*stride] + 4) >> 3;

    __asm__ volatile (
        "daddu $2, %2, $0                   \r\n"
        "dmul $3, $2, %3                    \r\n"
        "xor $4, $4, $4                     \r\n"
        "gsswx $3, 0(%0,$4)                 \r\n"
        "daddu $4, %1                       \r\n"
        "gsswx $3, 0(%0,$4)                 \r\n"
        "daddu $4, %1                       \r\n"
        "gsswx $3, 0(%0,$4)                 \r\n"
        "daddu $4, %1                       \r\n"
        "gsswx $3, 0(%0,$4)                 \r\n"
        ::"r"(src),"r"(stride),"r"(dc),"r"(ff_pb_1)
        : "$2","$3","$4","memory"
    );
}

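/* 8x8 vertical prediction: copy the 8 pixels above the block into all
 * 8 rows. */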
void ff_pred8x8_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dsubu $2, %0, %1                   \r\n"
        "daddu $3, %0, $0                   \r\n"
        "ldl $4, 7($2)                      \r\n"
        "ldr $4, 0($2)                      \r\n"
        "dli $5, 0x8                        \r\n"
        "1:                                 \r\n"
        "sdl $4, 7($3)                      \r\n"
        "sdr $4, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "daddiu $5, -1                      \r\n"
        "bnez $5, 1b                        \r\n"
        ::"r"(src),"r"(stride)
        : "$2","$3","$4","$5","memory"
    );
}

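/* 8x8 horizontal prediction: fill each of the 8 rows with its left
 * neighbour, splatted via ff_pb_1. */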
void ff_pred8x8_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "daddiu $2, %0, -1                  \r\n"
        "daddu $3, %0, $0                   \r\n"
        "dli $6, 0x8                        \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
        "dmul $5, $4, %2                    \r\n"
        "sdl $5, 7($3)                      \r\n"
        "sdr $5, 0($3)                      \r\n"
        "daddu $2, %1                       \r\n"
        "daddu $3, %1                       \r\n"
        "daddiu $6, -1                      \r\n"
        "bnez $6, 1b                        \r\n"
        ::"r"(src),"r"(stride),"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6","memory"
    );
}

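/* 16x16 plane prediction shared by H.264, SVQ3 and RV40: the first part
 * of the MMI block computes the gradients H and V from the weighted top
 * row and left column, the branchy middle applies the codec-specific
 * scaling (H.264: (5*x + 32) >> 6; RV40: (x + (x >> 2)) >> 4; SVQ3:
 * x * 5 / 16 with H and V swapped), and the final loop writes the
 * saturated plane a + b*x + c*y row by row. */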
static void ff_pred16x16_plane_compat_8_mmi(uint8_t *src, ptrdiff_t stride,
        const int svq3, const int rv40)
{
    __asm__ volatile (
        "negu $2, %1                        \r\n"
        "daddu $3, %0, $2                   \r\n"
        "xor $f8, $f8, $f8                  \r\n"
        "gslwlc1 $f0, 2($3)                 \r\n"
        "gslwrc1 $f0, -1($3)                \r\n"
        "gslwlc1 $f2, 6($3)                 \r\n"
        "gslwrc1 $f2, 3($3)                 \r\n"
        "gslwlc1 $f4, 11($3)                \r\n"
        "gslwrc1 $f4, 8($3)                 \r\n"
        "gslwlc1 $f6, 15($3)                \r\n"
        "gslwrc1 $f6, 12($3)                \r\n"
        "punpcklbh $f0, $f0, $f8            \r\n"
        "punpcklbh $f2, $f2, $f8            \r\n"
        "punpcklbh $f4, $f4, $f8            \r\n"
        "punpcklbh $f6, $f6, $f8            \r\n"
        "dmtc1 %4, $f20                     \r\n"
        "dmtc1 %5, $f22                     \r\n"
        "dmtc1 %6, $f24                     \r\n"
        "dmtc1 %7, $f26                     \r\n"
        "pmullh $f0, $f0, $f20              \r\n"
        "pmullh $f2, $f2, $f22              \r\n"
        "pmullh $f4, $f4, $f24              \r\n"
        "pmullh $f6, $f6, $f26              \r\n"
        "paddsh $f0, $f0, $f4               \r\n"
        "paddsh $f2, $f2, $f6               \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "dli $4, 0xE                        \r\n"
        "dmtc1 $4, $f28                     \r\n"
        "pshufh $f2, $f0, $f28              \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "dli $4, 0x1                        \r\n"
        "dmtc1 $4, $f30                     \r\n"
        "pshufh $f2, $f0, $f30              \r\n"
        "paddsh $f10, $f0, $f2              \r\n"
        "daddiu $3, %0, -1                  \r\n"
        "daddu $3, $2                       \r\n"
        "lbu $4, 0($3)                      \r\n"
        "lbu $8, 16($3)                     \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $5, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $6, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $7, 0($3)                      \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f0                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $4, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $5, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $6, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $7, 0($3)                      \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f2                      \r\n"
        "daddu $3, %1                       \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $4, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $5, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $6, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $7, 0($3)                      \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f4                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $4, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $5, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $6, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "lbu $7, 0($3)                      \r\n"
        "daddu $8, $7                       \r\n"
        "daddiu $8, 1                       \r\n"
        "dsll $8, 4                         \r\n"
        "dsll $5, 16                        \r\n"
        "dsll $6, 32                        \r\n"
        "dsll $7, 48                        \r\n"
        "or $6, $7                          \r\n"
        "or $4, $5                          \r\n"
        "or $4, $6                          \r\n"
        "dmtc1 $4, $f6                      \r\n"
        "pmullh $f0, $f0, $f20              \r\n"
        "pmullh $f2, $f2, $f22              \r\n"
        "pmullh $f4, $f4, $f24              \r\n"
        "pmullh $f6, $f6, $f26              \r\n"
        "paddsh $f0, $f0, $f4               \r\n"
        "paddsh $f2, $f2, $f6               \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "pshufh $f2, $f0, $f28              \r\n"
        "paddsh $f0, $f0, $f2               \r\n"
        "pshufh $f2, $f0, $f30              \r\n"
        "paddsh $f12, $f0, $f2              \r\n"
        "dmfc1 $2, $f10                     \r\n"
        "dsll $2, 48                        \r\n"
        "dsra $2, 48                        \r\n"
        "dmfc1 $3, $f12                     \r\n"
        "dsll $3, 48                        \r\n"
        "dsra $3, 48                        \r\n"
        "beqz %2, 1f                        \r\n"
        "dli $4, 4                          \r\n"
        "ddiv $2, $4                        \r\n"
        "ddiv $3, $4                        \r\n"
        "dli $4, 5                          \r\n"
        "dmul $2, $4                        \r\n"
        "dmul $3, $4                        \r\n"
        "dli $4, 16                         \r\n"
        "ddiv $2, $4                        \r\n"
        "ddiv $3, $4                        \r\n"
        "daddu $4, $2, $0                   \r\n"
        "daddu $2, $3, $0                   \r\n"
        "daddu $3, $4, $0                   \r\n"
        "b 2f                               \r\n"
        "1:                                 \r\n"
        "beqz %3, 1f                        \r\n"
        "dsra $4, $2, 2                     \r\n"
        "daddu $2, $4                       \r\n"
        "dsra $4, $3, 2                     \r\n"
        "daddu $3, $4                       \r\n"
        "dsra $2, 4                         \r\n"
        "dsra $3, 4                         \r\n"
        "b 2f                               \r\n"
        "1:                                 \r\n"
        "dli $4, 5                          \r\n"
        "dmul $2, $4                        \r\n"
        "dmul $3, $4                        \r\n"
        "daddiu $2, 32                      \r\n"
        "daddiu $3, 32                      \r\n"
        "dsra $2, 6                         \r\n"
        "dsra $3, 6                         \r\n"
        "2:                                 \r\n"
        "daddu $5, $2, $3                   \r\n"
        "dli $4, 7                          \r\n"
        "dmul $5, $4                        \r\n"
        "dsubu $8, $5                       \r\n"
        "dmtc1 $0, $f8                      \r\n"
        "dmtc1 $2, $f0                      \r\n"
        "pshufh $f0, $f0, $f8               \r\n"
        "dmtc1 $3, $f10                     \r\n"
        "pshufh $f10, $f10, $f8             \r\n"
        "dmtc1 $8, $f12                     \r\n"
        "pshufh $f12, $f12, $f8             \r\n"
        "dli $4, 5                          \r\n"
        "dmtc1 $4, $f14                     \r\n"
        "dmtc1 %8, $f2                      \r\n"
        "pmullh $f2, $f2, $f0               \r\n"
        "dmtc1 %9, $f4                      \r\n"
        "pmullh $f4, $f4, $f0               \r\n"
        "dmtc1 %10, $f6                     \r\n"
        "pmullh $f6, $f6, $f0               \r\n"
        "dmtc1 %11, $f8                     \r\n"
        "pmullh $f8, $f8, $f0               \r\n"
        "daddu $3, %0, $0                   \r\n"
        "dli $2, 16                         \r\n"
        "1:                                 \r\n"
        "paddsh $f16, $f2, $f12             \r\n"
        "psrah $f16, $f16, $f14             \r\n"
        "paddsh $f18, $f4, $f12             \r\n"
        "psrah $f18, $f18, $f14             \r\n"
        "packushb $f20, $f16, $f18          \r\n"
        "gssdlc1 $f20, 7($3)                \r\n"
        "gssdrc1 $f20, 0($3)                \r\n"
        "paddsh $f16, $f6, $f12             \r\n"
        "psrah $f16, $f16, $f14             \r\n"
        "paddsh $f18, $f8, $f12             \r\n"
        "psrah $f18, $f18, $f14             \r\n"
        "packushb $f20, $f16, $f18          \r\n"
        "gssdlc1 $f20, 15($3)               \r\n"
        "gssdrc1 $f20, 8($3)                \r\n"
        "paddsh $f12, $f12, $f10            \r\n"
        "daddu $3, %1                       \r\n"
        "daddiu $2, -1                      \r\n"
        "bnez $2, 1b                        \r\n"
        ::"r"(src),"r"(stride),"r"(svq3),"r"(rv40),
          "r"(ff_pw_m8tom5),"r"(ff_pw_m4tom1),"r"(ff_pw_1to4),"r"(ff_pw_5to8),
          "r"(ff_pw_0to3),"r"(ff_pw_4to7),"r"(ff_pw_8tob),"r"(ff_pw_ctof)
        : "$2","$3","$4","$5","$6","$7","$8","memory"
    );
}

void ff_pred16x16_plane_svq3_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 1, 0);
}

void ff_pred16x16_plane_rv40_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 0, 1);
}

void ff_pred16x16_plane_h264_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    ff_pred16x16_plane_compat_8_mmi(src, stride, 0, 0);
}

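/* 8x8 top-DC prediction: each 4-pixel half of the top row is averaged
 * separately with (sum + 2) >> 2 and splatted over its half of the
 * block. */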
void ff_pred8x8_top_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dli $2, 2                          \r\n"
        "xor $f0, $f0, $f0                  \r\n"
        "xor $f2, $f2, $f2                  \r\n"
        "xor $f30, $f30, $f30               \r\n"
        "negu $3, %1                        \r\n"
        "daddu $3, $3, %0                   \r\n"
        "gsldlc1 $f4, 7($3)                 \r\n"
        "gsldrc1 $f4, 0($3)                 \r\n"
        "punpcklbh $f0, $f4, $f30           \r\n"
        "punpckhbh $f2, $f4, $f30           \r\n"
        "biadd $f0, $f0                     \r\n"
        "biadd $f2, $f2                     \r\n"
        "pshufh $f0, $f0, $f30              \r\n"
        "pshufh $f2, $f2, $f30              \r\n"
        "dmtc1 $2, $f4                      \r\n"
        "pshufh $f4, $f4, $f30              \r\n"
        "paddush $f0, $f0, $f4              \r\n"
        "paddush $f2, $f2, $f4              \r\n"
        "dmtc1 $2, $f4                      \r\n"
        "psrlh $f0, $f0, $f4                \r\n"
        "psrlh $f2, $f2, $f4                \r\n"
        "packushb $f4, $f0, $f2             \r\n"
        "dli $2, 8                          \r\n"
        "1:                                 \r\n"
        "gssdlc1 $f4, 7(%0)                 \r\n"
        "gssdrc1 $f4, 0(%0)                 \r\n"
        "daddu %0, %0, %1                   \r\n"
        "daddiu $2, $2, -1                  \r\n"
        "bnez $2, 1b                        \r\n"
        ::"r"(src),"r"(stride)
        : "$2","$3","memory"
    );
}

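/* 8x8 DC prediction: the four 4x4 quadrants get separate DC values.
 * The top-left quadrant averages 4 top + 4 left neighbours, top-right
 * uses the top only, bottom-left the left only, and bottom-right
 * averages the top-right and bottom-left sums. */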
void ff_pred8x8_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "negu $2, %1                        \r\n"
        "daddu $2, $2, %0                   \r\n"
        "daddiu $5, $2, 4                   \r\n"
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $0, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $0, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $3, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $4, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $3, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $4, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        "lbu $6, 0($2)                      \r\n"
        "daddu $3, $3, $6                   \r\n"
        "daddiu $2, 1                       \r\n"
        "lbu $6, 0($5)                      \r\n"
        "daddu $4, $4, $6                   \r\n"
        "daddiu $5, 1                       \r\n"
        "dli $6, -1                         \r\n"
        "daddu $6, $6, %0                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $0, $5                   \r\n"
        "daddu $6, $6, %1                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $7, $5                   \r\n"
        "daddu $6, $6, %1                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $7, $5                   \r\n"
        "daddu $6, $6, %1                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $7, $7, $5                   \r\n"
        "daddu $6, $6, %1                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $0, $5                   \r\n"
        "daddu $6, $6, %1                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $8, $5                   \r\n"
        "daddu $6, $6, %1                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $8, $5                   \r\n"
        "daddu $6, $6, %1                   \r\n"
        "lbu $5, 0($6)                      \r\n"
        "daddu $8, $8, $5                   \r\n"
        "daddu $3, $3, $7                   \r\n"
        "daddiu $3, $3, 4                   \r\n"
        "daddiu $4, $4, 2                   \r\n"
        "daddiu $5, $8, 2                   \r\n"
        "daddu $6, $4, $5                   \r\n"
        "dsrl $3, 3                         \r\n"
        "dsrl $4, 2                         \r\n"
        "dsrl $5, 2                         \r\n"
        "dsrl $6, 3                         \r\n"
        "xor $f30, $f30, $f30               \r\n"
        "dmtc1 $3, $f0                      \r\n"
        "pshufh $f0, $f0, $f30              \r\n"
        "dmtc1 $4, $f2                      \r\n"
        "pshufh $f2, $f2, $f30              \r\n"
        "dmtc1 $5, $f4                      \r\n"
        "pshufh $f4, $f4, $f30              \r\n"
        "dmtc1 $6, $f6                      \r\n"
        "pshufh $f6, $f6, $f30              \r\n"
        "packushb $f0, $f0, $f2             \r\n"
        "packushb $f2, $f4, $f6             \r\n"
        "daddu $2, $0, %0                   \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %1                   \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %1                   \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %1                   \r\n"
        "sdc1 $f0, 0($2)                    \r\n"
        "daddu $2, $2, %1                   \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        "daddu $2, $2, %1                   \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        "daddu $2, $2, %1                   \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        "daddu $2, $2, %1                   \r\n"
        "sdc1 $f2, 0($2)                    \r\n"
        ::"r"(src),"r"(stride)
        :"$2","$3","$4","$5","$6","$7","$8","memory"
    );
}

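/* 8x16 vertical prediction: copy the 8 pixels above the block into all
 * 16 rows. */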
void ff_pred8x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "dsubu $2, %0, %1                   \r\n"
        "daddu $3, %0, $0                   \r\n"
        "ldl $4, 7($2)                      \r\n"
        "ldr $4, 0($2)                      \r\n"
        "dli $5, 0x10                       \r\n"
        "1:                                 \r\n"
        "sdl $4, 7($3)                      \r\n"
        "sdr $4, 0($3)                      \r\n"
        "daddu $3, %1                       \r\n"
        "daddiu $5, -1                      \r\n"
        "bnez $5, 1b                        \r\n"
        ::"r"(src),"r"(stride)
        : "$2","$3","$4","$5","memory"
    );
}

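/* 8x16 horizontal prediction: fill each of the 16 rows with its left
 * neighbour, splatted via ff_pb_1. */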
void ff_pred8x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
    __asm__ volatile (
        "daddiu $2, %0, -1                  \r\n"
        "daddu $3, %0, $0                   \r\n"
        "dli $6, 0x10                       \r\n"
        "1:                                 \r\n"
        "lbu $4, 0($2)                      \r\n"
        "dmul $5, $4, %2                    \r\n"
        "sdl $5, 7($3)                      \r\n"
        "sdr $5, 0($3)                      \r\n"
        "daddu $2, %1                       \r\n"
        "daddu $3, %1                       \r\n"
        "daddiu $6, -1                      \r\n"
        "bnez $6, 1b                        \r\n"
        ::"r"(src),"r"(stride),"r"(ff_pb_1)
        : "$2","$3","$4","$5","$6","memory"
    );
}