2 * copyright (c) 2002 Leon van Stuivenberg
4 * This file is part of Libav.
6 * Libav is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * Libav is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with Libav; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #ifndef AVCODEC_MIPS_MMI_H
22 #define AVCODEC_MIPS_MMI_H
26 #include "libavcodec/dsputil.h"
/* MMI-accelerated inverse-DCT entry points (implemented in a separate
 * translation unit).  NOTE(review): names suggest the standard libavcodec
 * idct_put/idct_add contract -- transform `block` (8x8 DCTELEM
 * coefficients) and store / add-with-clamp the result at `dest` with row
 * stride `line_size`; ff_mmi_idct() transforms `block` in place.
 * Confirm against the implementation. */
void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block);
void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block);
void ff_mmi_idct(DCTELEM *block);
32 #define align16 __attribute__ ((aligned (16)))
/* Numeric aliases (r1..r31) for the MIPS ABI register names, so the
 * instruction macros below can take plain rN tokens and paste them into
 * the asm text.  The trailing comments give each register's conventional
 * role; r1 ($at) is reserved for the assembler and r26/r27 ($k0/$k1)
 * for the kernel, so code using them must be careful. */
#define r1 $at //assembler!
#define r2 $v0 //return
#define r3 $v1 //return
#define r10 $t2 //temp
#define r11 $t3 //temp
#define r12 $t4 //temp
#define r13 $t5 //temp
#define r14 $t6 //temp
#define r15 $t7 //temp
#define r16 $s0 //saved temp
#define r17 $s1 //saved temp
#define r18 $s2 //saved temp
#define r19 $s3 //saved temp
#define r20 $s4 //saved temp
#define r21 $s5 //saved temp
#define r22 $s6 //saved temp
#define r23 $s7 //saved temp
#define r24 $t8 //temp
#define r25 $t9 //temp
#define r26 $k0 //kernel
#define r27 $k1 //kernel
#define r28 $gp //global ptr
#define r29 $sp //stack ptr
#define r30 $fp //frame ptr
#define r31 $ra //return addr
/* 128-bit quadword load/store (PS2 Emotion Engine lq/sq).
 * `reg` and `base` are register aliases from above, pasted literally into
 * the asm text; `off` must be a compile-time constant ("i" constraint).
 * NOTE(review): none of these asm statements declare outputs or memory
 * clobbers, so the compiler cannot see what they touch -- callers rely on
 * `volatile` ordering alone; verify before reordering surrounding code. */
#define lq(base, off, reg) \
        __asm__ volatile ("lq " #reg ", %0("#base ")" : : "i" (off) )

/* Quadword load from a C pointer.  NOTE(review): unlike sq2 below this
 * uses an "r" constraint, so %0 expands to a bare register holding the
 * address rather than a %0-style memory operand -- relies on the
 * assembler accepting "lq reg, $rN"; confirm against the toolchain. */
#define lq2(mem, reg) \
        __asm__ volatile ("lq " #reg ", %0" : : "r" (mem))

/* Quadword store, constant offset from a base register alias. */
#define sq(reg, off, base) \
        __asm__ volatile ("sq " #reg ", %0("#base ")" : : "i" (off) )
/* 64-bit doubleword load; all operands are pasted literally, so `off`
 * must be a token the assembler accepts as an immediate. */
#define ld(base, off, reg) \
        __asm__ volatile ("ld " #reg ", " #off "("#base ")")

/* Hand-encoded doubleword loads emitted as raw opcodes via .word
 * (I-type encoding: major opcode | base<<21 | rt<<16 | 16-bit offset).
 * Unlike the mnemonic macros, `base`/`reg`/`off` here must be NUMERIC
 * register numbers and offset, not $-name aliases.
 * 0xdc000000 = LD, 0x6c000000 = LDR (load right, for unaligned access),
 * 0x68000000 = LDL (load left). */
#define ld3(base, off, reg) \
        __asm__ volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))

#define ldr3(base, off, reg) \
        __asm__ volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))

#define ldl3(base, off, reg) \
        __asm__ volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))

/* 64-bit doubleword store, mnemonic form. */
#define sd(reg, off, base) \
        __asm__ volatile ("sd " #reg ", " #off "("#base ")")
//seems assembler has bug encoding mnemonic 'sd', so DIY
/* Raw-encoded SD (major opcode 0xfc000000); `reg`/`off`/`base` must be
 * numeric values, same I-type layout as ld3 above. */
#define sd3(reg, off, base) \
        __asm__ volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))

/* 32-bit word store, mnemonic form (operands pasted literally). */
#define sw(reg, off, base) \
        __asm__ volatile ("sw " #reg ", " #off "("#base ")")

/* Quadword store to a C lvalue; the "m" constraint lets the compiler
 * form the address expression (contrast with lq2 above). */
#define sq2(reg, mem) \
        __asm__ volatile ("sq " #reg ", %0" : : "m" (*(mem)))
/* PS2 Emotion Engine "parallel" (128-bit SIMD) instruction wrappers.
 * Register aliases are pasted literally into the mnemonic text, so the
 * compiler sees no operands at all.  NOTE(review): no inputs, outputs or
 * clobbers are declared on any of these asm statements -- correctness
 * depends on the caller managing the named registers and on `volatile`
 * preventing elimination; confirm instruction semantics against the
 * R5900 ISA manual. */

/* parallel interleave halfwords */
#define pinth(rs, rt, rd) \
        __asm__ volatile ("pinth " #rd ", " #rs ", " #rt )

/* parallel horizontal multiply-add halfwords */
#define phmadh(rs, rt, rd) \
        __asm__ volatile ("phmadh " #rd ", " #rs ", " #rt )

/* parallel copy upper / lower doubleword */
#define pcpyud(rs, rt, rd) \
        __asm__ volatile ("pcpyud " #rd ", " #rs ", " #rt )

#define pcpyld(rs, rt, rd) \
        __asm__ volatile ("pcpyld " #rd ", " #rs ", " #rt )

/* parallel copy halfword */
#define pcpyh(rt, rd) \
        __asm__ volatile ("pcpyh " #rd ", " #rt )

/* parallel add words */
#define paddw(rs, rt, rd) \
        __asm__ volatile ("paddw " #rd ", " #rs ", " #rt )

/* parallel extend from lower / upper words */
#define pextlw(rs, rt, rd) \
        __asm__ volatile ("pextlw " #rd ", " #rs ", " #rt )

#define pextuw(rs, rt, rd) \
        __asm__ volatile ("pextuw " #rd ", " #rs ", " #rt )

/* parallel extend from lower / upper halfwords */
#define pextlh(rs, rt, rd) \
        __asm__ volatile ("pextlh " #rd ", " #rs ", " #rt )

#define pextuh(rs, rt, rd) \
        __asm__ volatile ("pextuh " #rd ", " #rs ", " #rt )

/* parallel subtract words */
#define psubw(rs, rt, rd) \
        __asm__ volatile ("psubw " #rd ", " #rs ", " #rt )

/* parallel shift right arithmetic words; `sa` is a compile-time constant */
#define psraw(rt, sa, rd) \
        __asm__ volatile ("psraw " #rd ", " #rt ", %0" : : "i"(sa) )

/* parallel pack to halfwords / bytes */
#define ppach(rs, rt, rd) \
        __asm__ volatile ("ppach " #rd ", " #rs ", " #rt )

#define ppacb(rs, rt, rd) \
        __asm__ volatile ("ppacb " #rd ", " #rs ", " #rt )

/* parallel reverse halfwords */
#define prevh(rt, rd) \
        __asm__ volatile ("prevh " #rd ", " #rt )

/* parallel multiply halfwords (results also land in HI/LO; see pmfhl_uw) */
#define pmulth(rs, rt, rd) \
        __asm__ volatile ("pmulth " #rd ", " #rs ", " #rt )

/* parallel max / min of halfwords */
#define pmaxh(rs, rt, rd) \
        __asm__ volatile ("pmaxh " #rd ", " #rs ", " #rt )

#define pminh(rs, rt, rd) \
        __asm__ volatile ("pminh " #rd ", " #rs ", " #rt )

/* parallel interleave even halfwords */
#define pinteh(rs, rt, rd) \
        __asm__ volatile ("pinteh " #rd ", " #rs ", " #rt )

/* parallel add / subtract halfwords */
#define paddh(rs, rt, rd) \
        __asm__ volatile ("paddh " #rd ", " #rs ", " #rt )

#define psubh(rs, rt, rd) \
        __asm__ volatile ("psubh " #rd ", " #rs ", " #rt )

/* parallel shift right arithmetic halfwords; `sa` is a constant */
#define psrah(rt, sa, rd) \
        __asm__ volatile ("psrah " #rd ", " #rt ", %0" : : "i"(sa) )

/* parallel move from HI/LO registers, uw (upper word) form */
#define pmfhl_uw(rd) \
        __asm__ volatile ("pmfhl.uw " #rd)

/* parallel extend from lower bytes */
#define pextlb(rs, rt, rd) \
        __asm__ volatile ("pextlb " #rd ", " #rs ", " #rt )
179 #endif /* AVCODEC_MIPS_MMI_H */