;******************************************************************************
;* x86-SIMD-optimized IDCT for prores
;* this is identical to "simple" IDCT except for the clip range
;*
;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
%define W1sh2 22725 ; W1 = 90901 = 22725<<2 + 1
%define W2sh2 21407 ; W2 = 85627 = 21407<<2 - 1
%define W3sh2 19265 ; W3 = 77062 = 19265<<2 + 2
%define W4sh2 16384 ; W4 = 65535 = 16384<<2 - 1
%define W5sh2 12873 ; W5 = 51491 = 12873<<2 - 1
%define W6sh2  8867 ; W6 = 35468 =  8867<<2
%define W7sh2  4520 ; W7 = 18081 =  4520<<2 + 1
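
; The WNsh2 constants are the full IDCT weights W1..W7 scaled down by two bits
; so they fit signed 16-bit pmaddwd operands; the small +1/-1/+2 remainders
; noted above (W minus WNsh2<<2) are applied separately as plain adds/subtracts
; of the sign-extended rows, which is what the "-1*row[x]"-style comments
; inside IDCT_1D refer to.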
w4_plus_w2: times 4 dw W4sh2, +W2sh2
w4_min_w2:  times 4 dw W4sh2, -W2sh2
w4_plus_w6: times 4 dw W4sh2, +W6sh2
w4_min_w6:  times 4 dw W4sh2, -W6sh2
w1_plus_w3: times 4 dw W1sh2, +W3sh2
w3_min_w1:  times 4 dw W3sh2, -W1sh2
w7_plus_w3: times 4 dw W7sh2, +W3sh2
w3_min_w7:  times 4 dw W3sh2, -W7sh2
w1_plus_w5: times 4 dw W1sh2, +W5sh2
w5_min_w1:  times 4 dw W5sh2, -W1sh2
w5_plus_w7: times 4 dw W5sh2, +W7sh2
w7_min_w5:  times 4 dw W7sh2, -W5sh2
row_round:  times 8 dw (1<<14)
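
; Each table above interleaves two weights as {Wa, Wb} word pairs; after
; SBUTTERFLY3 interleaves two input rows the same way, one pmaddwd against such
; a table computes Wa*row[x] + Wb*row[y] in every dword lane, i.e. a whole
; butterfly term of the 1D IDCT per instruction.
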
section .text align=16
; interleave data while maintaining source
; %1=type, %2=dstlo, %3=dsthi, %4=src, %5=interleave
    punpckl%1   m%2, m%4, m%5
    punpckh%1   m%3, m%4, m%5
; %1/%2=src1/dst1, %3/%4=dst2, %5/%6=src2, %7=shift
; action: %3/%4 = %1/%2 - %5/%6; %1/%2 += %5/%6
; %1/%2/%3/%4 >>= %7; dword -> word (in %1/%3)
    psubd       %3,  %1,  %5       ; { a0 - b0 }[0-3]
    psubd       %4,  %2,  %6       ; { a0 - b0 }[4-7]
    paddd       %1,  %5            ; { a0 + b0 }[0-3]
    paddd       %2,  %6            ; { a0 + b0 }[4-7]
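    ; the ">>= %7" step from the description above
    psrad       %1,  %7
    psrad       %2,  %7
    psrad       %3,  %7
    psrad       %4,  %7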
    packssdw    %1,  %2            ; row[0]
    packssdw    %3,  %4            ; row[7]
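
; each SUMSUB_SHPK call therefore yields two packed output rows: the (a+b)>>%7
; words in %1 and the (a-b)>>%7 words in %3 (e.g. row[0] and row[7] for the
; a0/b0 pair)
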
; %1 = row or col (for rounding variable)
; %2 = number of bits to shift at the end

    ; a0 = (W4 * row[0]) + (1 << (15 - 1));
    SBUTTERFLY3 wd,  0,  1, 10,  8 ; { row[0], row[2] }[0-3]/[4-7]
    SIGNEXTEND  m8,  m9, m14       ; { row[2] }[0-3] / [4-7]
    SIGNEXTEND m10, m11, m14       ; { row[0] }[0-3] / [4-7]
    pmaddwd     m2,  m0, [w4_plus_w6]
    pmaddwd     m3,  m1, [w4_plus_w6]
    pmaddwd     m4,  m0, [w4_min_w6]
    pmaddwd     m5,  m1, [w4_min_w6]
    pmaddwd     m6,  m0, [w4_min_w2]
    pmaddwd     m7,  m1, [w4_min_w2]
    pmaddwd     m0, [w4_plus_w2]
    pmaddwd     m1, [w4_plus_w2]
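    ; per dword lane the even-part accumulators are now:
    ;   m0/m1 = a0 = W4*row[0] + W2*row[2]
    ;   m2/m3 = a1 = W4*row[0] + W6*row[2]
    ;   m4/m5 = a2 = W4*row[0] - W6*row[2]
    ;   m6/m7 = a3 = W4*row[0] - W2*row[2]
    ; using the 16-bit WNsh2 weights; rounding and the remainder corrections
    ; are applied separately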
    ; a0: -1*row[0]-1*row[2]
    ; a1: -1*row[0]
    ; a2: -1*row[0]
    ; a3: -1*row[0]+1*row[2]
    psubd       m2, m10            ; a1[0-3]
    psubd       m3, m11            ; a1[4-7]
    psubd       m4, m10            ; a2[0-3]
    psubd       m5, m11            ; a2[4-7]
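    ; a0 and a3 also take the -1*row[0] term (see the final labels below)
    psubd       m0, m10
    psubd       m1, m11
    psubd       m6, m10
    psubd       m7, m11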
    psubd       m0, m8             ; a0[0-3]
    psubd       m1, m9             ; a0[4-7]
    paddd       m6, m8             ; a3[0-3]
    paddd       m7, m9             ; a3[4-7]
    ; a0 += W4*row[4] + W6*row[6]; i.e. -1*row[4]
    ; a1 -= W4*row[4] + W2*row[6]; i.e. -1*row[4]-1*row[6]
    ; a2 -= W4*row[4] - W2*row[6]; i.e. -1*row[4]+1*row[6]
    ; a3 += W4*row[4] - W6*row[6]; i.e. -1*row[4]
    SBUTTERFLY3 wd,  8,  9, 13, 12 ; { row[4], row[6] }[0-3]/[4-7]
    SIGNEXTEND m13, m14, m10       ; { row[4] }[0-3] / [4-7]
    pmaddwd    m10,  m8, [w4_plus_w6]
    pmaddwd    m11,  m9, [w4_plus_w6]
    paddd       m0, m10            ; a0[0-3]
    paddd       m1, m11            ; a0[4-7]
    pmaddwd    m10,  m8, [w4_min_w6]
    pmaddwd    m11,  m9, [w4_min_w6]
    paddd       m6, m10            ; a3[0-3]
    paddd       m7, m11            ; a3[4-7]
    pmaddwd    m10,  m8, [w4_min_w2]
    pmaddwd    m11,  m9, [w4_min_w2]
    pmaddwd     m8, [w4_plus_w2]
    pmaddwd     m9, [w4_plus_w2]
    psubd       m4, m10            ; a2[0-3] intermediate
    psubd       m5, m11            ; a2[4-7] intermediate
    psubd       m2, m8             ; a1[0-3] intermediate
    psubd       m3, m9             ; a1[4-7] intermediate
    SIGNEXTEND m12, m13, m10       ; { row[6] }[0-3] / [4-7]
    psubd       m4, m12            ; a2[0-3]
    psubd       m5, m13            ; a2[4-7]
    paddd       m2, m12            ; a1[0-3]
    paddd       m3, m13            ; a1[4-7]
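
    ; a0..a3 (32-bit) are done at this point; they end up in the scratch rows
    ; at [r2+0..112] (see the reloads before the SUMSUB_SHPK calls below),
    ; freeing m0-m7 for the odd half b0..b3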
    mova       m10, [r2+ 16]       ; { row[1] }[0-7]
    mova        m8, [r2+ 48]       ; { row[3] }[0-7]
    mova       m13, [r2+ 80]       ; { row[5] }[0-7]
    mova       m14, [r2+112]       ; { row[7] }[0-7]
    ; b0 = MUL(W1, row[1]);
    ; MAC(b0,  W3, row[3]);
    ; b1 = MUL(W3, row[1]);
    ; MAC(b1, -W7, row[3]);
    ; b2 = MUL(W5, row[1]);
    ; MAC(b2, -W1, row[3]);
    ; b3 = MUL(W7, row[1]);
    ; MAC(b3, -W5, row[3]);
    SBUTTERFLY3 wd,  0,  1, 10,  8 ; { row[1], row[3] }[0-3]/[4-7]
    SIGNEXTEND m10, m11, m12       ; { row[1] }[0-3] / [4-7]
    SIGNEXTEND  m8,  m9, m12       ; { row[3] }[0-3] / [4-7]
    pmaddwd     m2,  m0, [w3_min_w7]
    pmaddwd     m3,  m1, [w3_min_w7]
    pmaddwd     m4,  m0, [w5_min_w1]
    pmaddwd     m5,  m1, [w5_min_w1]
    pmaddwd     m6,  m0, [w7_min_w5]
    pmaddwd     m7,  m1, [w7_min_w5]
    pmaddwd     m0, [w1_plus_w3]
    pmaddwd     m1, [w1_plus_w3]
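    ; per dword lane the odd-part accumulators are now:
    ;   m0/m1 = b0 = W1*row[1] + W3*row[3]
    ;   m2/m3 = b1 = W3*row[1] - W7*row[3]
    ;   m4/m5 = b2 = W5*row[1] - W1*row[3]
    ;   m6/m7 = b3 = W7*row[1] - W5*row[3]
    ; again with the WNsh2 weights; the remainder corrections follow below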
    ; b0: +1*row[1]+2*row[3]
    ; b1: +2*row[1]-1*row[3]
    ; b2: -1*row[1]-1*row[3]
    ; b3: +1*row[1]+1*row[3]
    paddd       m8, m10            ; { row[1] + row[3] }[0-3]
    paddd       m9, m11            ; { row[1] + row[3] }[4-7]
    paddd       m0, m8             ; b0[0-3]
    paddd       m1, m9             ; b0[4-7]
    paddd       m2, m10            ; b1[0-3]
    paddd       m3, m11            ; b1[4-7]
    psubd       m4, m8             ; b2[0-3]
    psubd       m5, m9             ; b2[4-7]
    paddd       m6, m8             ; b3[0-3]
    paddd       m7, m9             ; b3[4-7]
    ; MAC(b0,  W5, row[5]);
    ; MAC(b0,  W7, row[7]);
    ; MAC(b1, -W1, row[5]);
    ; MAC(b1, -W5, row[7]);
    ; MAC(b2,  W7, row[5]);
    ; MAC(b2,  W3, row[7]);
    ; MAC(b3,  W3, row[5]);
    ; MAC(b3, -W1, row[7]);
    SBUTTERFLY3 wd,  8,  9, 13, 14 ; { row[5], row[7] }[0-3]/[4-7]
    SIGNEXTEND m13, m12, m11       ; { row[5] }[0-3] / [4-7]
    SIGNEXTEND m14, m11, m10       ; { row[7] }[0-3] / [4-7]
    ; b0: -1*row[5]+1*row[7]
    ; b1: -1*row[5]+1*row[7]
    ; b2: +1*row[5]+2*row[7]
    ; b3: +2*row[5]-1*row[7]
    psubd      m13, m14            ; { row[5] - row[7] }[0-3]
    psubd      m12, m11            ; { row[5] - row[7] }[4-7]
    pmaddwd    m10,  m8, [w1_plus_w5]
    pmaddwd    m11,  m9, [w1_plus_w5]
    pmaddwd    m12,  m8, [w5_plus_w7]
    pmaddwd    m13,  m9, [w5_plus_w7]
    psubd       m2, m10            ; b1[0-3]
    psubd       m3, m11            ; b1[4-7]
    paddd       m0, m12            ; b0[0-3]
    paddd       m1, m13            ; b0[4-7]
    pmaddwd    m12,  m8, [w7_plus_w3]
    pmaddwd    m13,  m9, [w7_plus_w3]
    pmaddwd     m8, [w3_min_w1]
    pmaddwd     m9, [w3_min_w1]
    paddd       m4, m12            ; b2[0-3]
    paddd       m5, m13            ; b2[4-7]
    paddd       m6, m8             ; b3[0-3]
    paddd       m7, m9             ; b3[4-7]
    ; row[0] = (a0 + b0) >> 15;
    ; row[7] = (a0 - b0) >> 15;
    ; row[1] = (a1 + b1) >> 15;
    ; row[6] = (a1 - b1) >> 15;
    ; row[2] = (a2 + b2) >> 15;
    ; row[5] = (a2 - b2) >> 15;
    ; row[3] = (a3 + b3) >> 15;
    ; row[4] = (a3 - b3) >> 15;
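    ; a0..a3 are reloaded from the scratch rows at [r2] while b0..b3 are still
    ; live in m0-m7; each SUMSUB_SHPK call forms (a+b) and (a-b), shifts by %2
    ; and packs the dwords back to words, producing two output rows at a time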
    mova        m8, [r2+  0]       ; a0[0-3]
    mova        m9, [r2+ 16]       ; a0[4-7]
    SUMSUB_SHPK m8,  m9, m10, m11, m0, m1, %2
    mova        m0, [r2+ 32]       ; a1[0-3]
    mova        m1, [r2+ 48]       ; a1[4-7]
    SUMSUB_SHPK m0,  m1,  m9, m11, m2, m3, %2
    mova        m1, [r2+ 64]       ; a2[0-3]
    mova        m2, [r2+ 80]       ; a2[4-7]
    SUMSUB_SHPK m1,  m2, m11,  m3, m4, m5, %2
    mova        m2, [r2+ 96]       ; a3[0-3]
    mova        m3, [r2+112]       ; a3[4-7]
    SUMSUB_SHPK m2,  m3,  m4,  m5, m6, m7, %2
; void prores_idct_put_10_<opt>(uint8_t *pixels, int stride,
;                               DCTELEM *block, const int16_t *qmat);
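; with the 4-argument cglobal below this maps to r0 = pixels, r1 = stride
; (presumably in bytes), r2 = block and r3 = qmat; r2 is also the base pointer
; dereferenced throughout IDCT_1D above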
cglobal prores_idct_put_10, 4, 4, %1
    ; for (i = 0; i < 8; i++)
    ;     idctRowCondDC(block + i*8);
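    ; unlike the scalar reference loop above, the SIMD code handles the whole
    ; 8x8 block at once: each 16-byte mova below loads a complete 8-coefficient
    ; row, so IDCT_1D transforms all eight rows in parallel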
    mova       m10, [r2+ 0]        ; { row[0] }[0-7]
    mova        m8, [r2+32]        ; { row[2] }[0-7]
    mova       m13, [r2+64]        ; { row[4] }[0-7]
    mova       m12, [r2+96]        ; { row[6] }[0-7]
    ; transpose for second part of IDCT
    TRANSPOSE8x8W 8, 0, 1, 2, 4, 11, 9, 10, 3
    ; for (i = 0; i < 8; i++)
    ;     idctSparseColAdd(dest + i, line_size, block + i);
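    ; the column pass presumably reuses IDCT_1D with the "col" rounding variable
    ; and a different final shift (the macro's %1/%2 parameters), followed by
    ; clipping to the 10-bit range mentioned in the file header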
%macro SIGNEXTEND 2-3
%if cpuflag(sse4) ; dstlow, dsthigh
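    ; a minimal sketch of this branch: copy the high four words into %2, then
    ; sign-extend each half from words to dwords
    movhlps     %2,  %1
    pmovsxwd    %1,  %1
    pmovsxwd    %2,  %2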
%elif cpuflag(sse2) ; dstlow, dsthigh, tmp