1 ;******************************************************************************
2 ;* x86-SIMD-optimized IDCT for prores
3 ;* this is identical to "simple" IDCT except for the clip range
5 ;* Copyright (c) 2011 Ronald S. Bultje <rsbultje@gmail.com>
7 ;* This file is part of Libav.
9 ;* Libav is free software; you can redistribute it and/or
10 ;* modify it under the terms of the GNU Lesser General Public
11 ;* License as published by the Free Software Foundation; either
12 ;* version 2.1 of the License, or (at your option) any later version.
14 ;* Libav is distributed in the hope that it will be useful,
15 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
16 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 ;* Lesser General Public License for more details.
19 ;* You should have received a copy of the GNU Lesser General Public
20 ;* License along with Libav; if not, write to the Free Software
21 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 ;******************************************************************************
25 %include "x86util.asm"
27 %define W1sh2 22725 ; W1 = 90901 = 22725<<2 + 1
28 %define W2sh2 21407 ; W2 = 85627 = 21407<<2 - 1
29 %define W3sh2 19265 ; W3 = 77062 = 19265<<2 + 2
30 %define W4sh2 16384 ; W4 = 65535 = 16384<<2 - 1
31 %define W5sh2 12873 ; W5 = 51491 = 12873<<2 - 1
32 %define W6sh2 8867 ; W6 = 35468 = 8867<<2
33 %define W7sh2 4520 ; W7 = 18081 = 4520<<2 + 1
39 w4_plus_w2: times 4 dw W4sh2, +W2sh2
40 w4_min_w2: times 4 dw W4sh2, -W2sh2
41 w4_plus_w6: times 4 dw W4sh2, +W6sh2
42 w4_min_w6: times 4 dw W4sh2, -W6sh2
43 w1_plus_w3: times 4 dw W1sh2, +W3sh2
44 w3_min_w1: times 4 dw W3sh2, -W1sh2
45 w7_plus_w3: times 4 dw W7sh2, +W3sh2
46 w3_min_w7: times 4 dw W3sh2, -W7sh2
47 w1_plus_w5: times 4 dw W1sh2, +W5sh2
48 w5_min_w1: times 4 dw W5sh2, -W1sh2
49 w5_plus_w7: times 4 dw W5sh2, +W7sh2
50 w7_min_w5: times 4 dw W7sh2, -W5sh2
51 row_round: times 8 dw (1<<14)
58 section .text align=16
60 ; interleave data while maintaining source
61 ; %1=type, %2=dstlo, %3=dsthi, %4=src, %5=interleave
63 punpckl%1 m%2, m%4, m%5
64 punpckh%1 m%3, m%4, m%5
67 ; %1/%2=src1/dst1, %3/%4=dst2, %5/%6=src2, %7=shift
68 ; action: %3/%4 = %1/%2 - %5/%6; %1/%2 += %5/%6
69 ; %1/%2/%3/%4 >>= %7; dword -> word (in %1/%3)
71 psubd %3, %1, %5 ; { a0 - b0 }[0-3]
72 psubd %4, %2, %6 ; { a0 - b0 }[4-7]
73 paddd %1, %5 ; { a0 + b0 }[0-3]
74 paddd %2, %6 ; { a0 + b0 }[4-7]
79 packssdw %1, %2 ; row[0]
80 packssdw %3, %4 ; row[7]
83 ; %1 = row or col (for rounding variable)
84 ; %2 = number of bits to shift at the end
87 ; a0 = (W4 * row[0]) + (1 << (15 - 1));
98 SBUTTERFLY3 wd, 0, 1, 10, 8 ; { row[0], row[2] }[0-3]/[4-7]
100 psubw m10,[row_round]
102 SIGNEXTEND m8, m9, m14 ; { row[2] }[0-3] / [4-7]
103 SIGNEXTEND m10, m11, m14 ; { row[0] }[0-3] / [4-7]
104 pmaddwd m2, m0, [w4_plus_w6]
105 pmaddwd m3, m1, [w4_plus_w6]
106 pmaddwd m4, m0, [w4_min_w6]
107 pmaddwd m5, m1, [w4_min_w6]
108 pmaddwd m6, m0, [w4_min_w2]
109 pmaddwd m7, m1, [w4_min_w2]
110 pmaddwd m0, [w4_plus_w2]
111 pmaddwd m1, [w4_plus_w2]
121 ; a0: -1*row[0]-1*row[2]
124 ; a3: -1*row[0]+1*row[2]
125 psubd m2, m10 ; a1[0-3]
126 psubd m3, m11 ; a1[4-7]
127 psubd m4, m10 ; a2[0-3]
128 psubd m5, m11 ; a2[4-7]
133 psubd m0, m8 ; a0[0-3]
134 psubd m1, m9 ; a0[4-7]
135 paddd m6, m8 ; a3[0-3]
136 paddd m7, m9 ; a3[4-7]
138 ; a0 += W4*row[4] + W6*row[6]; i.e. -1*row[4]
139 ; a1 -= W4*row[4] + W2*row[6]; i.e. -1*row[4]-1*row[6]
140 ; a2 -= W4*row[4] - W2*row[6]; i.e. -1*row[4]+1*row[6]
141 ; a3 += W4*row[4] - W6*row[6]; i.e. -1*row[4]
142 SBUTTERFLY3 wd, 8, 9, 13, 12 ; { row[4], row[6] }[0-3]/[4-7]
143 SIGNEXTEND m13, m14, m10 ; { row[4] }[0-3] / [4-7]
144 pmaddwd m10, m8, [w4_plus_w6]
145 pmaddwd m11, m9, [w4_plus_w6]
150 paddd m0, m10 ; a0[0-3]
151 paddd m1, m11 ; a0[4-7]
152 pmaddwd m10, m8, [w4_min_w6]
153 pmaddwd m11, m9, [w4_min_w6]
158 paddd m6, m10 ; a3[0-3]
159 paddd m7, m11 ; a3[4-7]
160 pmaddwd m10, m8, [w4_min_w2]
161 pmaddwd m11, m9, [w4_min_w2]
162 pmaddwd m8, [w4_plus_w2]
163 pmaddwd m9, [w4_plus_w2]
172 psubd m4, m10 ; a2[0-3] intermediate
173 psubd m5, m11 ; a2[4-7] intermediate
174 psubd m2, m8 ; a1[0-3] intermediate
175 psubd m3, m9 ; a1[4-7] intermediate
176 SIGNEXTEND m12, m13, m10 ; { row[6] }[0-3] / [4-7]
177 psubd m4, m12 ; a2[0-3]
178 psubd m5, m13 ; a2[4-7]
179 paddd m2, m12 ; a1[0-3]
180 paddd m3, m13 ; a1[4-7]
187 mova m10,[r2+ 16] ; { row[1] }[0-7]
188 mova m8, [r2+ 48] ; { row[3] }[0-7]
189 mova m13,[r2+ 80] ; { row[5] }[0-7]
190 mova m14,[r2+112] ; { row[7] }[0-7]
202 ; b0 = MUL(W1, row[1]);
203 ; MAC(b0, W3, row[3]);
204 ; b1 = MUL(W3, row[1]);
205 ; MAC(b1, -W7, row[3]);
206 ; b2 = MUL(W5, row[1]);
207 ; MAC(b2, -W1, row[3]);
208 ; b3 = MUL(W7, row[1]);
209 ; MAC(b3, -W5, row[3]);
210 SBUTTERFLY3 wd, 0, 1, 10, 8 ; { row[1], row[3] }[0-3]/[4-7]
211 SIGNEXTEND m10, m11, m12 ; { row[1] }[0-3] / [4-7]
212 SIGNEXTEND m8, m9, m12 ; { row[3] }[0-3] / [4-7]
213 pmaddwd m2, m0, [w3_min_w7]
214 pmaddwd m3, m1, [w3_min_w7]
215 pmaddwd m4, m0, [w5_min_w1]
216 pmaddwd m5, m1, [w5_min_w1]
217 pmaddwd m6, m0, [w7_min_w5]
218 pmaddwd m7, m1, [w7_min_w5]
219 pmaddwd m0, [w1_plus_w3]
220 pmaddwd m1, [w1_plus_w3]
230 ; b0: +1*row[1]+2*row[3]
231 ; b1: +2*row[1]-1*row[3]
232 ; b2: -1*row[1]-1*row[3]
233 ; b3: +1*row[1]+1*row[3]
238 paddd m8, m10 ; { row[1] + row[3] }[0-3]
239 paddd m9, m11 ; { row[1] + row[3] }[4-7]
242 paddd m0, m8 ; b0[0-3]
243 paddd m1, m9 ; b0[4-7]
244 paddd m2, m10 ; b1[0-3]
245 paddd m3, m11 ; b1[4-7]
246 psubd m4, m8 ; b2[0-3]
247 psubd m5, m9 ; b2[4-7]
248 paddd m6, m8 ; b3[0-3]
249 paddd m7, m9 ; b3[4-7]
251 ; MAC(b0, W5, row[5]);
252 ; MAC(b0, W7, row[7]);
253 ; MAC(b1, -W1, row[5]);
254 ; MAC(b1, -W5, row[7]);
255 ; MAC(b2, W7, row[5]);
256 ; MAC(b2, W3, row[7]);
257 ; MAC(b3, W3, row[5]);
258 ; MAC(b3, -W1, row[7]);
259 SBUTTERFLY3 wd, 8, 9, 13, 14 ; { row[5], row[7] }[0-3]/[4-7]
260 SIGNEXTEND m13, m12, m11 ; { row[5] }[0-3] / [4-7]
261 SIGNEXTEND m14, m11, m10 ; { row[7] }[0-3] / [4-7]
263 ; b0: -1*row[5]+1*row[7]
264 ; b1: -1*row[5]+1*row[7]
265 ; b2: +1*row[5]+2*row[7]
266 ; b3: +2*row[5]-1*row[7]
271 psubd m13, m14 ; { row[5] - row[7] }[0-3]
272 psubd m12, m11 ; { row[5] - row[7] }[4-7]
284 pmaddwd m10, m8, [w1_plus_w5]
285 pmaddwd m11, m9, [w1_plus_w5]
286 pmaddwd m12, m8, [w5_plus_w7]
287 pmaddwd m13, m9, [w5_plus_w7]
292 psubd m2, m10 ; b1[0-3]
293 psubd m3, m11 ; b1[4-7]
294 paddd m0, m12 ; b0[0-3]
295 paddd m1, m13 ; b0[4-7]
296 pmaddwd m12, m8, [w7_plus_w3]
297 pmaddwd m13, m9, [w7_plus_w3]
298 pmaddwd m8, [w3_min_w1]
299 pmaddwd m9, [w3_min_w1]
304 paddd m4, m12 ; b2[0-3]
305 paddd m5, m13 ; b2[4-7]
306 paddd m6, m8 ; b3[0-3]
307 paddd m7, m9 ; b3[4-7]
309 ; row[0] = (a0 + b0) >> 15;
310 ; row[7] = (a0 - b0) >> 15;
311 ; row[1] = (a1 + b1) >> 15;
312 ; row[6] = (a1 - b1) >> 15;
313 ; row[2] = (a2 + b2) >> 15;
314 ; row[5] = (a2 - b2) >> 15;
315 ; row[3] = (a3 + b3) >> 15;
316 ; row[4] = (a3 - b3) >> 15;
317 mova m8, [r2+ 0] ; a0[0-3]
318 mova m9, [r2+16] ; a0[4-7]
319 SUMSUB_SHPK m8, m9, m10, m11, m0, m1, %2
320 mova m0, [r2+32] ; a1[0-3]
321 mova m1, [r2+48] ; a1[4-7]
322 SUMSUB_SHPK m0, m1, m9, m11, m2, m3, %2
323 mova m1, [r2+64] ; a2[0-3]
324 mova m2, [r2+80] ; a2[4-7]
325 SUMSUB_SHPK m1, m2, m11, m3, m4, m5, %2
326 mova m2, [r2+96] ; a3[0-3]
327 mova m3, [r2+112] ; a3[4-7]
328 SUMSUB_SHPK m2, m3, m4, m5, m6, m7, %2
331 ; void prores_idct_put_10_<opt>(uint8_t *pixels, int stride,
332 ; DCTELEM *block, const int16_t *qmat);
334 cglobal prores_idct_put_10_%1, 4, 4, %2
338 ; for (i = 0; i < 8; i++)
339 ; idctRowCondDC(block + i*8);
340 mova m10,[r2+ 0] ; { row[0] }[0-7]
341 mova m8, [r2+32] ; { row[2] }[0-7]
342 mova m13,[r2+64] ; { row[4] }[0-7]
343 mova m12,[r2+96] ; { row[6] }[0-7]
352 ; transpose for second part of IDCT
353 TRANSPOSE8x8W 8, 0, 1, 2, 4, 11, 9, 10, 3
363 ; for (i = 0; i < 8; i++)
364 ; idctSparseColAdd(dest + i, line_size, block + i);
409 %macro signextend_sse2 3 ; dstlow, dsthigh, tmp
417 %macro signextend_sse4 2-3 ; dstlow, dsthigh
424 %define SIGNEXTEND signextend_sse2
427 %define SIGNEXTEND signextend_sse4