/*
 * MMX and SSE2 optimized snow DSP utils
 * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/snow.h"
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
    const int w2= (width+1)>>1;
    // SSE2 code runs faster with pointers aligned on a 32-byte boundary.
    IDWTELEM temp_buf[(width>>1) + 8];
    IDWTELEM * const temp = temp_buf + 8 - (((long)temp_buf & 0xF) >> 1);
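    // temp_buf is over-allocated by 8 elements (IDWTELEM is 16 bits) so that
    // temp can be rounded up to a 16-byte boundary by hand: the movdqa stores
    // in Lift 3 and the punpcklwd/punpckhwd memory operands in the interleave
    // loop at the bottom of this function both require 16-byte alignment.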
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;
        IDWTELEM b_0 = b[0]; // By allowing the first entry in b[0] to be calculated twice
        // (the first time erroneously), we allow the SSE2 code to run an extra pass.
        // The savings in code and time are well worth having to store this value and
        // calculate b[0] correctly afterwards.
44 "pcmpeqd %%xmm7, %%xmm7 \n\t"
45 "psllw $15, %%xmm7 \n\t"
46 "psrlw $13, %%xmm7 \n\t"
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw %%xmm1, %%xmm2 \n\t"
                "paddw %%xmm5, %%xmm6 \n\t"
                "movdqa %%xmm2, %%xmm0 \n\t"
                "movdqa %%xmm6, %%xmm4 \n\t"
                "paddw %%xmm2, %%xmm2 \n\t"
                "paddw %%xmm6, %%xmm6 \n\t"
                "paddw %%xmm0, %%xmm2 \n\t"
                "paddw %%xmm4, %%xmm6 \n\t"
                "paddw %%xmm7, %%xmm2 \n\t"
                "paddw %%xmm7, %%xmm6 \n\t"
                "psraw $3, %%xmm2 \n\t"
                "psraw $3, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubw %%xmm2, %%xmm0 \n\t"
                "psubw %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
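        // Scalar iterations until dst is 32-byte aligned, so the vector loop
        // below may use aligned movdqa loads and stores on dst.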
        for(; (((long)&dst[i]) & 0x1F) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
        for(; i<w_r-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw %%xmm1, %%xmm2 \n\t"
                "paddw %%xmm5, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubw %%xmm2, %%xmm0 \n\t"
                "psubw %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;
        IDWTELEM b_0 = b[0];

        i = 0;
        __asm__ volatile(
            "psllw $1, %%xmm7 \n\t"
        ::);
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 2(%1), %%xmm0 \n\t"
                "movdqu 18(%1), %%xmm4 \n\t" //FIXME try aligned reads and shifts
                "paddw %%xmm1, %%xmm0 \n\t"
                "paddw %%xmm5, %%xmm4 \n\t"
                "paddw %%xmm7, %%xmm0 \n\t"
                "paddw %%xmm7, %%xmm4 \n\t"
                "movdqa (%0), %%xmm1 \n\t"
                "movdqa 16(%0), %%xmm5 \n\t"
                "psraw $2, %%xmm0 \n\t"
                "psraw $2, %%xmm4 \n\t"
                "paddw %%xmm1, %%xmm0 \n\t"
                "paddw %%xmm5, %%xmm4 \n\t"
                "psraw $2, %%xmm0 \n\t"
                "psraw $2, %%xmm4 \n\t"
                "paddw %%xmm1, %%xmm0 \n\t"
                "paddw %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory");
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;

        i = 0;
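        // Lift 3 writes its output to temp[] rather than in place, because the
        // low half of b[] is still needed afterwards by the interleave step
        // below. Per lane: temp[i] = src[i] + ((3*(b[i] + b[i+1])) >> 1),
        // computed as x + (x >> 1) with x = b[i] + b[i+1].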
        for(; (((long)&temp[i]) & 0x1F) && i<w_r; i++){
            temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
        }
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movdqu 2(%1), %%xmm2 \n\t"
                "movdqu 18(%1), %%xmm6 \n\t"
                "paddw (%1), %%xmm2 \n\t"
                "paddw 16(%1), %%xmm6 \n\t"
                "movdqu (%0), %%xmm0 \n\t"
                "movdqu 16(%0), %%xmm4 \n\t"
                "paddw %%xmm2, %%xmm0 \n\t"
                "paddw %%xmm6, %%xmm4 \n\t"
                "psraw $1, %%xmm2 \n\t"
                "psraw $1, %%xmm6 \n\t"
                "paddw %%xmm0, %%xmm2 \n\t"
                "paddw %%xmm4, %%xmm6 \n\t"
                "movdqa %%xmm2, (%2) \n\t"
                "movdqa %%xmm6, 16(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x3E) != 0x3E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
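        // The vector loop below interleaves the two half-lines in place:
        // punpcklwd/punpckhwd merge words from b[i>>1] with words from
        // temp[i>>1], emitting 64 interleaved coefficients per iteration.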
        for (i-=62; i>=0; i-=64){
            __asm__ volatile(
                "movdqa (%1), %%xmm0 \n\t"
                "movdqa 16(%1), %%xmm2 \n\t"
                "movdqa 32(%1), %%xmm4 \n\t"
                "movdqa 48(%1), %%xmm6 \n\t"
                "movdqa (%1), %%xmm1 \n\t"
                "movdqa 16(%1), %%xmm3 \n\t"
                "movdqa 32(%1), %%xmm5 \n\t"
                "movdqa 48(%1), %%xmm7 \n\t"
                "punpcklwd (%2), %%xmm0 \n\t"
                "punpcklwd 16(%2), %%xmm2 \n\t"
                "punpcklwd 32(%2), %%xmm4 \n\t"
                "punpcklwd 48(%2), %%xmm6 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm2, 32(%0) \n\t"
                "movdqa %%xmm4, 64(%0) \n\t"
                "movdqa %%xmm6, 96(%0) \n\t"
                "punpckhwd (%2), %%xmm1 \n\t"
                "punpckhwd 16(%2), %%xmm3 \n\t"
                "punpckhwd 32(%2), %%xmm5 \n\t"
                "punpckhwd 48(%2), %%xmm7 \n\t"
                "movdqa %%xmm1, 16(%0) \n\t"
                "movdqa %%xmm3, 48(%0) \n\t"
                "movdqa %%xmm5, 80(%0) \n\t"
                "movdqa %%xmm7, 112(%0) \n\t"
                :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
                : "memory");
        }
    }
}
void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){
    const int w2= (width+1)>>1;
    IDWTELEM temp[width >> 1];
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;
        i = 1;
        b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psllw $15, %%mm7 \n\t"
            "psrlw $13, %%mm7 \n\t"
        ::);
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddw 2(%1), %%mm2 \n\t"
                "paddw 10(%1), %%mm6 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm6, %%mm4 \n\t"
                "paddw %%mm2, %%mm2 \n\t"
                "paddw %%mm6, %%mm6 \n\t"
                "paddw %%mm0, %%mm2 \n\t"
                "paddw %%mm4, %%mm6 \n\t"
                "paddw %%mm7, %%mm2 \n\t"
                "paddw %%mm7, %%mm6 \n\t"
                "psraw $3, %%mm2 \n\t"
                "psraw $3, %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubw %%mm2, %%mm0 \n\t"
                "psubw %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddw 2(%1), %%mm2 \n\t"
                "paddw 10(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubw %%mm2, %%mm0 \n\t"
                "psubw %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;

        i = 1;
        b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS);
        __asm__ volatile(
            "psllw $1, %%mm7 \n\t"
        ::);
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm4 \n\t"
                "paddw 2(%1), %%mm0 \n\t"
                "paddw 10(%1), %%mm4 \n\t"
                "paddw %%mm7, %%mm0 \n\t"
                "paddw %%mm7, %%mm4 \n\t"
                "psraw $2, %%mm0 \n\t"
                "psraw $2, %%mm4 \n\t"
                "movq (%0), %%mm1 \n\t"
                "movq 8(%0), %%mm5 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm5, %%mm4 \n\t"
                "psraw $2, %%mm0 \n\t"
                "psraw $2, %%mm4 \n\t"
                "paddw %%mm1, %%mm0 \n\t"
                "paddw %%mm5, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory");
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;

        i = 0;
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq 2(%1), %%mm2 \n\t"
                "movq 10(%1), %%mm6 \n\t"
                "paddw (%1), %%mm2 \n\t"
                "paddw 8(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "paddw %%mm2, %%mm0 \n\t"
                "paddw %%mm6, %%mm4 \n\t"
                "psraw $1, %%mm2 \n\t"
                "psraw $1, %%mm6 \n\t"
                "paddw %%mm0, %%mm2 \n\t"
                "paddw %%mm4, %%mm6 \n\t"
                "movq %%mm2, (%2) \n\t"
                "movq %%mm6, 8(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x1E) != 0x1E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=30; i>=0; i-=32){
            __asm__ volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm2 \n\t"
                "movq 16(%1), %%mm4 \n\t"
                "movq 24(%1), %%mm6 \n\t"
                "movq (%1), %%mm1 \n\t"
                "movq 8(%1), %%mm3 \n\t"
                "movq 16(%1), %%mm5 \n\t"
                "movq 24(%1), %%mm7 \n\t"
                "punpcklwd (%2), %%mm0 \n\t"
                "punpcklwd 8(%2), %%mm2 \n\t"
                "punpcklwd 16(%2), %%mm4 \n\t"
                "punpcklwd 24(%2), %%mm6 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm2, 16(%0) \n\t"
                "movq %%mm4, 32(%0) \n\t"
                "movq %%mm6, 48(%0) \n\t"
                "punpckhwd (%2), %%mm1 \n\t"
                "punpckhwd 8(%2), %%mm3 \n\t"
                "punpckhwd 16(%2), %%mm5 \n\t"
                "punpckhwd 24(%2), %%mm7 \n\t"
                "movq %%mm1, 8(%0) \n\t"
                "movq %%mm3, 24(%0) \n\t"
                "movq %%mm5, 40(%0) \n\t"
                "movq %%mm7, 56(%0) \n\t"
                :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
                : "memory");
        }
    }
}
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
    ""op" (%%"r",%%"REG_d",2), %%"t0" \n\t"\
    ""op" 16(%%"r",%%"REG_d",2), %%"t1" \n\t"\
    ""op" 32(%%"r",%%"REG_d",2), %%"t2" \n\t"\
    ""op" 48(%%"r",%%"REG_d",2), %%"t3" \n\t"

#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
    snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
    snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
    "psubw %%"s0", %%"t0" \n\t"\
    "psubw %%"s1", %%"t1" \n\t"\
    "psubw %%"s2", %%"t2" \n\t"\
    "psubw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
    "movdqa %%"s0", (%%"w",%%"REG_d",2) \n\t"\
    "movdqa %%"s1", 16(%%"w",%%"REG_d",2) \n\t"\
    "movdqa %%"s2", 32(%%"w",%%"REG_d",2) \n\t"\
    "movdqa %%"s3", 48(%%"w",%%"REG_d",2) \n\t"

#define snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)\
    "psraw $"n", %%"t0" \n\t"\
    "psraw $"n", %%"t1" \n\t"\
    "psraw $"n", %%"t2" \n\t"\
    "psraw $"n", %%"t3" \n\t"

#define snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
    "paddw %%"s0", %%"t0" \n\t"\
    "paddw %%"s1", %%"t1" \n\t"\
    "paddw %%"s2", %%"t2" \n\t"\
    "paddw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
    "movdqa %%"s0", %%"t0" \n\t"\
    "movdqa %%"s1", %%"t1" \n\t"\
    "movdqa %%"s2", %%"t2" \n\t"\
    "movdqa %%"s3", %%"t3" \n\t"
void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
    long i = width;

    while(i & 0x1F)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
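    // The asm below is the vectorized form of the four lifting steps above,
    // 32 coefficients per iteration. REG_d holds i as an element count; the
    // (reg, REG_d, 2) addressing scales it into a byte offset into the
    // 16-bit IDWTELEM rows b0..b5.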
445 "mov %6, %%"REG_a" \n\t"
446 "mov %4, %%"REG_S" \n\t"
448 snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
449 snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
450 snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
451 snow_vertical_compose_sse2_r2r_add("xmm0","xmm2","xmm4","xmm6","xmm0","xmm2","xmm4","xmm6")
452 snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
454 "pcmpeqd %%xmm1, %%xmm1 \n\t"
455 "psllw $15, %%xmm1 \n\t"
456 "psrlw $13, %%xmm1 \n\t"
457 "mov %5, %%"REG_a" \n\t"
459 snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
460 snow_vertical_compose_sse2_sra("3","xmm0","xmm2","xmm4","xmm6")
461 snow_vertical_compose_sse2_load(REG_a,"xmm1","xmm3","xmm5","xmm7")
462 snow_vertical_compose_sse2_sub("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
463 snow_vertical_compose_sse2_store(REG_a,"xmm1","xmm3","xmm5","xmm7")
464 "mov %3, %%"REG_c" \n\t"
465 snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
466 snow_vertical_compose_sse2_add(REG_c,"xmm1","xmm3","xmm5","xmm7")
467 snow_vertical_compose_sse2_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
468 snow_vertical_compose_sse2_store(REG_S,"xmm0","xmm2","xmm4","xmm6")
469 "mov %2, %%"REG_a" \n\t"
470 snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
471 snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6")
472 snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")
474 "pcmpeqd %%xmm1, %%xmm1 \n\t"
475 "psllw $15, %%xmm1 \n\t"
476 "psrlw $14, %%xmm1 \n\t"
477 "mov %1, %%"REG_S" \n\t"
479 snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
480 snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6")
481 snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")
482 snow_vertical_compose_sse2_store(REG_c,"xmm0","xmm2","xmm4","xmm6")
483 snow_vertical_compose_sse2_add(REG_S,"xmm0","xmm2","xmm4","xmm6")
484 snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
485 snow_vertical_compose_sse2_sra("1","xmm0","xmm2","xmm4","xmm6")
486 snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
487 snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
488 snow_vertical_compose_sse2_store(REG_a,"xmm0","xmm2","xmm4","xmm6")
491 "sub $32, %%"REG_d" \n\t"
495 "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
496 "%"REG_a"","%"REG_S"","%"REG_c"");
#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
    ""op" (%%"r",%%"REG_d",2), %%"t0" \n\t"\
    ""op" 8(%%"r",%%"REG_d",2), %%"t1" \n\t"\
    ""op" 16(%%"r",%%"REG_d",2), %%"t2" \n\t"\
    ""op" 24(%%"r",%%"REG_d",2), %%"t3" \n\t"

#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
    snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
    snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
    snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
    "movq %%"s0", (%%"w",%%"REG_d",2) \n\t"\
    "movq %%"s1", 8(%%"w",%%"REG_d",2) \n\t"\
    "movq %%"s2", 16(%%"w",%%"REG_d",2) \n\t"\
    "movq %%"s3", 24(%%"w",%%"REG_d",2) \n\t"

#define snow_vertical_compose_mmx_sra(n,t0,t1,t2,t3)\
    snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
    snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
    "movq %%"s0", %%"t0" \n\t"\
    "movq %%"s1", %%"t1" \n\t"\
    "movq %%"s2", %%"t2" \n\t"\
    "movq %%"s3", %%"t3" \n\t"
void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
    long i = width;

    while(i & 0xF)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
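    // Same four lifting steps as the SSE2 version, 16 coefficients per MMX
    // iteration (hence the 0xF alignment of i above and the "sub $16" below).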
547 "mov %6, %%"REG_a" \n\t"
548 "mov %4, %%"REG_S" \n\t"
550 snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
551 snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
552 snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
553 snow_vertical_compose_mmx_r2r_add("mm0","mm2","mm4","mm6","mm0","mm2","mm4","mm6")
554 snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
556 "pcmpeqw %%mm1, %%mm1 \n\t"
557 "psllw $15, %%mm1 \n\t"
558 "psrlw $13, %%mm1 \n\t"
559 "mov %5, %%"REG_a" \n\t"
561 snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
562 snow_vertical_compose_mmx_sra("3","mm0","mm2","mm4","mm6")
563 snow_vertical_compose_mmx_load(REG_a,"mm1","mm3","mm5","mm7")
564 snow_vertical_compose_mmx_sub("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
565 snow_vertical_compose_mmx_store(REG_a,"mm1","mm3","mm5","mm7")
566 "mov %3, %%"REG_c" \n\t"
567 snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
568 snow_vertical_compose_mmx_add(REG_c,"mm1","mm3","mm5","mm7")
569 snow_vertical_compose_mmx_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
570 snow_vertical_compose_mmx_store(REG_S,"mm0","mm2","mm4","mm6")
571 "mov %2, %%"REG_a" \n\t"
572 snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
573 snow_vertical_compose_mmx_sra("2","mm0","mm2","mm4","mm6")
574 snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")
576 "pcmpeqw %%mm1, %%mm1 \n\t"
577 "psllw $15, %%mm1 \n\t"
578 "psrlw $14, %%mm1 \n\t"
579 "mov %1, %%"REG_S" \n\t"
581 snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
582 snow_vertical_compose_mmx_sra("2","mm0","mm2","mm4","mm6")
583 snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")
584 snow_vertical_compose_mmx_store(REG_c,"mm0","mm2","mm4","mm6")
585 snow_vertical_compose_mmx_add(REG_S,"mm0","mm2","mm4","mm6")
586 snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
587 snow_vertical_compose_mmx_sra("1","mm0","mm2","mm4","mm6")
588 snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
589 snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
590 snow_vertical_compose_mmx_store(REG_a,"mm0","mm2","mm4","mm6")
593 "sub $16, %%"REG_d" \n\t"
597 "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
598 "%"REG_a"","%"REG_S"","%"REG_c"");
#define snow_inner_add_yblock_sse2_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    long tmp;\
    __asm__ volatile(\
        "mov %7, %%"REG_c" \n\t"\
        "mov %6, %2 \n\t"\
        "mov %4, %%"REG_S" \n\t"\
        "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\
        "pcmpeqd %%xmm3, %%xmm3 \n\t"\
        "psllw $15, %%xmm3 \n\t"\
        "psrlw $12, %%xmm3 \n\t" /* (1 << FRAC_BITS) >> 1 */\
        "1: \n\t"\
        "mov %1, %%"REG_D" \n\t"\
        "mov (%%"REG_D"), %%"REG_D" \n\t"\
        "add %3, %%"REG_D" \n\t"
#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
    "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
    "movq (%%"REG_d"), %%"out_reg1" \n\t"\
    "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
    "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
    "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\
    "punpcklbw %%xmm7, %%xmm0 \n\t"\
    "punpcklbw %%xmm7, %%xmm4 \n\t"\
    "pmullw %%xmm0, %%"out_reg1" \n\t"\
    "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
    "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
    "movq (%%"REG_d"), %%"out_reg1" \n\t"\
    "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
    "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
    "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
    "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\
    "punpcklbw %%xmm7, %%xmm0 \n\t"\
    "punpcklbw %%xmm7, %%xmm4 \n\t"\
    "pmullw %%xmm0, %%"out_reg1" \n\t"\
    "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \
    snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\
    "paddusw %%xmm2, %%xmm1 \n\t"\
    "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \
    snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\
    "paddusw %%xmm2, %%xmm1 \n\t"\
    "paddusw %%xmm6, %%xmm5 \n\t"
#define snow_inner_add_yblock_sse2_end_common1\
    "add $32, %%"REG_S" \n\t"\
    "add %%"REG_c", %0 \n\t"\
    "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
    "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
    "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
    "add %%"REG_c", (%%"REG_a") \n\t"

#define snow_inner_add_yblock_sse2_end_common2\
    "jnz 1b \n\t"\
    :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
    :\
    "rm"((long)(src_x<<1)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
    "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

#define snow_inner_add_yblock_sse2_end_8\
    "sal $1, %%"REG_c" \n\t"\
    "add $"PTR_SIZE"*2, %1 \n\t"\
    snow_inner_add_yblock_sse2_end_common1\
    "sar $1, %%"REG_c" \n\t"\
    "sub $2, %2 \n\t"\
    snow_inner_add_yblock_sse2_end_common2

#define snow_inner_add_yblock_sse2_end_16\
    "add $"PTR_SIZE"*1, %1 \n\t"\
    snow_inner_add_yblock_sse2_end_common1\
    "sub $1, %2 \n\t"\
    snow_inner_add_yblock_sse2_end_common2
static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                                       int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_8("2", "8")
    snow_inner_add_yblock_sse2_accum_8("1", "128")
    snow_inner_add_yblock_sse2_accum_8("0", "136")

    "mov %0, %%"REG_d" \n\t"
    "movdqa (%%"REG_D"), %%xmm0 \n\t"
    "movdqa %%xmm1, %%xmm2 \n\t"

    "punpckhwd %%xmm7, %%xmm1 \n\t"
    "punpcklwd %%xmm7, %%xmm2 \n\t"
    "paddd %%xmm2, %%xmm0 \n\t"
    "movdqa 16(%%"REG_D"), %%xmm2 \n\t"
    "paddd %%xmm1, %%xmm2 \n\t"
    "paddd %%xmm3, %%xmm0 \n\t"
    "paddd %%xmm3, %%xmm2 \n\t"

    "mov %1, %%"REG_D" \n\t"
    "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t"
    "add %3, %%"REG_D" \n\t"

    "movdqa (%%"REG_D"), %%xmm4 \n\t"
    "movdqa %%xmm5, %%xmm6 \n\t"
    "punpckhwd %%xmm7, %%xmm5 \n\t"
    "punpcklwd %%xmm7, %%xmm6 \n\t"
    "paddd %%xmm6, %%xmm4 \n\t"
    "movdqa 16(%%"REG_D"), %%xmm6 \n\t"
    "paddd %%xmm5, %%xmm6 \n\t"
    "paddd %%xmm3, %%xmm4 \n\t"
    "paddd %%xmm3, %%xmm6 \n\t"

    "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
    "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */
    "packssdw %%xmm2, %%xmm0 \n\t"
    "packuswb %%xmm7, %%xmm0 \n\t"
    "movq %%xmm0, (%%"REG_d") \n\t"

    "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
    "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */
    "packssdw %%xmm6, %%xmm4 \n\t"
    "packuswb %%xmm7, %%xmm4 \n\t"
    "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t"
    snow_inner_add_yblock_sse2_end_8
}
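
/* The bw_16 kernel below stays in 16-bit lanes throughout: the obmc*block
 * products are pre-shifted right by 4 before the IDWT line is added, whereas
 * the bw_8 kernel above widens to 32 bits and shifts only once at the end. */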
static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                                int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_16("2", "16")
    snow_inner_add_yblock_sse2_accum_16("1", "512")
    snow_inner_add_yblock_sse2_accum_16("0", "528")

    "mov %0, %%"REG_d" \n\t"
    "psrlw $4, %%xmm1 \n\t"
    "psrlw $4, %%xmm5 \n\t"
    "paddw (%%"REG_D"), %%xmm1 \n\t"
    "paddw 16(%%"REG_D"), %%xmm5 \n\t"
    "paddw %%xmm3, %%xmm1 \n\t"
    "paddw %%xmm3, %%xmm5 \n\t"
    "psraw $4, %%xmm1 \n\t" /* FRAC_BITS. */
    "psraw $4, %%xmm5 \n\t" /* FRAC_BITS. */
    "packuswb %%xmm5, %%xmm1 \n\t"

    "movdqu %%xmm1, (%%"REG_d") \n\t"

    snow_inner_add_yblock_sse2_end_16
}
#define snow_inner_add_yblock_mmx_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    long tmp;\
    __asm__ volatile(\
        "mov %7, %%"REG_c" \n\t"\
        "mov %6, %2 \n\t"\
        "mov %4, %%"REG_S" \n\t"\
        "pxor %%mm7, %%mm7 \n\t" /* 0 */\
        "pcmpeqd %%mm3, %%mm3 \n\t"\
        "psllw $15, %%mm3 \n\t"\
        "psrlw $12, %%mm3 \n\t" /* (1 << FRAC_BITS) >> 1 */\
        "1: \n\t"\
        "mov %1, %%"REG_D" \n\t"\
        "mov (%%"REG_D"), %%"REG_D" \n\t"\
        "add %3, %%"REG_D" \n\t"
#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
    "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
    "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\
    "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\
    "punpcklbw %%mm7, %%"out_reg1" \n\t"\
    "punpcklbw %%mm7, %%"out_reg2" \n\t"\
    "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\
    "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "pmullw %%mm0, %%"out_reg1" \n\t"\
    "pmullw %%mm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
    snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
    "paddusw %%mm2, %%mm1 \n\t"\
    "paddusw %%mm6, %%mm5 \n\t"
#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
    "mov %0, %%"REG_d" \n\t"\
    "psrlw $4, %%mm1 \n\t"\
    "psrlw $4, %%mm5 \n\t"\
    "paddw "read_offset"(%%"REG_D"), %%mm1 \n\t"\
    "paddw "read_offset"+8(%%"REG_D"), %%mm5 \n\t"\
    "paddw %%mm3, %%mm1 \n\t"\
    "paddw %%mm3, %%mm5 \n\t"\
    "psraw $4, %%mm1 \n\t"\
    "psraw $4, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm1 \n\t"\
    "movq %%mm1, "write_offset"(%%"REG_d") \n\t"
#define snow_inner_add_yblock_mmx_end(s_step)\
    "add $"s_step", %%"REG_S" \n\t"\
    "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
    "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
    "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
    "add %%"REG_c", (%%"REG_a") \n\t"\
    "add $"PTR_SIZE"*1, %1 \n\t"\
    "add %%"REG_c", %0 \n\t"\
    "dec %2 \n\t"\
    "jnz 1b \n\t"\
    :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
    :\
    "rm"((long)(src_x<<1)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
    "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");
static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                              int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "8", "0")
    snow_inner_add_yblock_mmx_accum("1", "128", "0")
    snow_inner_add_yblock_mmx_accum("0", "136", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")
    snow_inner_add_yblock_mmx_end("16")
}
static void inner_add_yblock_bw_16_obmc_32_mmx(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                                               int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "16", "0")
    snow_inner_add_yblock_mmx_accum("1", "512", "0")
    snow_inner_add_yblock_mmx_accum("0", "528", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")

    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8")
    snow_inner_add_yblock_mmx_accum("2", "24", "8")
    snow_inner_add_yblock_mmx_accum("1", "520", "8")
    snow_inner_add_yblock_mmx_accum("0", "536", "8")
    snow_inner_add_yblock_mmx_mix("16", "8")
    snow_inner_add_yblock_mmx_end("32")
}
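
/* Dispatchers: route the block sizes the snow encoder actually emits to the
 * specialised kernels (b_w 16 with obmc_stride 32, b_w 8 with obmc_stride 16)
 * and fall back to the C ff_snow_inner_add_yblock() for anything else. The
 * SSE2 bw_8 kernel writes two rows per pass, hence the b_h parity check. */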
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16) {
        if (!(b_h & 1))
            inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
        else
            inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    } else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}
void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16)
        inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}