/*
 * Altivec optimized snow DSP utils
 * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"

#include "gcc_fixes.h"
#include "dsputil_altivec.h"
#include "../snow.h" /* slice_buffer, DWTELEM, FRAC_BITS and the W_* lifting constants */
//FIXME remove this replication
#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
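/* slice_buffer_get_line() returns the cached line when it is already
 * resident and otherwise falls back to slice_buffer_load_line(), which pops
 * a spare line off the buffer's data stack. Both duplicate helpers from
 * snow.c (hence the FIXME above). */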
static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
{
    int offset;
    DWTELEM * buffer;

//  av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);

    assert(buf->data_stack_top >= 0);
//  assert(!buf->line[line]);
    if (buf->line[line])
        return buf->line[line];

    offset = buf->line_width * line;
    buffer = buf->data_stack[buf->data_stack_top];
    buf->data_stack_top--;
    buf->line[line] = buffer;

//  av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);

    return buffer;
}
void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width)
{
    const int w2= (width+1)>>1;
    DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;
    vector signed int t1, t2, x, y, tmp1, tmp2;
    vector signed int *vbuf, *vtmp;
    vector unsigned char align;

    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;
        DWTELEM b_0 = b[0];

        vbuf = (vector signed int *)b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1 = vec_perm(tmp1, tmp2, align);
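        /* Unaligned-load idiom used throughout this file: vec_ld() only
         * loads from 16-byte aligned addresses, so two aligned loads
         * covering the target span (offsets 0 and 15) are combined with
         * vec_perm(), whose control vector comes from vec_lvsl(). t1 now
         * holds ref[0..3] whatever the alignment of ref. */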
        for (i=0; i<w_l-15; i+=16) {
#if 0
            /* scalar reference for one quarter of the unrolled vector body */
            b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
            b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
            b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
            b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);
#else
            tmp1 = vec_ld (0, ref+4+i);
            tmp2 = vec_ld (15, ref+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+8+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+8+i);

            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;
            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+12+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;
            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+16+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+16+i);

            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;
            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));
            *vbuf = vec_sub(*vbuf, y);

            t1 = t2;
            vbuf++;
#endif
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }
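    /* Lift 0 applies, per sample, the update shown in the #if 0 branch:
     * b[i] -= (3*(ref[i] + ref[i+1]) + 4) >> 3. The vector loop builds the
     * ref[i]+ref[i+1] pairs with vec_sld() (shifting in the next vector to
     * reach the +1 neighbours) and computes 3*y as y+y+y, since AltiVec has
     * no 32-bit multiply by an immediate. The lead-out handles the scalar
     * tail, and b[0] is patched up separately because the left edge mirrors
     * its missing neighbour (ref[1] counts twice). */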
    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }

        align = vec_lvsl(0, b+i);
        tmp1 = vec_ld(0, b+i);
        vbuf = (vector signed int*) (dst + i);
        tmp2 = vec_ld(15, b+i);

        t1 = vec_perm(tmp1, tmp2, align);
        for (; i<w_r-3; i+=4) {
#if 0
            dst[i]   = dst[i]   - (b[i]   + b[i + 1]);
            dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
            dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
            dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
#else
            tmp1 = vec_ld(0, b+4+i);
            tmp2 = vec_ld(15, b+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1, vec_sld(t1,t2,4));
            *vbuf = vec_sub (*vbuf, y);

            t1 = t2;
            vbuf++;
#endif
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }
    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];

        vbuf = (vector signed int *) b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1 = vec_perm(tmp1, tmp2, align);

        i = 0;
        for (; i<w_l-15; i+=16) {
#if 0
            /* scalar reference for one quarter of the unrolled vector body */
            b[i]   = b[i]   - (((8 -(ref[i]   + ref[i+1])) - (b[i]  <<2)) >> 4);
            b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
            b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
            b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
#else
            tmp1 = vec_ld (0, ref+4+i);
            tmp2 = vec_ld (15, ref+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+8+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+8+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;
            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+12+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;
            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+16+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+16+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;
            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;
            vbuf++;
#endif
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
    }
    { // Lift 3
        DWTELEM * const src = b+w2;

        vbuf = (vector signed int *)b;
        vtmp = (vector signed int *)temp;

        i = 0;
        align = vec_lvsl(0, src);
        for (; i<w_r-3; i+=4) {
#if 0
            temp[i]   = src[i]   - ((-3*(b[i]   + b[i+1]))>>1);
            temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
            temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
            temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
#else
            tmp1 = vec_ld(0,src+i);
            t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
            tmp2 = vec_ld(15,src+i);
            t1 = vec_sub(vec_splat_s32(0),t1); // negate: the -3 factor is built as 3*(-y)
            t1 = vec_add(t1,vec_add(t1,t1));
            t2 = vec_perm(tmp1 ,tmp2 ,align);
            t1 = vec_sra(t1,vec_splat_u32(1));

            *vtmp = vec_sub(t2,t1);

            vtmp++;
            vbuf++;
#endif
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
    }
    {
        // Interleave
        int a;
        vector signed int *t = (vector signed int *)temp,
                          *v = (vector signed int *)b;

        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
            a = i/4;

            v[a+3] = vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
            v[a+2] = vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
            v[a+1] = vec_mergel(v[a>>1],t[a>>1]);
            v[a]   = vec_mergeh(v[a>>1],t[a>>1]);
        }
    }
}
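/* The interleave stage scatters the low-pass coefficients (in b) to the
 * even output positions and the high-pass coefficients (in temp) to the odd
 * ones. vec_mergeh/vec_mergel perform four such even/odd interleaves at
 * once: mergeh([l0,l1,l2,l3], [h0,h1,h2,h3]) = [l0,h0,l1,h1]. Walking i
 * downwards lets b expand in place without overwriting unread input. */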
void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
{
    int i, w4 = width/4;
    vector signed int *v0, *v1, *v2, *v3, *v4, *v5;
    vector signed int t1, t2;

    v0 = (vector signed int *)b0;
    v1 = (vector signed int *)b1;
    v2 = (vector signed int *)b2;
    v3 = (vector signed int *)b3;
    v4 = (vector signed int *)b4;
    v5 = (vector signed int *)b5;
    for (i=0; i< w4; i++)
    {
#if 0
        b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
        b3[i] -= ((b2[i] + b4[i]));
        b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
        b1[i] += (3*(b0[i] + b2[i]))>>1;
#else
        t1 = vec_add(v3[i], v5[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        t1 = vec_add(t2, vec_splat_s32(4));
        v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));

        v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));

        t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
        t2 = vec_sl(v2[i], vec_splat_u32(2));
        v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));

        t1 = vec_add(v0[i], v2[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));
#endif
    }
    for(i*=4; i < width; i++)
    {
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
}
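/* Both loops above apply the four lifting steps of snow's 9/7 wavelet to
 * six consecutive lines, the vector loop four samples at a time and the
 * scalar loop on the remainder. The vector branch hardcodes what the W_*
 * macros expand to: e.g. the first step's y+y+y, splat(4) and >>3 match
 * (W_DM*(...)+W_DO)>>W_DS in the tail, as the #if 0 reference shows. */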
#define LOAD_BLOCKS \
    tmp1 = vec_ld(0, &block[3][y*src_stride]);\
    align = vec_lvsl(0, &block[3][y*src_stride]);\
    tmp2 = vec_ld(15, &block[3][y*src_stride]);\
\
    b3 = vec_perm(tmp1,tmp2,align);\
\
    tmp1 = vec_ld(0, &block[2][y*src_stride]);\
    align = vec_lvsl(0, &block[2][y*src_stride]);\
    tmp2 = vec_ld(15, &block[2][y*src_stride]);\
\
    b2 = vec_perm(tmp1,tmp2,align);\
\
    tmp1 = vec_ld(0, &block[1][y*src_stride]);\
    align = vec_lvsl(0, &block[1][y*src_stride]);\
    tmp2 = vec_ld(15, &block[1][y*src_stride]);\
\
    b1 = vec_perm(tmp1,tmp2,align);\
\
    tmp1 = vec_ld(0, &block[0][y*src_stride]);\
    align = vec_lvsl(0, &block[0][y*src_stride]);\
    tmp2 = vec_ld(15, &block[0][y*src_stride]);\
\
    b0 = vec_perm(tmp1,tmp2,align);
#define LOAD_OBMCS \
    tmp1 = vec_ld(0, obmc1);\
    align = vec_lvsl(0, obmc1);\
    tmp2 = vec_ld(15, obmc1);\
\
    ob1 = vec_perm(tmp1,tmp2,align);\
\
    tmp1 = vec_ld(0, obmc2);\
    align = vec_lvsl(0, obmc2);\
    tmp2 = vec_ld(15, obmc2);\
\
    ob2 = vec_perm(tmp1,tmp2,align);\
\
    tmp1 = vec_ld(0, obmc3);\
    align = vec_lvsl(0, obmc3);\
    tmp2 = vec_ld(15, obmc3);\
\
    ob3 = vec_perm(tmp1,tmp2,align);\
\
    tmp1 = vec_ld(0, obmc4);\
    align = vec_lvsl(0, obmc4);\
    tmp2 = vec_ld(15, obmc4);\
\
    ob4 = vec_perm(tmp1,tmp2,align);
/* steps:
 * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ]
 * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ]
 * h  <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ]
 */
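/* The diagram reads as follows: merging ob1/ob2 and ob3/ob4 pairwise and
 * then merging those results again lines up the four OBMC window weights
 * a,b,c,d of every output pixel next to each other, in the same order as
 * the block samples interleaved below, so one vec_msum() per four pixels
 * can form the weighted sums. */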
#define STEPS_0_1\
    h1 = (vector unsigned short)\
         vec_mergeh(ob1, ob2);\
\
    h2 = (vector unsigned short)\
         vec_mergeh(ob3, ob4);\
\
    ih = (vector unsigned char)\
         vec_mergeh(h1, h2);\
\
    l1 = (vector unsigned short) vec_mergeh(b3, b2);\
\
    ih1 = (vector unsigned char) vec_mergel(h1, h2);\
\
    l2 = (vector unsigned short) vec_mergeh(b1, b0);\
\
    il = (vector unsigned char) vec_mergeh(l1, l2);\
\
    v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
    il1 = (vector unsigned char) vec_mergel(l1, l2);\
\
    v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
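/* STEPS_0_1 pairs each group of four OBMC weights (ih, ih1) with the four
 * corresponding source-block bytes (il, il1); vec_msum() then multiplies
 * the 16 byte pairs and sums them four at a time, so every 32-bit lane of
 * v[0] and v[1] ends up holding the OBMC-weighted sum of the four
 * overlapping block predictions for one output pixel. */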
#define FINAL_STEP_SCALAR\
    for(x=0; x<b_w; x++)\
        if(add){\
            vbuf[x] += dst[x + src_x];\
            vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
            if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
            dst8[x + y*src_stride] = vbuf[x];\
        }else{\
            dst[x + src_x] -= vbuf[x];\
        }
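/* FINAL_STEP_SCALAR adds the slice-buffer line to the weighted prediction,
 * descales by FRAC_BITS with rounding and clamps to 0..255 before storing
 * to dst8. The clamp is branch-light: if any bit above the low byte is set
 * the value is out of range, and ~(vbuf[x]>>31) yields 0 for negative
 * values and 255 for positive overflow. With add==0 the prediction is
 * subtracted from the slice buffer instead. */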
static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
                                                  const int obmc_stride,
                                                  uint8_t * * block, int b_w,
                                                  int b_h, int src_x, int src_y,
                                                  int src_stride, slice_buffer * sb,
                                                  int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        //FIXME I could avoid some loads!

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1
        STEPS_0_1

        FINAL_STEP_SCALAR
    }
}
#define STEPS_2_3\
    h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
\
    h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
\
    ih = (vector unsigned char) vec_mergeh(h1,h2);\
\
    l1 = (vector unsigned short) vec_mergel(b3, b2);\
\
    l2 = (vector unsigned short) vec_mergel(b1, b0);\
\
    ih1 = (vector unsigned char) vec_mergel(h1,h2);\
\
    il = (vector unsigned char) vec_mergeh(l1,l2);\
\
    v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
    il1 = (vector unsigned char) vec_mergel(l1,l2);\
\
    v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
                                                   const int obmc_stride,
                                                   uint8_t * * block, int b_w,
                                                   int b_h, int src_x, int src_y,
                                                   int src_stride, slice_buffer * sb,
                                                   int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1 2 3
        STEPS_0_1
        STEPS_2_3

        FINAL_STEP_SCALAR
    }
}
#define FINAL_STEP_VEC \
\
    if(add)\
        {\
            for(x=0; x<b_w/4; x++)\
            {\
                v[x] = vec_add(v[x], d[x]);\
                /* descale with rounding: (v + (1<<(FRAC_BITS-1))) >> FRAC_BITS, FRAC_BITS being 8 */\
                v[x] = vec_sra(vec_add(v[x],\
                                       vec_sl( vec_splat_s32(1),\
                                               vec_splat_u32(7))),\
                               vec_splat_u32(8));\
\
                /* mask <- true where (v & ~255) == 0, i.e. v is in range */\
                mask = (vector bool int) vec_sl((vector signed int)\
                        vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
                mask = (vector bool int) vec_and(v[x],mask);\
                mask = (vector bool int)\
                        vec_cmpeq((vector signed int)mask,\
                                  (vector signed int)vec_splat_u32(0));\
\
                /* vs <- v>>31; vec_splat_u32() tops out at 15, so chain 8+8+15 */\
                vs = vec_sra(v[x],vec_splat_u32(8));\
                vs = vec_sra(vs,  vec_splat_u32(8));\
                vs = vec_sra(vs,  vec_splat_u32(15));\
\
                /* ~(v>>31): 0 for negative v, all-ones (255 as a byte) for overflow */\
                vs = vec_nor(vs,vs);\
\
                /* keep in-range values, substitute the saturated ones */\
                v[x] = vec_sel(vs, v[x], mask);\
            }\
\
            for(x=0; x<b_w; x++)\
                dst8[x + y*src_stride] = vbuf[x];\
        }\
    else\
        for(x=0; x<b_w/4; x++)\
            d[x] = vec_sub(d[x], v[x]);
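/* FINAL_STEP_VEC mirrors FINAL_STEP_SCALAR four pixels at a time. Note the
 * select order: vec_sel(vs, v[x], mask) keeps v[x] where mask flags it as
 * in range and takes the saturated value vs everywhere else, matching the
 * scalar reference; the chained shifts stand in for the >>31 that
 * vec_splat_u32() cannot encode directly. */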
static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
                                                    const int obmc_stride,
                                                    uint8_t * * block, int b_w,
                                                    int b_h, int src_x, int src_y,
                                                    int src_stride, slice_buffer * sb,
                                                    int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        //FIXME I could avoid some loads!

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1
        STEPS_0_1

        FINAL_STEP_VEC
    }
}
static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
                                                     const int obmc_stride,
                                                     uint8_t * * block, int b_w,
                                                     int b_h, int src_x, int src_y,
                                                     int src_stride, slice_buffer * sb,
                                                     int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0,b1,b2,b3;
    vector unsigned char ob1,ob2,ob3,ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1 2 3
        STEPS_0_1
        STEPS_2_3

        FINAL_STEP_VEC
    }
}
void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
                                      uint8_t * * block, int b_w, int b_h,
                                      int src_x, int src_y, int src_stride,
                                      slice_buffer * sb, int add,
                                      uint8_t * dst8)
{
    if (src_x&15) {
        if (b_w == 16)
            inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                   b_w, b_h, src_x, src_y,
                                                   src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                  b_w, b_h, src_x, src_y,
                                                  src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    } else {
        if (b_w == 16)
            inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                     b_w, b_h, src_x, src_y,
                                                     src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                    b_w, b_h, src_x, src_y,
                                                    src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    }
}
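/* The _a_ variants assume (dst + src_x) is 16-byte aligned and run the
 * fully vectorized FINAL_STEP_VEC; when src_x breaks that alignment the
 * variants with the scalar final step are chosen instead, and block widths
 * other than 8 or 16 fall back to the C implementation
 * ff_snow_inner_add_yblock(). */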
void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
    c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
    c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
}