/*
 * Altivec optimized snow DSP utils
 * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"

#include "gcc_fixes.h"
#include "dsputil_altivec.h"
#include "../snow.h"

#include <assert.h>
//FIXME remove this replication
#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
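/* Replicated slice-buffer line accessor (see the FIXME above): return the
 * requested line if it is already resident, otherwise pop a free buffer off
 * the data stack and attach it to that line. */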
static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
{
    int offset;
    DWTELEM * buffer;

//  av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);

    assert(buf->data_stack_top >= 0);
//  assert(!buf->line[line]);
    if (buf->line[line])
        return buf->line[line];

    offset = buf->line_width * line;
    buffer = buf->data_stack[buf->data_stack_top];
    buf->data_stack_top--;
    buf->line[line] = buffer;

//  av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);

    return buffer;
}
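/*
 * Horizontal part of the inverse 9/7 integer lifting transform used by Snow.
 * The four lifting passes below update the low-pass half b[0..w_l-1] and the
 * high-pass half b[w2..width-1] in place, then the final block interleaves
 * the two halves back into a single line.  Each vector iteration handles
 * four 32-bit coefficients; vec_lvsl/vec_perm emulate unaligned loads of the
 * shifted "ref" operands.
 */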
void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width)
{
    const int w2= (width+1)>>1;
    DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;
    vector signed int t1, t2, x, y, tmp1, tmp2;
    vector signed int *vbuf, *vtmp;
    vector unsigned char align;

    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;
        DWTELEM b_0 = b[0];
        vbuf = (vector signed int *)b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1= vec_perm(tmp1, tmp2, align);
        for (i=0; i<w_l-15; i+=16) {
#if 0 /* scalar reference for the vector code below */
        b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
        b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
        b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
        b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);
#else
        tmp1 = vec_ld (0, ref+4+i);
        tmp2 = vec_ld (15, ref+4+i);
        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_add(vec_add(y,y),y);

        tmp1 = vec_ld (0, ref+8+i);
        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));
        tmp2 = vec_ld (15, ref+8+i);

        *vbuf = vec_sub(*vbuf, y);
        t1 = t2;
        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_add(vec_add(y,y),y);

        tmp1 = vec_ld (0, ref+12+i);
        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));
        tmp2 = vec_ld (15, ref+12+i);

        *vbuf = vec_sub(*vbuf, y);
        t1 = t2;
        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_add(vec_add(y,y),y);

        tmp1 = vec_ld (0, ref+16+i);
        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));
        tmp2 = vec_ld (15, ref+16+i);

        *vbuf = vec_sub(*vbuf, y);
        t1 = t2;
        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_add(vec_add(y,y),y);

        y = vec_add(y, vec_splat_s32(4));
        y = vec_sra(y, vec_splat_u32(3));
        *vbuf = vec_sub(*vbuf, y);
        t1 = t2;
        vbuf++;
#endif
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
        align = vec_lvsl(0, b+i);
        tmp1 = vec_ld(0, b+i);
        vbuf = (vector signed int*) (dst + i);
        tmp2 = vec_ld(15, b+i);

        t1 = vec_perm(tmp1, tmp2, align);
        for (; i<w_r-3; i+=4) {
#if 0 /* scalar reference for the vector code below */
            dst[i]   = dst[i]   - (b[i]   + b[i + 1]);
            dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
            dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
            dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
#else
            tmp1 = vec_ld(0, b+4+i);
            tmp2 = vec_ld(15, b+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1, vec_sld(t1,t2,4));
            *vbuf = vec_sub (*vbuf, y);

            t1 = t2;
            vbuf++;
#endif
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];
        vbuf= (vector signed int *) b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1= vec_perm(tmp1, tmp2, align);

        i = 0;
        for (; i<w_l-15; i+=16) {
#if 0 /* scalar reference for the vector code below */
        b[i]   = b[i]   - (((8 -(ref[i]   + ref[i+1])) - (b[i]  <<2)) >> 4);
        b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
        b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
        b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
#else
        tmp1 = vec_ld (0, ref+4+i);
        tmp2 = vec_ld (15, ref+4+i);
        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_sub(vec_splat_s32(8),y);

        tmp1 = vec_ld (0, ref+8+i);
        x = vec_sl(*vbuf,vec_splat_u32(2));
        y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
        tmp2 = vec_ld (15, ref+8+i);

        *vbuf = vec_sub( *vbuf, y);
        t1 = t2;
        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_sub(vec_splat_s32(8),y);

        tmp1 = vec_ld (0, ref+12+i);
        x = vec_sl(*vbuf,vec_splat_u32(2));
        y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
        tmp2 = vec_ld (15, ref+12+i);

        *vbuf = vec_sub( *vbuf, y);
        t1 = t2;
        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_sub(vec_splat_s32(8),y);

        tmp1 = vec_ld (0, ref+16+i);
        x = vec_sl(*vbuf,vec_splat_u32(2));
        y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
        tmp2 = vec_ld (15, ref+16+i);

        *vbuf = vec_sub( *vbuf, y);
        t1 = t2;
        vbuf++;

        t2 = vec_perm(tmp1, tmp2, align);

        y = vec_add(t1,vec_sld(t1,t2,4));
        y = vec_sub(vec_splat_s32(8),y);

        x = vec_sl(*vbuf,vec_splat_u32(2));
        y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
        *vbuf = vec_sub( *vbuf, y);
        t1 = t2;
        vbuf++;
#endif
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
    }

    { // Lift 3
        DWTELEM * const src = b+w2;

        vbuf = (vector signed int *)b;
        vtmp = (vector signed int *)temp;

        i = 0;
        align = vec_lvsl(0, src);
        for (; i<w_r-3; i+=4) {
#if 0 /* scalar reference for the vector code below */
            temp[i]   = src[i]   - ((-3*(b[i]   + b[i+1]))>>1);
            temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
            temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
            temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
#else
            tmp1 = vec_ld(0,src+i);
            t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
            tmp2 = vec_ld(15,src+i);
            t1 = vec_sub(vec_splat_s32(0),t1); //bad!
            t1 = vec_add(t1,vec_add(t1,t1));
            t2 = vec_perm(tmp1 ,tmp2 ,align);
            t1 = vec_sra(t1,vec_splat_u32(1));

            *vtmp = vec_sub(t2,t1);

            vbuf++;
            vtmp++;
#endif
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
    }

    { // Interleave the low and high halves back into b[]
        vector signed int *t = (vector signed int *)temp,
                          *v = (vector signed int *)b;

        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i]   = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
            int a = i/4;

            v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
            v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
            v[a+1]=vec_mergel(v[a>>1],t[a>>1]);
            v[a]=vec_mergeh(v[a>>1],t[a>>1]);
        }
    }
}
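/*
 * Vertical part of the inverse 9/7 lifting: all four lifting steps are
 * applied at once across six consecutive lines (b0..b5).  The main loop
 * works on four coefficients per iteration; leftover columns are handled
 * by the scalar loop at the end.
 */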
void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
{
    int i;
    vector signed int *v0, *v1, *v2, *v3, *v4, *v5;
    vector signed int t1, t2;

    v0 = (vector signed int *)b0;
    v1 = (vector signed int *)b1;
    v2 = (vector signed int *)b2;
    v3 = (vector signed int *)b3;
    v4 = (vector signed int *)b4;
    v5 = (vector signed int *)b5;
    for (i=0; i< width/4; i++) {
#if 0 /* scalar reference for the vector code below */
        b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
        b3[i] -= ((b2[i] + b4[i]));
        b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
        b1[i] += (3*(b0[i] + b2[i]))>>1;
#else
        t1 = vec_add(v3[i], v5[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        t1 = vec_add(t2, vec_splat_s32(4));
        v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));

        v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));

        t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
        t2 = vec_sl(v2[i], vec_splat_u32(2));
        v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));

        t1 = vec_add(v0[i], v2[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));
#endif
    }
    for(i*=4; i < width; i++) {
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
}
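/*
 * The inner_add_yblock_* helpers below are assembled from macros: the block
 * and OBMC loads go through vec_lvsl/vec_perm to cope with unaligned
 * addresses, the 8-bit weights and samples are interleaved with vec_merge*
 * and accumulated as weight*sample sums with vec_msum, and FINAL_STEP_SCALAR
 * / FINAL_STEP_VEC either add the result into the slice buffer or round,
 * clamp and store it to dst8.
 */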
#define LOAD_BLOCKS \
        tmp1 = vec_ld(0, &block[3][y*src_stride]);\
        align = vec_lvsl(0, &block[3][y*src_stride]);\
        tmp2 = vec_ld(15, &block[3][y*src_stride]);\
        b3 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, &block[2][y*src_stride]);\
        align = vec_lvsl(0, &block[2][y*src_stride]);\
        tmp2 = vec_ld(15, &block[2][y*src_stride]);\
        b2 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, &block[1][y*src_stride]);\
        align = vec_lvsl(0, &block[1][y*src_stride]);\
        tmp2 = vec_ld(15, &block[1][y*src_stride]);\
        b1 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, &block[0][y*src_stride]);\
        align = vec_lvsl(0, &block[0][y*src_stride]);\
        tmp2 = vec_ld(15, &block[0][y*src_stride]);\
        b0 = vec_perm(tmp1,tmp2,align);
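/* Same unaligned-load pattern for one line of each of the four OBMC weight
 * tables (obmc1..obmc4), giving the byte vectors ob1..ob4. */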
#define LOAD_OBMCS \
        tmp1 = vec_ld(0, obmc1);\
        align = vec_lvsl(0, obmc1);\
        tmp2 = vec_ld(15, obmc1);\
        ob1 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, obmc2);\
        align = vec_lvsl(0, obmc2);\
        tmp2 = vec_ld(15, obmc2);\
        ob2 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, obmc3);\
        align = vec_lvsl(0, obmc3);\
        tmp2 = vec_ld(15, obmc3);\
        ob3 = vec_perm(tmp1,tmp2,align);\
\
        tmp1 = vec_ld(0, obmc4);\
        align = vec_lvsl(0, obmc4);\
        tmp2 = vec_ld(15, obmc4);\
        ob4 = vec_perm(tmp1,tmp2,align);
/* Steps 0 and 1: interleave the OBMC weights and the block samples so that
 * vec_msum can accumulate weight*sample pairs:
 * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ]
 * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ]
 * h  <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ]
 */
#define STEPS_0_1 \
        h1 = (vector unsigned short)\
             vec_mergeh(ob1, ob2);\
\
        h2 = (vector unsigned short)\
             vec_mergeh(ob3, ob4);\
\
        ih = (vector unsigned char)\
             vec_mergeh(h1, h2);\
\
        l1 = (vector unsigned short) vec_mergeh(b3, b2);\
\
        ih1 = (vector unsigned char) vec_mergel(h1, h2);\
\
        l2 = (vector unsigned short) vec_mergeh(b1, b0);\
\
        il = (vector unsigned char) vec_mergeh(l1, l2);\
\
        v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
        il1 = (vector unsigned char) vec_mergel(l1, l2);\
\
        v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
#define FINAL_STEP_SCALAR\
        for(x=0; x<b_w; x++)\
            if(add){\
                vbuf[x] += dst[x + src_x];\
                vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
                if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
                dst8[x + y*src_stride] = vbuf[x];\
            }else{\
                dst[x + src_x] -= vbuf[x];\
            }
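/*
 * One line at a time: weight the four source blocks with the OBMC window,
 * accumulate into vbuf[], then let FINAL_STEP_SCALAR combine the result with
 * the slice buffer line (and, when add is set, round and clamp into dst8).
 * This variant handles b_w == 8 with a 16-wide OBMC table; the destination
 * line is not assumed to be 16-byte aligned, so the final step stays scalar.
 */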
static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
                                                   const int obmc_stride,
                                                   uint8_t * * block, int b_w,
                                                   int b_h, int src_x, int src_y,
                                                   int src_stride, slice_buffer * sb,
                                                   int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;
    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        //FIXME I could avoid some loads!

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1
        STEPS_0_1

        FINAL_STEP_SCALAR
    }
}
#define STEPS_2_3 \
        h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
\
        h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
\
        ih = (vector unsigned char) vec_mergeh(h1,h2);\
\
        l1 = (vector unsigned short) vec_mergel(b3, b2);\
\
        l2 = (vector unsigned short) vec_mergel(b1, b0);\
\
        ih1 = (vector unsigned char) vec_mergel(h1,h2);\
\
        il = (vector unsigned char) vec_mergeh(l1,l2);\
\
        v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
\
        il1 = (vector unsigned char) vec_mergel(l1,l2);\
\
        v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
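/* Same as above for b_w == 16 with a 32-wide OBMC table: two merge/msum
 * passes per line fill v[0..3] (16 sums) before the scalar final step. */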
static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
                                                    const int obmc_stride,
                                                    uint8_t * * block, int b_w,
                                                    int b_h, int src_x, int src_y,
                                                    int src_stride, slice_buffer * sb,
                                                    int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;
    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1 2 3
        STEPS_0_1

        STEPS_2_3

        FINAL_STEP_SCALAR
    }
}
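/* Vector final step used by the aligned variants: with add, the slice buffer
 * line is added, the sum is rounded by FRAC_BITS and clamped to 0..255 via a
 * select mask before the bytes are stored to dst8; without add, the block
 * contribution is subtracted from the slice buffer. */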
#define FINAL_STEP_VEC \
\
        if(add)\
            {\
            for(x=0; x<b_w/4; x++)\
                {\
                v[x] = vec_add(v[x], d[x]);\
                v[x] = vec_sra(vec_add(v[x],\
                               vec_sl( vec_splat_s32(1),\
                                       vec_splat_u32(7))),\
                               vec_splat_u32(8));\
\
                mask = (vector bool int) vec_sl((vector signed int)\
                        vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
                mask = (vector bool int) vec_and(v[x],vec_nor(mask,mask));\
\
                mask = (vector bool int)\
                        vec_cmpeq((vector signed int)mask,\
                                  (vector signed int)vec_splat_u32(0));\
\
                vs = vec_sra(v[x],vec_splat_u32(15));\
                vs = vec_nor(vs,vs);\
\
                v[x]= vec_sel(v[x],vs,mask);\
                }\
\
            for(x=0; x<b_w; x++)\
                dst8[x + y*src_stride] = vbuf[x];\
            }\
        else\
            {\
            for(x=0; x<b_w/4; x++)\
                d[x] = vec_sub(d[x], v[x]);\
            }
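/* The _a_ variants below are identical to the two functions above except
 * that the slice buffer line at dst + src_x is expected to be 16-byte
 * aligned, so FINAL_STEP_VEC can add, clamp and store with vector
 * operations instead of the scalar final step. */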
static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
                                                     const int obmc_stride,
                                                     uint8_t * * block, int b_w,
                                                     int b_h, int src_x, int src_y,
                                                     int src_stride, slice_buffer * sb,
                                                     int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;

    DECLARE_ALIGNED_16(int, vbuf[16]);
    vector signed int *v = (vector signed int *)vbuf, *d;
    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        //FIXME I could avoid some loads!

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1
        STEPS_0_1

        FINAL_STEP_VEC
    }
}
static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
                                                      const int obmc_stride,
                                                      uint8_t * * block, int b_w,
                                                      int b_h, int src_x, int src_y,
                                                      int src_stride, slice_buffer * sb,
                                                      int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;
    vector unsigned char ob1, ob2, ob3, ob4;
    DECLARE_ALIGNED_16(int, vbuf[b_w]);
    vector signed int *v = (vector signed int *)vbuf, *d;
    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride
        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        // load blocks
        LOAD_BLOCKS

        // load obmcs
        LOAD_OBMCS

        // steps 0 1 2 3
        STEPS_0_1

        STEPS_2_3

        FINAL_STEP_VEC
    }
}
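/*
 * Entry point registered in DSPContext: pick the AltiVec variant that
 * matches the block width (8 or 16) and the alignment of the destination
 * position in the slice buffer, and fall back to the generic C
 * implementation ff_snow_inner_add_yblock() for any other block width.
 */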
void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
                                      uint8_t * * block, int b_w, int b_h,
                                      int src_x, int src_y, int src_stride,
                                      slice_buffer * sb, int add,
                                      uint8_t * dst8)
{
    if (src_x&15) {
        if (b_w == 16)
            inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                   b_w, b_h, src_x, src_y,
                                                   src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                  b_w, b_h, src_x, src_y,
                                                  src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    } else {
        if (b_w == 16)
            inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
                                                     b_w, b_h, src_x, src_y,
                                                     src_stride, sb, add, dst8);
        else if (b_w == 8)
            inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                                    b_w, b_h, src_x, src_y,
                                                    src_stride, sb, add, dst8);
        else
            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                     src_y, src_stride, sb, add, dst8);
    }
}
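/* Hook the AltiVec implementations into the DSP context. */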
void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
    c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
    c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
}