/*****************************************************************************
 * deblock.c: Altivec-accelerated deblocking for h264 encoder
 *****************************************************************************
 * Copyright (C) 2007-2008 Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 *****************************************************************************/

#include "common/common.h"
#include "ppccommon.h"
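
/** \brief In-place transpose of a 4x16 byte block: on exit, each aligned
 *  4-byte group read across r0..r3 in order holds one column of the four
 *  input rows.  Used to turn the four filtered lines (p1, p0, q0, q1) back
 *  into per-row 4-byte units before write16x4(). */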
#define transpose4x16(r0, r1, r2, r3) {     \
    register vec_u8_t r4;                   \
    register vec_u8_t r5;                   \
    register vec_u8_t r6;                   \
    register vec_u8_t r7;                   \
                                            \
    r4 = vec_mergeh(r0, r2); /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2); /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3); /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3); /*1, 3 set 1*/ \
                                            \
    r0 = vec_mergeh(r4, r6); /*all set 0*/  \
    r1 = vec_mergel(r4, r6); /*all set 1*/  \
    r2 = vec_mergeh(r5, r7); /*all set 2*/  \
    r3 = vec_mergel(r5, r7); /*all set 3*/  \
}
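
/** \brief Store four transposed vectors as sixteen 4-byte rows at dst, one
 *  row per dst_stride.  Goes through an aligned scratch buffer because dst
 *  is not 16-byte aligned in general. */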
static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8_t r0, register vec_u8_t r1,
                             register vec_u8_t r2, register vec_u8_t r3) {
    DECLARE_ALIGNED_16(unsigned char result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** \brief Read 16 rows starting at src and transpose the first 6 bytes of
 *  each row into the 16-byte vectors r8..r13, one vector per column
 *  (p2..q2 when src = pix-3).
 *  \todo FIXME: see if we can spare some vec_lvsl() calls by factorizing
 *  them out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) { \
    register vec_u8_t r0, r1, r2, r3, r4, r5, r6, r7, r14, r15;            \
    VEC_LOAD(src,                  r0, 16, vec_u8_t);                      \
    VEC_LOAD(src +    src_stride,  r1, 16, vec_u8_t);                      \
    VEC_LOAD(src +  2*src_stride,  r2, 16, vec_u8_t);                      \
    VEC_LOAD(src +  3*src_stride,  r3, 16, vec_u8_t);                      \
    VEC_LOAD(src +  4*src_stride,  r4, 16, vec_u8_t);                      \
    VEC_LOAD(src +  5*src_stride,  r5, 16, vec_u8_t);                      \
    VEC_LOAD(src +  6*src_stride,  r6, 16, vec_u8_t);                      \
    VEC_LOAD(src +  7*src_stride,  r7, 16, vec_u8_t);                      \
    VEC_LOAD(src + 14*src_stride, r14, 16, vec_u8_t);                      \
    VEC_LOAD(src + 15*src_stride, r15, 16, vec_u8_t);                      \
                                                                           \
    VEC_LOAD(src +  8*src_stride,  r8, 16, vec_u8_t);                      \
    VEC_LOAD(src +  9*src_stride,  r9, 16, vec_u8_t);                      \
    VEC_LOAD(src + 10*src_stride, r10, 16, vec_u8_t);                      \
    VEC_LOAD(src + 11*src_stride, r11, 16, vec_u8_t);                      \
    VEC_LOAD(src + 12*src_stride, r12, 16, vec_u8_t);                      \
    VEC_LOAD(src + 13*src_stride, r13, 16, vec_u8_t);                      \
                                                                           \
    /*Merge first pairs*/                                                  \
    r0 = vec_mergeh(r0, r8);  /*0, 8*/                                     \
    r1 = vec_mergeh(r1, r9);  /*1, 9*/                                     \
    r2 = vec_mergeh(r2, r10); /*2,10*/                                     \
    r3 = vec_mergeh(r3, r11); /*3,11*/                                     \
    r4 = vec_mergeh(r4, r12); /*4,12*/                                     \
    r5 = vec_mergeh(r5, r13); /*5,13*/                                     \
    r6 = vec_mergeh(r6, r14); /*6,14*/                                     \
    r7 = vec_mergeh(r7, r15); /*7,15*/                                     \
                                                                           \
    /*Merge second pairs*/                                                 \
    r8  = vec_mergeh(r0, r4); /*0,4, 8,12 set 0*/                          \
    r9  = vec_mergel(r0, r4); /*0,4, 8,12 set 1*/                          \
    r10 = vec_mergeh(r1, r5); /*1,5, 9,13 set 0*/                          \
    r11 = vec_mergel(r1, r5); /*1,5, 9,13 set 1*/                          \
    r12 = vec_mergeh(r2, r6); /*2,6,10,14 set 0*/                          \
    r13 = vec_mergel(r2, r6); /*2,6,10,14 set 1*/                          \
    r14 = vec_mergeh(r3, r7); /*3,7,11,15 set 0*/                          \
    r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/                          \
                                                                           \
    /*Third merge*/                                                        \
    r0 = vec_mergeh(r8, r12);  /*0,2,4,6,8,10,12,14 set 0*/                \
    r1 = vec_mergel(r8, r12);  /*0,2,4,6,8,10,12,14 set 1*/                \
    r2 = vec_mergeh(r9, r13);  /*0,2,4,6,8,10,12,14 set 2*/                \
    r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/                \
    r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/                \
    r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/                \
    /* Don't need to compute 3 and 7*/                                     \
                                                                           \
    /*Final merge*/                                                        \
    r8  = vec_mergeh(r0, r4); /*all set 0*/                                \
    r9  = vec_mergel(r0, r4); /*all set 1*/                                \
    r10 = vec_mergeh(r1, r5); /*all set 2*/                                \
    r11 = vec_mergel(r1, r5); /*all set 3*/                                \
    r12 = vec_mergeh(r2, r6); /*all set 4*/                                \
    r13 = vec_mergel(r2, r6); /*all set 5*/                                \
    /* Don't need to compute 14 and 15*/                                   \
}

// out: o = |x-y| < a
static inline vec_u8_t diff_lt_altivec( register vec_u8_t x,
                                        register vec_u8_t y,
                                        register vec_u8_t a ) {
    register vec_u8_t diff    = vec_subs(x, y);
    register vec_u8_t diffneg = vec_subs(y, x);
    register vec_u8_t o       = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8_t)vec_cmplt(o, a);
    return o;
}
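
/* Edge-activity mask: a result byte is all-ones where
 * |p0-q0| < alpha && |p1-p0| < beta && |q1-q0| < beta,
 * i.e. where the deblocking filter should run for that pixel position. */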
static inline vec_u8_t h264_deblock_mask( register vec_u8_t p0,
                                          register vec_u8_t p1,
                                          register vec_u8_t q0,
                                          register vec_u8_t q1,
                                          register vec_u8_t alpha,
                                          register vec_u8_t beta ) {
    register vec_u8_t mask;
    register vec_u8_t tempmask;

    mask     = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask     = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask     = vec_and(mask, tempmask);
    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8_t h264_deblock_q1( register vec_u8_t p0,
                                        register vec_u8_t p1,
                                        register vec_u8_t p2,
                                        register vec_u8_t q0,
                                        register vec_u8_t tc0 ) {
    register vec_u8_t average = vec_avg(p0, q0);
    register vec_u8_t temp;
    register vec_u8_t unclipped;
    register vec_u8_t ones;
    register vec_u8_t max;
    register vec_u8_t min;
    register vec_u8_t newp1;

    temp      = vec_xor(average, p2);
    average   = vec_avg(average, p2);    /* avg(p2, avg(p0, q0)) */
    ones      = vec_splat_u8(1);
    temp      = vec_and(temp, ones);     /* (p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2+((p0+q0+1)>>1))>>1 */
    max       = vec_adds(p1, tc0);
    min       = vec_subs(p1, tc0);
    newp1     = vec_max(min, unclipped);
    newp1     = vec_min(max, newp1);
    return newp1;
}
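
/* p0/q0 update.  For reference (scalar sketch only, the clip helper names are
 * illustrative), this is the usual normal-filter delta:
 *
 *     delta = clip3( -tc, tc, ((q0 - p0)*4 + (p1 - q1) + 4) >> 3 );
 *     p0'   = clip_uint8( p0 + delta );
 *     q0'   = clip_uint8( q0 - delta );
 *
 * The macro below computes the same quantity biased by +160 using vec_avg()
 * so that everything stays in unsigned bytes, splits it into a positive part
 * (delta) and a negative part (deltaneg), clips both against tc0masked, and
 * applies them with saturating adds/subs. */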
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                          \
    const vec_u8_t A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));    /* 160 */                 \
                                                                                                 \
    register vec_u8_t pq0bit = vec_xor(p0,q0);                                                   \
    register vec_u8_t q1minus;                                                                   \
    register vec_u8_t p0minus;                                                                   \
    register vec_u8_t stage1;                                                                    \
    register vec_u8_t stage2;                                                                    \
    register vec_u8_t vec160;                                                                    \
    register vec_u8_t delta;                                                                     \
    register vec_u8_t deltaneg;                                                                  \
                                                                                                 \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                    \
    stage1  = vec_avg(p1, q1minus);            /* (p1 - q1 + 256)>>1 */                          \
    stage2  = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */    \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                    \
    stage1  = vec_avg(q0, p0minus);            /* (q0 - p0 + 256)>>1 */                          \
    pq0bit  = vec_and(pq0bit, vec_splat_u8(1));                                                  \
    stage2  = vec_avg(stage2, pq0bit);         /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */\
    stage2  = vec_adds(stage2, stage1);        /* 160 + ((q0 - p0) + (p1 - q1) >> 2 + 1) >> 1 */ \
    vec160  = vec_ld(0, &A0v);                                                                   \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                          \
    delta    = vec_subs(stage2, vec160);       /* d */                                           \
    deltaneg = vec_min(tc0masked, deltaneg);                                                     \
    delta    = vec_min(tc0masked, delta);                                                        \
    p0 = vec_subs(p0, deltaneg);                                                                 \
    q0 = vec_subs(q0, delta);                                                                    \
    p0 = vec_adds(p0, delta);                                                                    \
    q0 = vec_adds(q0, deltaneg);                                                                 \
}
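
/* Normal (non-intra) luma filtering of one 16-pixel edge.  p2..q2 each hold
 * the 16 samples of one line parallel to the edge, alpha/beta are the content
 * activity thresholds, and tc0 points to the four per-4-pixel clipping values.
 * p0 and q0 are updated wherever the mask is set; p1 (resp. q1) is only
 * updated where |p2-p0| (resp. |q2-q0|) is additionally below beta. */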
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {        \
    DECLARE_ALIGNED_16(unsigned char temp[16]);                                          \
    register vec_u8_t alphavec;                                                          \
    register vec_u8_t betavec;                                                           \
    register vec_u8_t mask;                                                              \
    register vec_u8_t p1mask;                                                            \
    register vec_u8_t q1mask;                                                            \
    register vec_s8_t tc0vec;                                                            \
    register vec_u8_t finaltc0;                                                          \
    register vec_u8_t tc0masked;                                                         \
    register vec_u8_t newp1;                                                             \
    register vec_u8_t newq1;                                                             \
                                                                                         \
    temp[0] = alpha;                                                                     \
    temp[1] = beta;                                                                      \
    alphavec = vec_ld(0, temp);                                                          \
    betavec  = vec_splat(alphavec, 0x1);                                                 \
    alphavec = vec_splat(alphavec, 0x0);                                                 \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */       \
                                                                                         \
    *((int *)temp) = *((int *)tc0);                                                      \
    tc0vec = vec_ld(0, (signed char*)temp);                                              \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                 \
    tc0vec = vec_mergeh(tc0vec, tc0vec); /* splat each tc0[i] over its 4 pixels */       \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */     \
    finaltc0 = vec_and((vec_u8_t)tc0vec, mask);                 /* tc = tc0 */           \
                                                                                         \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                           \
    p1mask = vec_and(p1mask, mask);                    /* if( |p2 - p0| < beta) */       \
    tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec);                                       \
    finaltc0 = vec_sub(finaltc0, p1mask);              /* tc++ */                        \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                  \
                                                                                         \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                           \
    q1mask = vec_and(q1mask, mask);                    /* if ( |q2 - q0| < beta ) */     \
    tc0masked = vec_and(q1mask, (vec_u8_t)tc0vec);                                       \
    finaltc0 = vec_sub(finaltc0, q1mask);              /* tc++ */                        \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                  \
                                                                                         \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                        \
    p1 = newp1;                                                                          \
    q1 = newq1;                                                                          \
}
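
/* Filter in the vertical direction, i.e. across a horizontal edge: pix points
 * to the first row on the q side, so the rows at pix-3*stride .. pix+2*stride
 * are p2..q2.  vec_ld/vec_st ignore the low 4 address bits, so each row is
 * assumed to be 16-byte aligned here. */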
void x264_deblock_v_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) { /* skip only if all four tc0 are negative */
        register vec_u8_t p2 = vec_ld(-3*stride, pix);
        register vec_u8_t p1 = vec_ld(-2*stride, pix);
        register vec_u8_t p0 = vec_ld(-1*stride, pix);
        register vec_u8_t q0 = vec_ld(0, pix);
        register vec_u8_t q1 = vec_ld(stride, pix);
        register vec_u8_t q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0,  0,        pix);
        vec_st(q1,  stride,   pix);
    }
}
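
/* Filter in the horizontal direction, i.e. across a vertical edge: the six
 * samples around the edge are consecutive within each row, so 16 rows are
 * loaded and transposed into the p2..q2 layout the filter expects, and the
 * four modified columns (p1, p0, q0, q1) are transposed back and stored with
 * write16x4(). */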
void x264_deblock_h_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    register vec_u8_t line0, line1, line2, line3, line4, line5;
    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0) /* all four tc0 negative: nothing to filter */
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
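
/* These are the AltiVec entry points for luma deblocking.  They are selected
 * at init time through x264's deblock function table; roughly (a sketch only,
 * field and flag names as assumed from the rest of the tree):
 *
 *     #ifdef ARCH_PPC
 *         if( cpu&X264_CPU_ALTIVEC )
 *         {
 *             pf->deblock_v_luma = x264_deblock_v_luma_altivec;
 *             pf->deblock_h_luma = x264_deblock_h_luma_altivec;
 *         }
 *     #endif
 */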