/*****************************************************************************
 * predict.c: h264 encoder
 *****************************************************************************
 * Copyright (C) 2003 Laurent Aimar
 * $Id: predict.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "common/clip1.h"

extern void predict_16x16_v_mmx( uint8_t *src );
extern void predict_16x16_dc_core_mmxext( uint8_t *src, int i_dc_left );
extern void predict_16x16_dc_top_mmxext( uint8_t *src );
extern void predict_16x16_p_core_mmxext( uint8_t *src, int i00, int b, int c );
extern void predict_8x8c_p_core_mmxext( uint8_t *src, int i00, int b, int c );
extern void predict_8x8c_dc_core_mmxext( uint8_t *src, int s2, int s3 );
extern void predict_8x8c_v_mmx( uint8_t *src );
extern void predict_8x8_v_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_dc_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_dc_top_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_dc_left_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddl_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddr_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddl_sse2( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddr_sse2( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_vl_sse2( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_vr_core_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_4x4_ddl_mmxext( uint8_t *src );
extern void predict_4x4_vl_mmxext( uint8_t *src );

static void predict_16x16_p( uint8_t *src )
{
    int a, b, c, i, H = 0, V = 0, i00;

    for( i = 1; i <= 8; i++ )
    {
        H += i * ( src[7+i - FDEC_STRIDE ] - src[7-i - FDEC_STRIDE ] );
        V += i * ( src[(7+i)*FDEC_STRIDE -1] - src[(7-i)*FDEC_STRIDE -1] );
    }

    a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );
    b = ( 5 * H + 32 ) >> 6;
    c = ( 5 * V + 32 ) >> 6;
    i00 = a - b * 7 - c * 7 + 16;

    predict_16x16_p_core_mmxext( src, i00, b, c );
}

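/* For reference, the asm core fills the block as
 *   src[x + y*FDEC_STRIDE] = Clip1( ( i00 + b*x + c*y ) >> 5 )
 * i.e. H.264 plane prediction with the offset a folded into i00.
 * A plain-C sketch of that fill follows; it is illustrative only (x264
 * ships asm for this), and x264_clip_uint8 is assumed to be the clamping
 * helper provided by clip1.h. */
#if 0
static void predict_16x16_p_fill_sketch( uint8_t *src, int i00, int b, int c )
{
    int x, y;
    for( y = 0; y < 16; y++ )
    {
        int pix = i00;              /* value of pixel (0,y), pre-shift */
        for( x = 0; x < 16; x++ )
        {
            src[x] = x264_clip_uint8( pix >> 5 );
            pix += b;               /* horizontal gradient */
        }
        src += FDEC_STRIDE;
        i00 += c;                   /* vertical gradient */
    }
}
#endif
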
static void predict_8x8c_p( uint8_t *src )
{
    int a, b, c, i, H = 0, V = 0, i00;

    for( i = 1; i <= 4; i++ )
    {
        H += i * ( src[3+i - FDEC_STRIDE] - src[3-i - FDEC_STRIDE] );
        V += i * ( src[(3+i)*FDEC_STRIDE -1] - src[(3-i)*FDEC_STRIDE -1] );
    }

    a = 16 * ( src[7*FDEC_STRIDE -1] + src[7 - FDEC_STRIDE] );
    b = ( 17 * H + 16 ) >> 5;
    c = ( 17 * V + 16 ) >> 5;
    i00 = a - 3*b - 3*c + 16;

    predict_8x8c_p_core_mmxext( src, i00, b, c );
}

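/* Same plane-fill pattern as the 16x16 sketch above, applied to an 8x8
 * chroma block: H.264 uses weight 17 and >>5 for the chroma gradients
 * instead of 5 and >>6, and the centre offset is 3 rather than 7. */
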
static void predict_16x16_dc( uint8_t *src )
{
    uint32_t dc = 16;   /* rounding term, folded in here */
    int i;

    for( i = 0; i < 16; i += 2 )
    {
        dc += src[-1 + i * FDEC_STRIDE];
        dc += src[-1 + (i+1) * FDEC_STRIDE];
    }

    predict_16x16_dc_core_mmxext( src, dc );
}

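/* The wrapper only sums the left column plus the rounding constant;
 * presumably the asm core adds the top-row sum and writes
 * (( dc + sum(top) ) >> 5) across all 16 rows, i.e. the standard H.264
 * 16x16 DC rule. Only the wide fill is worth doing in asm. */
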
static void predict_8x8c_dc( uint8_t *src )
{
    int s2 = 4
       + src[-1 + 0*FDEC_STRIDE]
       + src[-1 + 1*FDEC_STRIDE]
       + src[-1 + 2*FDEC_STRIDE]
       + src[-1 + 3*FDEC_STRIDE];

    int s3 = 2
       + src[-1 + 4*FDEC_STRIDE]
       + src[-1 + 5*FDEC_STRIDE]
       + src[-1 + 6*FDEC_STRIDE]
       + src[-1 + 7*FDEC_STRIDE];

    predict_8x8c_dc_core_mmxext( src, s2, s3 );
}

#ifdef ARCH_X86_64
static void predict_16x16_h( uint8_t *src )
{
    int y;
    for( y = 0; y < 16; y++ )
    {
        const uint64_t v = 0x0101010101010101ULL * src[-1];
        uint64_t *p = (uint64_t*)src;
        *p++ = v;
        *p++ = v;
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_h( uint8_t *src )
{
    int y;
    for( y = 0; y < 8; y++ )
    {
        *(uint64_t*)src = 0x0101010101010101ULL * src[-1];
        src += FDEC_STRIDE;
    }
}

static void predict_16x16_dc_left( uint8_t *src )
{
    uint32_t s = 0;
    uint64_t dc;
    int y;

    for( y = 0; y < 16; y++ )
    {
        s += src[-1 + y * FDEC_STRIDE];
    }
    dc = (( s + 8 ) >> 4) * 0x0101010101010101ULL;

    for( y = 0; y < 16; y++ )
    {
        uint64_t *p = (uint64_t*)src;
        *p++ = dc;
        *p++ = dc;
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_dc_left( uint8_t *src )
{
    int y;
    uint32_t s0 = 0, s1 = 0;
    uint64_t dc0, dc1;

    for( y = 0; y < 4; y++ )
    {
        s0 += src[y * FDEC_STRIDE - 1];
        s1 += src[(y+4) * FDEC_STRIDE - 1];
    }
    dc0 = (( s0 + 2 ) >> 2) * 0x0101010101010101ULL;
    dc1 = (( s1 + 2 ) >> 2) * 0x0101010101010101ULL;

    for( y = 0; y < 4; y++ )
    {
        *(uint64_t*)src = dc0;
        src += FDEC_STRIDE;
    }
    for( y = 0; y < 4; y++ )
    {
        *(uint64_t*)src = dc1;
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_dc_top( uint8_t *src )
{
    int y, x;
    uint32_t s0 = 0, s1 = 0;
    uint64_t dc;

    for( x = 0; x < 4; x++ )
    {
        s0 += src[x     - FDEC_STRIDE];
        s1 += src[x + 4 - FDEC_STRIDE];
    }
    /* left-half DC in the low dword, right-half DC in the high dword,
     * so one qword store covers the whole row */
    dc = (( s0 + 2 ) >> 2) * 0x01010101
       + (( s1 + 2 ) >> 2) * 0x0101010100000000ULL;

    for( y = 0; y < 8; y++ )
    {
        *(uint64_t*)src = dc;
        src += FDEC_STRIDE;
    }
}
#endif

/****************************************************************************
 * 4x4 prediction for intra luma block
 ****************************************************************************/
#define PREDICT_4x4_LOAD_LEFT \
    const int l0 = src[-1+0*FDEC_STRIDE]; \
    const int l1 = src[-1+1*FDEC_STRIDE]; \
    const int l2 = src[-1+2*FDEC_STRIDE]; \
    UNUSED const int l3 = src[-1+3*FDEC_STRIDE];

#define PREDICT_4x4_LOAD_TOP \
    const int t0 = src[0-1*FDEC_STRIDE]; \
    const int t1 = src[1-1*FDEC_STRIDE]; \
    const int t2 = src[2-1*FDEC_STRIDE]; \
    UNUSED const int t3 = src[3-1*FDEC_STRIDE];

#define PREDICT_4x4_LOAD_TOP_RIGHT \
    const int t4 = src[4-1*FDEC_STRIDE]; \
    const int t5 = src[5-1*FDEC_STRIDE]; \
    const int t6 = src[6-1*FDEC_STRIDE]; \
    UNUSED const int t7 = src[7-1*FDEC_STRIDE];

#define F1(a,b)   (((a)+(b)+1)>>1)
#define F2(a,b,c) (((a)+2*(b)+(c)+2)>>2)
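
/* F1 is the 2-tap rounded average and F2 the 3-tap [1 2 1] smoothing
 * filter from the H.264 spec. Worked example:
 *   F2(1,2,3) = (1 + 2*2 + 3 + 2) >> 2 = 10 >> 2 = 2 */
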
#ifdef ARCH_X86_64 // slower on x86

static void predict_4x4_ddl( uint8_t *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    uint32_t vec = (F2(t3,t4,t5)<< 0)
                 + (F2(t4,t5,t6)<< 8)
                 + (F2(t5,t6,t7)<<16)
                 + (F2(t6,t7,t7)<<24);
    *(uint32_t*)&src[3*FDEC_STRIDE] = vec;
    *(uint32_t*)&src[2*FDEC_STRIDE] = vec = (vec<<8) + F2(t2,t3,t4);
    *(uint32_t*)&src[1*FDEC_STRIDE] = vec = (vec<<8) + F2(t1,t2,t3);
    *(uint32_t*)&src[0*FDEC_STRIDE] = vec = (vec<<8) + F2(t0,t1,t2);
}

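/* The rows of a 4x4 diagonal prediction are byte-shifted copies of one
 * another, so after building the bottom row, each row above it costs
 * just one 32-bit shift plus one newly filtered pixel, as above. */
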
static void predict_4x4_ddr( uint8_t *src )
{
    const int lt = src[-1-FDEC_STRIDE];
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    uint32_t vec = (F2(l0,lt,t0)<< 0)
                 + (F2(lt,t0,t1)<< 8)
                 + (F2(t0,t1,t2)<<16)
                 + (F2(t1,t2,t3)<<24);
    *(uint32_t*)&src[0*FDEC_STRIDE] = vec;
    *(uint32_t*)&src[1*FDEC_STRIDE] = vec = (vec<<8) + F2(l1,l0,lt);
    *(uint32_t*)&src[2*FDEC_STRIDE] = vec = (vec<<8) + F2(l2,l1,l0);
    *(uint32_t*)&src[3*FDEC_STRIDE] = vec = (vec<<8) + F2(l3,l2,l1);
}

static void predict_4x4_vr( uint8_t *src )
{
    const int lt = src[-1-FDEC_STRIDE];
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    /* each "ab" is a + b + 1, so F1(a,b) = ab>>1 and F2(a,b,c) = (ab + bc)>>2 */
    const int ltt0 = lt + t0 + 1;
    const int t0t1 = t0 + t1 + 1;
    const int t1t2 = t1 + t2 + 1;
    const int t2t3 = t2 + t3 + 1;
    const int l0lt = l0 + lt + 1;
    const int l1l0 = l1 + l0 + 1;
    const int l2l1 = l2 + l1 + 1;

    src[0*FDEC_STRIDE+0]=
    src[2*FDEC_STRIDE+1]= ltt0 >> 1;

    src[0*FDEC_STRIDE+1]=
    src[2*FDEC_STRIDE+2]= t0t1 >> 1;

    src[0*FDEC_STRIDE+2]=
    src[2*FDEC_STRIDE+3]= t1t2 >> 1;

    src[0*FDEC_STRIDE+3]= t2t3 >> 1;

    src[1*FDEC_STRIDE+0]=
    src[3*FDEC_STRIDE+1]= (l0lt + ltt0) >> 2;

    src[1*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+2]= (ltt0 + t0t1) >> 2;

    src[1*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+3]= (t0t1 + t1t2) >> 2;

    src[1*FDEC_STRIDE+3]= (t1t2 + t2t3) >> 2;
    src[2*FDEC_STRIDE+0]= (l1l0 + l0lt) >> 2;
    src[3*FDEC_STRIDE+0]= (l2l1 + l1l0) >> 2;
}

static void predict_4x4_hd( uint8_t *src )
{
    const int lt= src[-1-1*FDEC_STRIDE];
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    const int ltt0 = lt + t0 + 1;
    const int t0t1 = t0 + t1 + 1;
    const int t1t2 = t1 + t2 + 1;
    const int l0lt = l0 + lt + 1;
    const int l1l0 = l1 + l0 + 1;
    const int l2l1 = l2 + l1 + 1;
    const int l3l2 = l3 + l2 + 1;

    src[0*FDEC_STRIDE+0]=
    src[1*FDEC_STRIDE+2]= l0lt >> 1;
    src[0*FDEC_STRIDE+1]=
    src[1*FDEC_STRIDE+3]= (l0lt + ltt0) >> 2;
    src[0*FDEC_STRIDE+2]= (ltt0 + t0t1) >> 2;
    src[0*FDEC_STRIDE+3]= (t0t1 + t1t2) >> 2;
    src[1*FDEC_STRIDE+0]=
    src[2*FDEC_STRIDE+2]= l1l0 >> 1;
    src[1*FDEC_STRIDE+1]=
    src[2*FDEC_STRIDE+3]= (l0lt + l1l0) >> 2;
    src[2*FDEC_STRIDE+0]=
    src[3*FDEC_STRIDE+2]= l2l1 >> 1;
    src[2*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+3]= (l1l0 + l2l1) >> 2;
    src[3*FDEC_STRIDE+0]= l3l2 >> 1;
    src[3*FDEC_STRIDE+1]= (l2l1 + l3l2) >> 2;
}

static void predict_4x4_vl( uint8_t *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    const int t0t1 = t0 + t1 + 1;
    const int t1t2 = t1 + t2 + 1;
    const int t2t3 = t2 + t3 + 1;
    const int t3t4 = t3 + t4 + 1;
    const int t4t5 = t4 + t5 + 1;
    const int t5t6 = t5 + t6 + 1;

    src[0*FDEC_STRIDE+0]= t0t1 >> 1;
    src[0*FDEC_STRIDE+1]=
    src[2*FDEC_STRIDE+0]= t1t2 >> 1;
    src[0*FDEC_STRIDE+2]=
    src[2*FDEC_STRIDE+1]= t2t3 >> 1;
    src[0*FDEC_STRIDE+3]=
    src[2*FDEC_STRIDE+2]= t3t4 >> 1;
    src[2*FDEC_STRIDE+3]= t4t5 >> 1;
    src[1*FDEC_STRIDE+0]= (t0t1 + t1t2) >> 2;
    src[1*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+0]= (t1t2 + t2t3) >> 2;
    src[1*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+1]= (t2t3 + t3t4) >> 2;
    src[1*FDEC_STRIDE+3]=
    src[3*FDEC_STRIDE+2]= (t3t4 + t4t5) >> 2;
    src[3*FDEC_STRIDE+3]= (t4t5 + t5t6) >> 2;
}

static void predict_4x4_hu( uint8_t *src )
{
    PREDICT_4x4_LOAD_LEFT
    const int l1l0 = l1 + l0 + 1;
    const int l2l1 = l2 + l1 + 1;
    const int l3l2 = l3 + l2 + 1;

    src[0*FDEC_STRIDE+0]= l1l0 >> 1;
    src[0*FDEC_STRIDE+1]= (l1l0 + l2l1) >> 2;

    src[0*FDEC_STRIDE+2]=
    src[1*FDEC_STRIDE+0]= l2l1 >> 1;

    src[0*FDEC_STRIDE+3]=
    src[1*FDEC_STRIDE+1]= (l2l1 + l3l2) >> 2;

    src[1*FDEC_STRIDE+2]=
    src[2*FDEC_STRIDE+0]= l3l2 >> 1;

    src[1*FDEC_STRIDE+3]=
    src[2*FDEC_STRIDE+1]= (l2 + 3*l3 + 2) >> 2;

    src[2*FDEC_STRIDE+3]=
    src[3*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+0]=
    src[2*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+3]= l3;
}

#endif

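/* Worked example for the HU rule above: with left samples l0..l3 =
 * 8,12,16,20, row 0 starts at (l0+l1+1)>>1 = 10, then (l0+2*l1+l2+2)>>2
 * = 12, and everything at or below the anti-diagonal saturates to
 * l3 = 20, since no neighbours exist further down. */
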
/****************************************************************************
 * 8x8 prediction for intra luma block
 ****************************************************************************/

/* neighbours are packed into edge[]: left samples at edge[14-y],
 * the top-left at edge[15], top and top-right samples at edge[16+x] */
#define PL(y) \
    UNUSED int l##y = edge[14-y];
#define PT(x) \
    UNUSED int t##x = edge[16+x];
#define PREDICT_8x8_LOAD_TOPLEFT \
    int lt = edge[15];
#define PREDICT_8x8_LOAD_LEFT \
    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
#define PREDICT_8x8_LOAD_TOP \
    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)

#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += FDEC_STRIDE; \
    }

#define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]

static void predict_8x8_vr_mmxext( uint8_t *src, uint8_t edge[33] )
{
    predict_8x8_vr_core_mmxext( src, edge );
    {
        PREDICT_8x8_LOAD_TOPLEFT
        PREDICT_8x8_LOAD_LEFT
        SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
        SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
        SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
        SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
        SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
        SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
    }
}

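/* Hybrid approach: the asm core handles the regular part of the VR
 * pattern, and the handful of left-edge diagonal pixels above, which
 * don't vectorize as neatly, are patched in afterwards in C. */
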
/* one butterfly stage of an 8-point Hadamard transform on four pairs */
#define SUMSUB(a,b,c,d,e,f,g,h)\
    t=a; a+=b; b-=t;\
    t=c; c+=d; d-=t;\
    t=e; e+=f; f-=t;\
    t=g; g+=h; h-=t;

/* prototypes for the asm cores (assumed here, matching the calls below) */
void x264_intra_sa8d_x3_8x8_core_sse2( uint8_t *fenc, int16_t sa8d_1d[2][8], int *res );
void x264_intra_sa8d_x3_8x8_core_mmxext( uint8_t *fenc, int16_t sa8d_1d[2][8], int *res );

#ifdef ARCH_X86_64
void x264_intra_sa8d_x3_8x8_sse2( uint8_t *fenc, uint8_t edge[33], int res[3] )
#else
void x264_intra_sa8d_x3_8x8_mmxext( uint8_t *fenc, uint8_t edge[33], int res[3] )
#endif
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    int t;
    DECLARE_ALIGNED( int16_t, sa8d_1d[2][8], 16 );

    SUMSUB(l0,l4,l1,l5,l2,l6,l3,l7);
    SUMSUB(l0,l2,l1,l3,l4,l6,l5,l7);
    SUMSUB(l0,l1,l2,l3,l4,l5,l6,l7);
    /* store order of the transformed edges is assumed sequential here */
    sa8d_1d[0][0] = l0;
    sa8d_1d[0][1] = l1;
    sa8d_1d[0][2] = l2;
    sa8d_1d[0][3] = l3;
    sa8d_1d[0][4] = l4;
    sa8d_1d[0][5] = l5;
    sa8d_1d[0][6] = l6;
    sa8d_1d[0][7] = l7;
    SUMSUB(t0,t4,t1,t5,t2,t6,t3,t7);
    SUMSUB(t0,t2,t1,t3,t4,t6,t5,t7);
    SUMSUB(t0,t1,t2,t3,t4,t5,t6,t7);
    sa8d_1d[1][0] = t0;
    sa8d_1d[1][1] = t1;
    sa8d_1d[1][2] = t2;
    sa8d_1d[1][3] = t3;
    sa8d_1d[1][4] = t4;
    sa8d_1d[1][5] = t5;
    sa8d_1d[1][6] = t6;
    sa8d_1d[1][7] = t7;

#ifdef ARCH_X86_64
    x264_intra_sa8d_x3_8x8_core_sse2( fenc, sa8d_1d, res );
#else
    x264_intra_sa8d_x3_8x8_core_mmxext( fenc, sa8d_1d, res );
#endif
}

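/* intra_sa8d_x3 scores three 8x8 intra modes (V, H, DC) at once: the 1D
 * 8-point Hadamard of each edge is cheap in scalar C, and the asm core
 * combines it with the transformed source block to produce the three
 * SATD-style costs in res[]. */
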
/****************************************************************************
 * Exported functions:
 ****************************************************************************/
void x264_predict_16x16_init_mmxext( x264_predict_t pf[7] )
{
    pf[I_PRED_16x16_V] = predict_16x16_v_mmx;
    pf[I_PRED_16x16_DC] = predict_16x16_dc;
    pf[I_PRED_16x16_DC_TOP] = predict_16x16_dc_top_mmxext;
    pf[I_PRED_16x16_P] = predict_16x16_p;

#ifdef ARCH_X86_64
    pf[I_PRED_16x16_H] = predict_16x16_h;
    pf[I_PRED_16x16_DC_LEFT] = predict_16x16_dc_left;
#endif
}

void x264_predict_8x8c_init_mmxext( x264_predict_t pf[7] )
{
    pf[I_PRED_CHROMA_V] = predict_8x8c_v_mmx;
    pf[I_PRED_CHROMA_P] = predict_8x8c_p;
    pf[I_PRED_CHROMA_DC] = predict_8x8c_dc;

#ifdef ARCH_X86_64
    pf[I_PRED_CHROMA_H] = predict_8x8c_h;
    pf[I_PRED_CHROMA_DC_LEFT] = predict_8x8c_dc_left;
    pf[I_PRED_CHROMA_DC_TOP] = predict_8x8c_dc_top;
#endif
}

void x264_predict_8x8_init_mmxext( x264_predict8x8_t pf[12] )
{
    pf[I_PRED_8x8_V] = predict_8x8_v_mmxext;
    pf[I_PRED_8x8_DC] = predict_8x8_dc_mmxext;
    pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top_mmxext;
    pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left_mmxext;
    pf[I_PRED_8x8_DDL] = predict_8x8_ddl_mmxext;
    pf[I_PRED_8x8_VR] = predict_8x8_vr_mmxext;
#ifdef ARCH_X86_64 // x86 not written yet
    pf[I_PRED_8x8_DDR] = predict_8x8_ddr_mmxext;
#endif
}

void x264_predict_8x8_init_sse2( x264_predict8x8_t pf[12] )
{
#ifdef ARCH_X86_64 // x86 not written yet
    pf[I_PRED_8x8_DDL] = predict_8x8_ddl_sse2;
    pf[I_PRED_8x8_DDR] = predict_8x8_ddr_sse2;
    pf[I_PRED_8x8_VL] = predict_8x8_vl_sse2;
#endif
}

void x264_predict_4x4_init_mmxext( x264_predict_t pf[12] )
{
#ifdef ARCH_X86_64 // x86 not written yet
    pf[I_PRED_4x4_DDL] = predict_4x4_ddl_mmxext;
    pf[I_PRED_4x4_VL] = predict_4x4_vl_mmxext;
#endif
#ifdef ARCH_X86_64 // slower on x86
    pf[I_PRED_4x4_DDR] = predict_4x4_ddr;
    pf[I_PRED_4x4_VR] = predict_4x4_vr;
    pf[I_PRED_4x4_HD] = predict_4x4_hd;
    pf[I_PRED_4x4_HU] = predict_4x4_hu;
#endif
}
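
#if 0
/* Usage sketch (illustrative, not part of this file): a caller fills the
 * prediction function tables based on detected CPU flags, overriding the
 * portable C predictors installed elsewhere. The flag names follow
 * x264's usual X264_CPU_* convention; the surrounding function is an
 * assumption for demonstration. */
static void example_init( unsigned cpu, x264_predict_t pf_16x16[7],
                          x264_predict8x8_t pf_8x8[12] )
{
    if( cpu & X264_CPU_MMXEXT )
    {
        x264_predict_16x16_init_mmxext( pf_16x16 );
        x264_predict_8x8_init_mmxext( pf_8x8 );
    }
    if( cpu & X264_CPU_SSE2 )
        x264_predict_8x8_init_sse2( pf_8x8 );
}
#endif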