/*****************************************************************************
 * predict.c: h264 encoder
 *****************************************************************************
 * Copyright (C) 2003 Laurent Aimar
 * $Id: predict.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "common/clip1.h"
#include "predict.h"

extern void predict_16x16_v_mmx( uint8_t *src );
extern void predict_16x16_dc_core_mmxext( uint8_t *src, int i_dc_left );
extern void predict_16x16_dc_top_mmxext( uint8_t *src );
extern void predict_16x16_p_core_mmxext( uint8_t *src, int i00, int b, int c );
extern void predict_8x8c_p_core_mmxext( uint8_t *src, int i00, int b, int c );
extern void predict_8x8c_dc_core_mmxext( uint8_t *src, int s2, int s3 );
extern void predict_8x8c_v_mmx( uint8_t *src );
extern void predict_8x8_v_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_dc_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_dc_top_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_dc_left_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddl_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddr_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddl_sse2( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_ddr_sse2( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_vl_sse2( uint8_t *src, uint8_t edge[33] );
extern void predict_8x8_vr_core_mmxext( uint8_t *src, uint8_t edge[33] );
extern void predict_4x4_ddl_mmxext( uint8_t *src );
extern void predict_4x4_vl_mmxext( uint8_t *src );
extern void predict_16x16_dc_top_sse2( uint8_t *src );
extern void predict_16x16_dc_core_sse2( uint8_t *src, int i_dc_left );
extern void predict_16x16_v_sse2( uint8_t *src );
extern void predict_16x16_p_core_sse2( uint8_t *src, int i00, int b, int c );
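
/* Plane (P) prediction: the C wrapper below derives the H/V gradients and
 * the top-left seed value i00 from the block's top and left neighbors, then
 * hands the per-pixel plane fill to the assembly core. */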
#define PREDICT_16x16_P(name)\
static void predict_16x16_p_##name( uint8_t *src )\
{\
    int a, b, c, i;\
    int H = 0;\
    int V = 0;\
    int i00;\
    for( i = 1; i <= 8; i++ )\
    {\
        H += i * ( src[7+i - FDEC_STRIDE ] - src[7-i - FDEC_STRIDE ] );\
        V += i * ( src[(7+i)*FDEC_STRIDE -1] - src[(7-i)*FDEC_STRIDE -1] );\
    }\
    a = 16 * ( src[15*FDEC_STRIDE -1] + src[15 - FDEC_STRIDE] );\
    b = ( 5 * H + 32 ) >> 6;\
    c = ( 5 * V + 32 ) >> 6;\
    i00 = a - b * 7 - c * 7 + 16;\
    predict_16x16_p_core_##name( src, i00, b, c );\
}

PREDICT_16x16_P( mmxext )
PREDICT_16x16_P( sse2 )
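
/* Chroma plane prediction: same scheme as 16x16 above, but over a 4-pixel
 * border and with the chroma scaling ((17*H+16)>>5 instead of (5*H+32)>>6). */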
static void predict_8x8c_p_mmxext( uint8_t *src )
{
    int a, b, c, i;
    int H = 0;
    int V = 0;
    int i00;

    for( i = 1; i <= 4; i++ )
    {
        H += i * ( src[3+i - FDEC_STRIDE] - src[3-i - FDEC_STRIDE] );
        V += i * ( src[(3+i)*FDEC_STRIDE -1] - src[(3-i)*FDEC_STRIDE -1] );
    }

    a = 16 * ( src[7*FDEC_STRIDE -1] + src[7 - FDEC_STRIDE] );
    b = ( 17 * H + 16 ) >> 5;
    c = ( 17 * V + 16 ) >> 5;
    i00 = a -3*b -3*c + 16;

    predict_8x8c_p_core_mmxext( src, i00, b, c );
}
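
/* DC prediction: sum the left-column pixels in C, then let the assembly
 * core add the top row and fill the block with the rounded average. */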
#define PREDICT_16x16_DC(name)\
static void predict_16x16_dc_##name( uint8_t *src )\
{\
    uint32_t dc = 16;\
    int i;\
    for( i = 0; i < 16; i+=2 )\
    {\
        dc += src[-1 + i * FDEC_STRIDE];\
        dc += src[-1 + (i+1) * FDEC_STRIDE];\
    }\
    predict_16x16_dc_core_##name( src, dc );\
}

PREDICT_16x16_DC( mmxext )
PREDICT_16x16_DC( sse2 )

static void predict_8x8c_dc_mmxext( uint8_t *src )
{
    int s2 = 4
       + src[-1 + 0*FDEC_STRIDE]
       + src[-1 + 1*FDEC_STRIDE]
       + src[-1 + 2*FDEC_STRIDE]
       + src[-1 + 3*FDEC_STRIDE];

    int s3 = 2
       + src[-1 + 4*FDEC_STRIDE]
       + src[-1 + 5*FDEC_STRIDE]
       + src[-1 + 6*FDEC_STRIDE]
       + src[-1 + 7*FDEC_STRIDE];

    predict_8x8c_dc_core_mmxext( src, s2, s3 );
}
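
/* The portable predictors below store 8 pixels per write through uint64_t,
 * so they are only compiled (and only registered by the init functions) on
 * x86_64; 32-bit builds keep the generic C versions. */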
#ifdef ARCH_X86_64
static void predict_16x16_h( uint8_t *src )
{
    int y;
    for( y = 0; y < 16; y++ )
    {
        const uint64_t v = 0x0101010101010101ULL * src[-1];
        uint64_t *p = (uint64_t*)src;
        p[0] = p[1] = v;
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_h( uint8_t *src )
{
    int y;
    for( y = 0; y < 8; y++ )
    {
        *(uint64_t*)src = 0x0101010101010101ULL * src[-1];
        src += FDEC_STRIDE;
    }
}

static void predict_16x16_dc_left( uint8_t *src )
{
    uint32_t s = 0;
    uint64_t dc;
    int y;

    for( y = 0; y < 16; y++ )
    {
        s += src[-1 + y * FDEC_STRIDE];
    }
    dc = (( s + 8 ) >> 4) * 0x0101010101010101ULL;

    for( y = 0; y < 16; y++ )
    {
        uint64_t *p = (uint64_t*)src;
        p[0] = p[1] = dc;
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_dc_left( uint8_t *src )
{
    int y;
    uint32_t s0 = 0, s1 = 0;
    uint64_t dc0, dc1;

    for( y = 0; y < 4; y++ )
    {
        s0 += src[y * FDEC_STRIDE - 1];
        s1 += src[(y+4) * FDEC_STRIDE - 1];
    }
    dc0 = (( s0 + 2 ) >> 2) * 0x0101010101010101ULL;
    dc1 = (( s1 + 2 ) >> 2) * 0x0101010101010101ULL;

    for( y = 0; y < 4; y++ )
    {
        *(uint64_t*)src = dc0;
        src += FDEC_STRIDE;
    }
    for( y = 0; y < 4; y++ )
    {
        *(uint64_t*)src = dc1;
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_dc_top( uint8_t *src )
{
    int y, x;
    uint32_t s0 = 0, s1 = 0;
    uint64_t dc;

    for( x = 0; x < 4; x++ )
    {
        s0 += src[x - FDEC_STRIDE];
        s1 += src[x + 4 - FDEC_STRIDE];
    }
    dc = (( s0 + 2 ) >> 2) * 0x01010101
       + (( s1 + 2 ) >> 2) * 0x0101010100000000ULL;

    for( y = 0; y < 8; y++ )
    {
        *(uint64_t*)src = dc;
        src += FDEC_STRIDE;
    }
}
#endif
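
/* Each PREDICT_4x4_LOAD_* macro pulls one four-pixel border (left column,
 * top row, or top-right row) into local ints; UNUSED silences warnings in
 * predictors that don't read every sample. */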
#define PREDICT_4x4_LOAD_LEFT \
    const int l0 = src[-1+0*FDEC_STRIDE]; \
    const int l1 = src[-1+1*FDEC_STRIDE]; \
    const int l2 = src[-1+2*FDEC_STRIDE]; \
    UNUSED const int l3 = src[-1+3*FDEC_STRIDE];

#define PREDICT_4x4_LOAD_TOP \
    const int t0 = src[0-1*FDEC_STRIDE]; \
    const int t1 = src[1-1*FDEC_STRIDE]; \
    const int t2 = src[2-1*FDEC_STRIDE]; \
    UNUSED const int t3 = src[3-1*FDEC_STRIDE];

#define PREDICT_4x4_LOAD_TOP_RIGHT \
    const int t4 = src[4-1*FDEC_STRIDE]; \
    const int t5 = src[5-1*FDEC_STRIDE]; \
    const int t6 = src[6-1*FDEC_STRIDE]; \
    UNUSED const int t7 = src[7-1*FDEC_STRIDE];
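
/* F1/F2 are the standard H.264 rounding filters: a 2-tap average and a
 * 3-tap [1,2,1] filter. */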
#define F1(a,b)   (((a)+(b)+1)>>1)
#define F2(a,b,c) (((a)+2*(b)+(c)+2)>>2)

#ifdef ARCH_X86_64 // slower on x86

static void predict_4x4_ddl( uint8_t *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    uint32_t vec = (F2(t3,t4,t5)<< 0)
                 + (F2(t4,t5,t6)<< 8)
                 + (F2(t5,t6,t7)<<16)
                 + (F2(t6,t7,t7)<<24);
    *(uint32_t*)&src[3*FDEC_STRIDE] = vec;
    *(uint32_t*)&src[2*FDEC_STRIDE] = vec = (vec<<8) + F2(t2,t3,t4);
    *(uint32_t*)&src[1*FDEC_STRIDE] = vec = (vec<<8) + F2(t1,t2,t3);
    *(uint32_t*)&src[0*FDEC_STRIDE] = vec = (vec<<8) + F2(t0,t1,t2);
}

static void predict_4x4_ddr( uint8_t *src )
{
    const int lt = src[-1-FDEC_STRIDE];
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    uint32_t vec = (F2(l0,lt,t0)<< 0)
                 + (F2(lt,t0,t1)<< 8)
                 + (F2(t0,t1,t2)<<16)
                 + (F2(t1,t2,t3)<<24);
    *(uint32_t*)&src[0*FDEC_STRIDE] = vec;
    *(uint32_t*)&src[1*FDEC_STRIDE] = vec = (vec<<8) + F2(l1,l0,lt);
    *(uint32_t*)&src[2*FDEC_STRIDE] = vec = (vec<<8) + F2(l2,l1,l0);
    *(uint32_t*)&src[3*FDEC_STRIDE] = vec = (vec<<8) + F2(l3,l2,l1);
}

static void predict_4x4_vr( uint8_t *src )
{
    const int lt = src[-1-FDEC_STRIDE];
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    const int ltt0 = lt + t0 + 1;
    const int t0t1 = t0 + t1 + 1;
    const int t1t2 = t1 + t2 + 1;
    const int t2t3 = t2 + t3 + 1;
    const int l0lt = l0 + lt + 1;
    const int l1l0 = l1 + l0 + 1;
    const int l2l1 = l2 + l1 + 1;

    src[0*FDEC_STRIDE+0]=
    src[2*FDEC_STRIDE+1]= ltt0 >> 1;

    src[0*FDEC_STRIDE+1]=
    src[2*FDEC_STRIDE+2]= t0t1 >> 1;

    src[0*FDEC_STRIDE+2]=
    src[2*FDEC_STRIDE+3]= t1t2 >> 1;

    src[0*FDEC_STRIDE+3]= t2t3 >> 1;

    src[1*FDEC_STRIDE+0]=
    src[3*FDEC_STRIDE+1]= (l0lt + ltt0) >> 2;

    src[1*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+2]= (ltt0 + t0t1) >> 2;

    src[1*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+3]= (t0t1 + t1t2) >> 2;

    src[1*FDEC_STRIDE+3]= (t1t2 + t2t3) >> 2;
    src[2*FDEC_STRIDE+0]= (l1l0 + l0lt) >> 2;
    src[3*FDEC_STRIDE+0]= (l2l1 + l1l0) >> 2;
}

static void predict_4x4_hd( uint8_t *src )
{
    const int lt= src[-1-1*FDEC_STRIDE];
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    const int ltt0 = lt + t0 + 1;
    const int t0t1 = t0 + t1 + 1;
    const int t1t2 = t1 + t2 + 1;
    const int l0lt = l0 + lt + 1;
    const int l1l0 = l1 + l0 + 1;
    const int l2l1 = l2 + l1 + 1;
    const int l3l2 = l3 + l2 + 1;

    src[0*FDEC_STRIDE+0]=
    src[1*FDEC_STRIDE+2]= l0lt >> 1;
    src[0*FDEC_STRIDE+1]=
    src[1*FDEC_STRIDE+3]= (l0lt + ltt0) >> 2;
    src[0*FDEC_STRIDE+2]= (ltt0 + t0t1) >> 2;
    src[0*FDEC_STRIDE+3]= (t0t1 + t1t2) >> 2;
    src[1*FDEC_STRIDE+0]=
    src[2*FDEC_STRIDE+2]= l1l0 >> 1;
    src[1*FDEC_STRIDE+1]=
    src[2*FDEC_STRIDE+3]= (l0lt + l1l0) >> 2;
    src[2*FDEC_STRIDE+0]=
    src[3*FDEC_STRIDE+2]= l2l1 >> 1;
    src[2*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+3]= (l1l0 + l2l1) >> 2;
    src[3*FDEC_STRIDE+0]= l3l2 >> 1;
    src[3*FDEC_STRIDE+1]= (l2l1 + l3l2) >> 2;
}

static void predict_4x4_vl( uint8_t *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    const int t0t1 = t0 + t1 + 1;
    const int t1t2 = t1 + t2 + 1;
    const int t2t3 = t2 + t3 + 1;
    const int t3t4 = t3 + t4 + 1;
    const int t4t5 = t4 + t5 + 1;
    const int t5t6 = t5 + t6 + 1;

    src[0*FDEC_STRIDE+0]= t0t1 >> 1;
    src[0*FDEC_STRIDE+1]=
    src[2*FDEC_STRIDE+0]= t1t2 >> 1;
    src[0*FDEC_STRIDE+2]=
    src[2*FDEC_STRIDE+1]= t2t3 >> 1;
    src[0*FDEC_STRIDE+3]=
    src[2*FDEC_STRIDE+2]= t3t4 >> 1;
    src[2*FDEC_STRIDE+3]= t4t5 >> 1;
    src[1*FDEC_STRIDE+0]= (t0t1 + t1t2) >> 2;
    src[1*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+0]= (t1t2 + t2t3) >> 2;
    src[1*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+1]= (t2t3 + t3t4) >> 2;
    src[1*FDEC_STRIDE+3]=
    src[3*FDEC_STRIDE+2]= (t3t4 + t4t5) >> 2;
    src[3*FDEC_STRIDE+3]= (t4t5 + t5t6) >> 2;
}

static void predict_4x4_hu( uint8_t *src )
{
    PREDICT_4x4_LOAD_LEFT
    const int l1l0 = l1 + l0 + 1;
    const int l2l1 = l2 + l1 + 1;
    const int l3l2 = l3 + l2 + 1;

    src[0*FDEC_STRIDE+0]= l1l0 >> 1;
    src[0*FDEC_STRIDE+1]= (l1l0 + l2l1) >> 2;

    src[0*FDEC_STRIDE+2]=
    src[1*FDEC_STRIDE+0]= l2l1 >> 1;

    src[0*FDEC_STRIDE+3]=
    src[1*FDEC_STRIDE+1]= (l2l1 + l3l2) >> 2;

    src[1*FDEC_STRIDE+2]=
    src[2*FDEC_STRIDE+0]= l3l2 >> 1;

    src[1*FDEC_STRIDE+3]=
    src[2*FDEC_STRIDE+1]= (l2 + 3*l3 + 2) >> 2;

    src[2*FDEC_STRIDE+3]=
    src[3*FDEC_STRIDE+1]=
    src[3*FDEC_STRIDE+0]=
    src[2*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+2]=
    src[3*FDEC_STRIDE+3]= l3;
}
#endif

/****************************************************************************
 * 8x8 prediction for intra luma block
 ****************************************************************************/
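
/* The 8x8 predictors take their neighbors from a prefiltered edge[] buffer
 * rather than from the frame: edge[14-y] is the left column, edge[15] the
 * top-left corner, and edge[16+x] the top and top-right rows. */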
#define PL(y) \
    UNUSED int l##y = edge[14-y];
#define PT(x) \
    UNUSED int t##x = edge[16+x];
#define PREDICT_8x8_LOAD_TOPLEFT \
    int lt = edge[15];
#define PREDICT_8x8_LOAD_LEFT \
    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
#define PREDICT_8x8_LOAD_TOP \
    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)

#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += FDEC_STRIDE; \
    }

#define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]
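
/* The MMX core fills most of the VR prediction; the remaining pixels along
 * the lower-left diagonal are computed here in C from the left neighbors. */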
static void predict_8x8_vr_mmxext( uint8_t *src, uint8_t edge[33] )
{
    predict_8x8_vr_core_mmxext( src, edge );
    {
        PREDICT_8x8_LOAD_TOPLEFT
        PREDICT_8x8_LOAD_LEFT
        SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
        SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
        SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
        SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
        SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
        SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
    }
}
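
/* Fast intra mode scoring: Hadamard-transform the left and top edges in C
 * (three SUMSUB butterfly passes = an 8-point Hadamard transform), then let
 * the assembly core compare them against the source block to produce SA8D
 * costs for three 8x8 intra modes at once. */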
#define SUMSUB(a,b,c,d,e,f,g,h)\
    t=a; a+=b; b-=t;\
    t=c; c+=d; d-=t;\
    t=e; e+=f; f-=t;\
    t=g; g+=h; h-=t;

#define INTRA_SA8D_X3(cpu) \
void x264_intra_sa8d_x3_8x8_##cpu( uint8_t *fenc, uint8_t edge[33], int res[3] )\
{\
    PREDICT_8x8_LOAD_TOP\
    PREDICT_8x8_LOAD_LEFT\
    int t;\
    DECLARE_ALIGNED( int16_t, sa8d_1d[2][8], 16 );\
    SUMSUB(l0,l4,l1,l5,l2,l6,l3,l7);\
    SUMSUB(l0,l2,l1,l3,l4,l6,l5,l7);\
    SUMSUB(l0,l1,l2,l3,l4,l5,l6,l7);\
    sa8d_1d[0][0] = l0;\
    sa8d_1d[0][1] = l1;\
    sa8d_1d[0][2] = l2;\
    sa8d_1d[0][3] = l3;\
    sa8d_1d[0][4] = l4;\
    sa8d_1d[0][5] = l5;\
    sa8d_1d[0][6] = l6;\
    sa8d_1d[0][7] = l7;\
    SUMSUB(t0,t4,t1,t5,t2,t6,t3,t7);\
    SUMSUB(t0,t2,t1,t3,t4,t6,t5,t7);\
    SUMSUB(t0,t1,t2,t3,t4,t5,t6,t7);\
    sa8d_1d[1][0] = t0;\
    sa8d_1d[1][1] = t1;\
    sa8d_1d[1][2] = t2;\
    sa8d_1d[1][3] = t3;\
    sa8d_1d[1][4] = t4;\
    sa8d_1d[1][5] = t5;\
    sa8d_1d[1][6] = t6;\
    sa8d_1d[1][7] = t7;\
    x264_intra_sa8d_x3_8x8_core_##cpu( fenc, sa8d_1d, res );\
}

INTRA_SA8D_X3(mmxext)

/****************************************************************************
 * Exported functions:
 ****************************************************************************/
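
/* Each init function assumes pf[] already holds the portable C defaults and
 * overwrites entries with the fastest version the detected CPU supports,
 * returning early once a required feature bit is missing. The 16x16 init
 * additionally avoids the SSE2 versions on CPUs that also report 3DNow. */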
void x264_predict_16x16_init_mmx( int cpu, x264_predict_t pf[7] )
{
    if( !(cpu&X264_CPU_MMX) )
        return;
#ifdef ARCH_X86_64
    pf[I_PRED_16x16_H]       = predict_16x16_h;
    pf[I_PRED_16x16_DC_LEFT] = predict_16x16_dc_left;
#endif
    pf[I_PRED_16x16_V]       = predict_16x16_v_mmx;
    if( !(cpu&X264_CPU_MMXEXT) )
        return;
    pf[I_PRED_16x16_DC]      = predict_16x16_dc_mmxext;
    pf[I_PRED_16x16_DC_TOP]  = predict_16x16_dc_top_mmxext;
    pf[I_PRED_16x16_P]       = predict_16x16_p_mmxext;
    if( !(cpu&X264_CPU_SSE2) || (cpu&X264_CPU_3DNOW) )
        return;
    pf[I_PRED_16x16_DC]      = predict_16x16_dc_sse2;
    pf[I_PRED_16x16_DC_TOP]  = predict_16x16_dc_top_sse2;
    pf[I_PRED_16x16_V]       = predict_16x16_v_sse2;
    pf[I_PRED_16x16_P]       = predict_16x16_p_sse2;
}

void x264_predict_8x8c_init_mmx( int cpu, x264_predict_t pf[7] )
{
    if( !(cpu&X264_CPU_MMX) )
        return;
#ifdef ARCH_X86_64
    pf[I_PRED_CHROMA_H]       = predict_8x8c_h;
    pf[I_PRED_CHROMA_DC_LEFT] = predict_8x8c_dc_left;
    pf[I_PRED_CHROMA_DC_TOP]  = predict_8x8c_dc_top;
#endif
    pf[I_PRED_CHROMA_V]       = predict_8x8c_v_mmx;
    if( !(cpu&X264_CPU_MMXEXT) )
        return;
    pf[I_PRED_CHROMA_P]       = predict_8x8c_p_mmxext;
    pf[I_PRED_CHROMA_DC]      = predict_8x8c_dc_mmxext;
}

void x264_predict_8x8_init_mmx( int cpu, x264_predict8x8_t pf[12] )
{
    if( !(cpu&X264_CPU_MMXEXT) )
        return;
    pf[I_PRED_8x8_V]      = predict_8x8_v_mmxext;
    pf[I_PRED_8x8_DC]     = predict_8x8_dc_mmxext;
    pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top_mmxext;
    pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left_mmxext;
    pf[I_PRED_8x8_VR]     = predict_8x8_vr_mmxext;
#ifdef ARCH_X86
    pf[I_PRED_8x8_DDL] = predict_8x8_ddl_mmxext;
    pf[I_PRED_8x8_DDR] = predict_8x8_ddr_mmxext;
#endif
    if( !(cpu&X264_CPU_SSE2) )
        return;
    pf[I_PRED_8x8_DDL] = predict_8x8_ddl_sse2;
    pf[I_PRED_8x8_VL]  = predict_8x8_vl_sse2;
    pf[I_PRED_8x8_DDR] = predict_8x8_ddr_sse2;
}

void x264_predict_4x4_init_mmx( int cpu, x264_predict_t pf[12] )
{
    if( !(cpu&X264_CPU_MMX) )
        return;
#ifdef ARCH_X86_64
    pf[I_PRED_4x4_DDR] = predict_4x4_ddr;
    pf[I_PRED_4x4_VR]  = predict_4x4_vr;
    pf[I_PRED_4x4_HD]  = predict_4x4_hd;
    pf[I_PRED_4x4_HU]  = predict_4x4_hu;
#endif
    if( !(cpu&X264_CPU_MMXEXT) )
        return;
    pf[I_PRED_4x4_DDL] = predict_4x4_ddl_mmxext;
    pf[I_PRED_4x4_VL]  = predict_4x4_vl_mmxext;
}