1 /*****************************************************************************
2 * predict.c: h264 encoder
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: predict.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
8 * Loren Merritt <lorenm@u.washington.edu>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
23 *****************************************************************************/
25 /* predict4x4 are inspired from ffmpeg h264 decoder */
32 #undef HAVE_MMX /* not finished now */
35 # include "i386/predict.h"
38 # include "ppc/predict.h"
41 /****************************************************************************
42 * 16x16 prediction for intra luma block
43 ****************************************************************************/
45 #define PREDICT_16x16_DC(v) \
46 for( i = 0; i < 16; i++ )\
48 uint32_t *p = (uint32_t*)src;\
56 static void predict_16x16_dc( uint8_t *src )
61 for( i = 0; i < 16; i++ )
63 dc += src[-1 + i * FDEC_STRIDE];
64 dc += src[i - FDEC_STRIDE];
66 dc = (( dc + 16 ) >> 5) * 0x01010101;
70 static void predict_16x16_dc_left( uint8_t *src )
75 for( i = 0; i < 16; i++ )
77 dc += src[-1 + i * FDEC_STRIDE];
79 dc = (( dc + 8 ) >> 4) * 0x01010101;
83 static void predict_16x16_dc_top( uint8_t *src )
88 for( i = 0; i < 16; i++ )
90 dc += src[i - FDEC_STRIDE];
92 dc = (( dc + 8 ) >> 4) * 0x01010101;
96 static void predict_16x16_dc_128( uint8_t *src )
99 PREDICT_16x16_DC(0x80808080);
101 static void predict_16x16_h( uint8_t *src )
105 for( i = 0; i < 16; i++ )
107 const uint32_t v = 0x01010101 * src[-1];
108 uint32_t *p = (uint32_t*)src;
119 static void predict_16x16_v( uint8_t *src )
121 uint32_t v0 = *(uint32_t*)&src[ 0-FDEC_STRIDE];
122 uint32_t v1 = *(uint32_t*)&src[ 4-FDEC_STRIDE];
123 uint32_t v2 = *(uint32_t*)&src[ 8-FDEC_STRIDE];
124 uint32_t v3 = *(uint32_t*)&src[12-FDEC_STRIDE];
127 for( i = 0; i < 16; i++ )
129 uint32_t *p = (uint32_t*)src;
137 static void predict_16x16_p( uint8_t *src )
145 /* calculate H and V */
146 for( i = 0; i <= 7; i++ )
148 H += ( i + 1 ) * ( src[ 8 + i - FDEC_STRIDE ] - src[6 -i -FDEC_STRIDE] );
149 V += ( i + 1 ) * ( src[-1 + (8+i)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );
152 a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[15 - FDEC_STRIDE] );
153 b = ( 5 * H + 32 ) >> 6;
154 c = ( 5 * V + 32 ) >> 6;
156 i00 = a - b * 7 - c * 7 + 16;
158 for( y = 0; y < 16; y++ )
161 for( x = 0; x < 16; x++ )
163 src[x] = x264_clip_uint8( pix>>5 );
172 /****************************************************************************
173 * 8x8 prediction for intra chroma block
174 ****************************************************************************/
176 static void predict_8x8c_dc_128( uint8_t *src )
180 for( y = 0; y < 8; y++ )
182 uint32_t *p = (uint32_t*)src;
188 static void predict_8x8c_dc_left( uint8_t *src )
191 uint32_t dc0 = 0, dc1 = 0;
193 for( y = 0; y < 4; y++ )
195 dc0 += src[y * FDEC_STRIDE - 1];
196 dc1 += src[(y+4) * FDEC_STRIDE - 1];
198 dc0 = (( dc0 + 2 ) >> 2)*0x01010101;
199 dc1 = (( dc1 + 2 ) >> 2)*0x01010101;
201 for( y = 0; y < 4; y++ )
203 uint32_t *p = (uint32_t*)src;
208 for( y = 0; y < 4; y++ )
210 uint32_t *p = (uint32_t*)src;
217 static void predict_8x8c_dc_top( uint8_t *src )
220 uint32_t dc0 = 0, dc1 = 0;
222 for( x = 0; x < 4; x++ )
224 dc0 += src[x - FDEC_STRIDE];
225 dc1 += src[x + 4 - FDEC_STRIDE];
227 dc0 = (( dc0 + 2 ) >> 2)*0x01010101;
228 dc1 = (( dc1 + 2 ) >> 2)*0x01010101;
230 for( y = 0; y < 8; y++ )
232 uint32_t *p = (uint32_t*)src;
238 static void predict_8x8c_dc( uint8_t *src )
241 int s0 = 0, s1 = 0, s2 = 0, s3 = 0;
242 uint32_t dc0, dc1, dc2, dc3;
250 for( i = 0; i < 4; i++ )
252 s0 += src[i - FDEC_STRIDE];
253 s1 += src[i + 4 - FDEC_STRIDE];
254 s2 += src[-1 + i * FDEC_STRIDE];
255 s3 += src[-1 + (i+4)*FDEC_STRIDE];
261 dc0 = (( s0 + s2 + 4 ) >> 3)*0x01010101;
262 dc1 = (( s1 + 2 ) >> 2)*0x01010101;
263 dc2 = (( s3 + 2 ) >> 2)*0x01010101;
264 dc3 = (( s1 + s3 + 4 ) >> 3)*0x01010101;
266 for( y = 0; y < 4; y++ )
268 uint32_t *p = (uint32_t*)src;
274 for( y = 0; y < 4; y++ )
276 uint32_t *p = (uint32_t*)src;
282 static void predict_8x8c_h( uint8_t *src )
286 for( i = 0; i < 8; i++ )
288 uint32_t v = 0x01010101 * src[-1];
289 uint32_t *p = (uint32_t*)src;
295 static void predict_8x8c_v( uint8_t *src )
297 uint32_t v0 = *(uint32_t*)&src[0-FDEC_STRIDE];
298 uint32_t v1 = *(uint32_t*)&src[4-FDEC_STRIDE];
301 for( i = 0; i < 8; i++ )
303 uint32_t *p = (uint32_t*)src;
309 static void predict_8x8c_p( uint8_t *src )
318 for( i = 0; i < 4; i++ )
320 H += ( i + 1 ) * ( src[4+i - FDEC_STRIDE] - src[2 - i -FDEC_STRIDE] );
321 V += ( i + 1 ) * ( src[-1 +(i+4)*FDEC_STRIDE] - src[-1+(2-i)*FDEC_STRIDE] );
324 a = 16 * ( src[-1+7*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
325 b = ( 17 * H + 16 ) >> 5;
326 c = ( 17 * V + 16 ) >> 5;
327 i00 = a -3*b -3*c + 16;
329 for( y = 0; y < 8; y++ )
332 for( x = 0; x < 8; x++ )
334 src[x] = x264_clip_uint8( pix>>5 );
342 /****************************************************************************
343 * 4x4 prediction for intra luma block
344 ****************************************************************************/
346 #define PREDICT_4x4_DC(v) \
348 *(uint32_t*)&src[0*FDEC_STRIDE] =\
349 *(uint32_t*)&src[1*FDEC_STRIDE] =\
350 *(uint32_t*)&src[2*FDEC_STRIDE] =\
351 *(uint32_t*)&src[3*FDEC_STRIDE] = v;\
354 static void predict_4x4_dc_128( uint8_t *src )
356 PREDICT_4x4_DC(0x80808080);
358 static void predict_4x4_dc_left( uint8_t *src )
360 uint32_t dc = (( src[-1+0*FDEC_STRIDE] + src[-1+FDEC_STRIDE]+
361 src[-1+2*FDEC_STRIDE] + src[-1+3*FDEC_STRIDE] + 2 ) >> 2)*0x01010101;
364 static void predict_4x4_dc_top( uint8_t *src )
366 uint32_t dc = (( src[0 - FDEC_STRIDE] + src[1 - FDEC_STRIDE] +
367 src[2 - FDEC_STRIDE] + src[3 - FDEC_STRIDE] + 2 ) >> 2)*0x01010101;
370 static void predict_4x4_dc( uint8_t *src )
372 uint32_t dc = (( src[-1+0*FDEC_STRIDE] + src[-1+FDEC_STRIDE] +
373 src[-1+2*FDEC_STRIDE] + src[-1+3*FDEC_STRIDE] +
374 src[0 - FDEC_STRIDE] + src[1 - FDEC_STRIDE] +
375 src[2 - FDEC_STRIDE] + src[3 - FDEC_STRIDE] + 4 ) >> 3)*0x01010101;
378 static void predict_4x4_h( uint8_t *src )
380 *(uint32_t*)&src[0*FDEC_STRIDE] = src[0*FDEC_STRIDE-1] * 0x01010101;
381 *(uint32_t*)&src[1*FDEC_STRIDE] = src[1*FDEC_STRIDE-1] * 0x01010101;
382 *(uint32_t*)&src[2*FDEC_STRIDE] = src[2*FDEC_STRIDE-1] * 0x01010101;
383 *(uint32_t*)&src[3*FDEC_STRIDE] = src[3*FDEC_STRIDE-1] * 0x01010101;
385 static void predict_4x4_v( uint8_t *src )
387 uint32_t top = *((uint32_t*)&src[-FDEC_STRIDE]);
/* Load the left-column neighbours l0..l3 as locals (l3 may be unused). */
#define PREDICT_4x4_LOAD_LEFT \
    const int l0 = src[-1+0*FDEC_STRIDE]; \
    const int l1 = src[-1+1*FDEC_STRIDE]; \
    const int l2 = src[-1+2*FDEC_STRIDE]; \
    UNUSED const int l3 = src[-1+3*FDEC_STRIDE];

/* Load the top-row neighbours t0..t3 as locals (t3 may be unused). */
#define PREDICT_4x4_LOAD_TOP \
    const int t0 = src[0-1*FDEC_STRIDE]; \
    const int t1 = src[1-1*FDEC_STRIDE]; \
    const int t2 = src[2-1*FDEC_STRIDE]; \
    UNUSED const int t3 = src[3-1*FDEC_STRIDE];

/* Load the top-right neighbours t4..t7 as locals (t7 may be unused). */
#define PREDICT_4x4_LOAD_TOP_RIGHT \
    const int t4 = src[4-1*FDEC_STRIDE]; \
    const int t5 = src[5-1*FDEC_STRIDE]; \
    const int t6 = src[6-1*FDEC_STRIDE]; \
    UNUSED const int t7 = src[7-1*FDEC_STRIDE];
409 static void predict_4x4_ddl( uint8_t *src )
412 PREDICT_4x4_LOAD_TOP_RIGHT
414 src[0*FDEC_STRIDE+0] = ( t0 + 2*t1 + t2 + 2 ) >> 2;
416 src[0*FDEC_STRIDE+1] =
417 src[1*FDEC_STRIDE+0] = ( t1 + 2*t2 + t3 + 2 ) >> 2;
419 src[0*FDEC_STRIDE+2] =
420 src[1*FDEC_STRIDE+1] =
421 src[2*FDEC_STRIDE+0] = ( t2 + 2*t3 + t4 + 2 ) >> 2;
423 src[0*FDEC_STRIDE+3] =
424 src[1*FDEC_STRIDE+2] =
425 src[2*FDEC_STRIDE+1] =
426 src[3*FDEC_STRIDE+0] = ( t3 + 2*t4 + t5 + 2 ) >> 2;
428 src[1*FDEC_STRIDE+3] =
429 src[2*FDEC_STRIDE+2] =
430 src[3*FDEC_STRIDE+1] = ( t4 + 2*t5 + t6 + 2 ) >> 2;
432 src[2*FDEC_STRIDE+3] =
433 src[3*FDEC_STRIDE+2] = ( t5 + 2*t6 + t7 + 2 ) >> 2;
435 src[3*FDEC_STRIDE+3] = ( t6 + 3*t7 + 2 ) >> 2;
437 static void predict_4x4_ddr( uint8_t *src )
439 const int lt = src[-1-FDEC_STRIDE];
440 PREDICT_4x4_LOAD_LEFT
443 src[0*FDEC_STRIDE+0] =
444 src[1*FDEC_STRIDE+1] =
445 src[2*FDEC_STRIDE+2] =
446 src[3*FDEC_STRIDE+3] = ( t0 + 2 * lt + l0 + 2 ) >> 2;
448 src[0*FDEC_STRIDE+1] =
449 src[1*FDEC_STRIDE+2] =
450 src[2*FDEC_STRIDE+3] = ( lt + 2 * t0 + t1 + 2 ) >> 2;
452 src[0*FDEC_STRIDE+2] =
453 src[1*FDEC_STRIDE+3] = ( t0 + 2 * t1 + t2 + 2 ) >> 2;
455 src[0*FDEC_STRIDE+3] = ( t1 + 2 * t2 + t3 + 2 ) >> 2;
457 src[1*FDEC_STRIDE+0] =
458 src[2*FDEC_STRIDE+1] =
459 src[3*FDEC_STRIDE+2] = ( lt + 2 * l0 + l1 + 2 ) >> 2;
461 src[2*FDEC_STRIDE+0] =
462 src[3*FDEC_STRIDE+1] = ( l0 + 2 * l1 + l2 + 2 ) >> 2;
464 src[3*FDEC_STRIDE+0] = ( l1 + 2 * l2 + l3 + 2 ) >> 2;
467 static void predict_4x4_vr( uint8_t *src )
469 const int lt = src[-1-FDEC_STRIDE];
470 PREDICT_4x4_LOAD_LEFT
473 src[0*FDEC_STRIDE+0]=
474 src[2*FDEC_STRIDE+1]= ( lt + t0 + 1 ) >> 1;
476 src[0*FDEC_STRIDE+1]=
477 src[2*FDEC_STRIDE+2]= ( t0 + t1 + 1 ) >> 1;
479 src[0*FDEC_STRIDE+2]=
480 src[2*FDEC_STRIDE+3]= ( t1 + t2 + 1 ) >> 1;
482 src[0*FDEC_STRIDE+3]= ( t2 + t3 + 1 ) >> 1;
484 src[1*FDEC_STRIDE+0]=
485 src[3*FDEC_STRIDE+1]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
487 src[1*FDEC_STRIDE+1]=
488 src[3*FDEC_STRIDE+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
490 src[1*FDEC_STRIDE+2]=
491 src[3*FDEC_STRIDE+3]= ( t0 + 2 * t1 + t2 + 2) >> 2;
493 src[1*FDEC_STRIDE+3]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
494 src[2*FDEC_STRIDE+0]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
495 src[3*FDEC_STRIDE+0]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
498 static void predict_4x4_hd( uint8_t *src )
500 const int lt= src[-1-1*FDEC_STRIDE];
501 PREDICT_4x4_LOAD_LEFT
504 src[0*FDEC_STRIDE+0]=
505 src[1*FDEC_STRIDE+2]= ( lt + l0 + 1 ) >> 1;
506 src[0*FDEC_STRIDE+1]=
507 src[1*FDEC_STRIDE+3]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
508 src[0*FDEC_STRIDE+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
509 src[0*FDEC_STRIDE+3]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
510 src[1*FDEC_STRIDE+0]=
511 src[2*FDEC_STRIDE+2]= ( l0 + l1 + 1 ) >> 1;
512 src[1*FDEC_STRIDE+1]=
513 src[2*FDEC_STRIDE+3]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
514 src[2*FDEC_STRIDE+0]=
515 src[3*FDEC_STRIDE+2]= ( l1 + l2+ 1 ) >> 1;
516 src[2*FDEC_STRIDE+1]=
517 src[3*FDEC_STRIDE+3]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
518 src[3*FDEC_STRIDE+0]= ( l2 + l3 + 1 ) >> 1;
519 src[3*FDEC_STRIDE+1]= ( l1 + 2 * l2 + l3 + 2 ) >> 2;
522 static void predict_4x4_vl( uint8_t *src )
525 PREDICT_4x4_LOAD_TOP_RIGHT
527 src[0*FDEC_STRIDE+0]= ( t0 + t1 + 1 ) >> 1;
528 src[0*FDEC_STRIDE+1]=
529 src[2*FDEC_STRIDE+0]= ( t1 + t2 + 1 ) >> 1;
530 src[0*FDEC_STRIDE+2]=
531 src[2*FDEC_STRIDE+1]= ( t2 + t3 + 1 ) >> 1;
532 src[0*FDEC_STRIDE+3]=
533 src[2*FDEC_STRIDE+2]= ( t3 + t4 + 1 ) >> 1;
534 src[2*FDEC_STRIDE+3]= ( t4 + t5 + 1 ) >> 1;
535 src[1*FDEC_STRIDE+0]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
536 src[1*FDEC_STRIDE+1]=
537 src[3*FDEC_STRIDE+0]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
538 src[1*FDEC_STRIDE+2]=
539 src[3*FDEC_STRIDE+1]= ( t2 + 2 * t3 + t4 + 2 ) >> 2;
540 src[1*FDEC_STRIDE+3]=
541 src[3*FDEC_STRIDE+2]= ( t3 + 2 * t4 + t5 + 2 ) >> 2;
542 src[3*FDEC_STRIDE+3]= ( t4 + 2 * t5 + t6 + 2 ) >> 2;
545 static void predict_4x4_hu( uint8_t *src )
547 PREDICT_4x4_LOAD_LEFT
549 src[0*FDEC_STRIDE+0]= ( l0 + l1 + 1 ) >> 1;
550 src[0*FDEC_STRIDE+1]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
552 src[0*FDEC_STRIDE+2]=
553 src[1*FDEC_STRIDE+0]= ( l1 + l2 + 1 ) >> 1;
555 src[0*FDEC_STRIDE+3]=
556 src[1*FDEC_STRIDE+1]= ( l1 + 2*l2 + l3 + 2 ) >> 2;
558 src[1*FDEC_STRIDE+2]=
559 src[2*FDEC_STRIDE+0]= ( l2 + l3 + 1 ) >> 1;
561 src[1*FDEC_STRIDE+3]=
562 src[2*FDEC_STRIDE+1]= ( l2 + 2 * l3 + l3 + 2 ) >> 2;
564 src[2*FDEC_STRIDE+3]=
565 src[3*FDEC_STRIDE+1]=
566 src[3*FDEC_STRIDE+0]=
567 src[2*FDEC_STRIDE+2]=
568 src[3*FDEC_STRIDE+2]=
569 src[3*FDEC_STRIDE+3]= l3;
572 /****************************************************************************
573 * 8x8 prediction for intra luma block
574 ****************************************************************************/
576 #define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]
578 edge[14-y] = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
580 edge[16+x] = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
582 void x264_predict_8x8_filter( uint8_t *src, uint8_t edge[33], int i_neighbor, int i_filters )
584 /* edge[7..14] = l7..l0
586 * edge[16..31] = t0 .. t15
589 int have_lt = i_neighbor & MB_TOPLEFT;
590 if( i_filters & MB_LEFT )
592 edge[15] = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2;
593 edge[14] = ((have_lt ? SRC(-1,-1) : SRC(-1,0))
594 + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2;
595 PL(1) PL(2) PL(3) PL(4) PL(5) PL(6)
596 edge[7] = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2;
599 if( i_filters & MB_TOP )
601 int have_tr = i_neighbor & MB_TOPRIGHT;
602 edge[16] = ((have_lt ? SRC(-1,-1) : SRC(0,-1))
603 + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2;
604 PT(1) PT(2) PT(3) PT(4) PT(5) PT(6)
605 edge[23] = ((have_tr ? SRC(8,-1) : SRC(7,-1))
606 + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2;
608 if( i_filters & MB_TOPRIGHT )
612 PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14)
614 edge[32] = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2;
618 *(uint64_t*)(edge+24) = SRC(7,-1) * 0x0101010101010101ULL;
619 edge[32] = SRC(7,-1);
/* Redefine PL/PT as loaders: declare locals l0..l7 / t0..t15 read from
 * the filtered edge[] buffer (some may be unused by a given predictor). */
#undef PL
#undef PT
#define PL(y) \
    UNUSED const int l##y = edge[14-y];
#define PT(x) \
    UNUSED const int t##x = edge[16+x];
#define PREDICT_8x8_LOAD_TOPLEFT \
    const int lt = edge[15];
#define PREDICT_8x8_LOAD_LEFT \
    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
#define PREDICT_8x8_LOAD_TOP \
    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)
#define PREDICT_8x8_LOAD_TOPRIGHT \
    PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14) PT(15)

/* Fill all 8 rows of the 8x8 block with the 32-bit byte pattern v
 * (two word stores per row).  Declares its own loop counter and
 * advances `src`. */
#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += FDEC_STRIDE; \
    }
649 static void predict_8x8_dc_128( uint8_t *src, uint8_t edge[33] )
651 PREDICT_8x8_DC(0x80808080);
653 static void predict_8x8_dc_left( uint8_t *src, uint8_t edge[33] )
655 PREDICT_8x8_LOAD_LEFT
656 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
659 static void predict_8x8_dc_top( uint8_t *src, uint8_t edge[33] )
662 const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
665 static void predict_8x8_dc( uint8_t *src, uint8_t edge[33] )
667 PREDICT_8x8_LOAD_LEFT
669 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
670 +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
673 static void predict_8x8_h( uint8_t *src, uint8_t edge[33] )
675 PREDICT_8x8_LOAD_LEFT
676 #define ROW(y) ((uint32_t*)(src+y*FDEC_STRIDE))[0] =\
677 ((uint32_t*)(src+y*FDEC_STRIDE))[1] = 0x01010101U * l##y
678 ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
681 static void predict_8x8_v( uint8_t *src, uint8_t edge[33] )
683 const uint64_t top = *(uint64_t*)(edge+16);
685 for( y = 0; y < 8; y++ )
686 *(uint64_t*)(src+y*FDEC_STRIDE) = top;
688 static void predict_8x8_ddl( uint8_t *src, uint8_t edge[33] )
691 PREDICT_8x8_LOAD_TOPRIGHT
692 SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
693 SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
694 SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
695 SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
696 SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
697 SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
698 SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
699 SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
700 SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
701 SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
702 SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
703 SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
704 SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
705 SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
706 SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
708 static void predict_8x8_ddr( uint8_t *src, uint8_t edge[33] )
711 PREDICT_8x8_LOAD_LEFT
712 PREDICT_8x8_LOAD_TOPLEFT
713 SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
714 SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
715 SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
716 SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
717 SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
718 SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
719 SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
720 SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
721 SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
722 SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
723 SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
724 SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
725 SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
726 SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
727 SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
730 static void predict_8x8_vr( uint8_t *src, uint8_t edge[33] )
733 PREDICT_8x8_LOAD_LEFT
734 PREDICT_8x8_LOAD_TOPLEFT
735 SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
736 SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
737 SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
738 SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
739 SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
740 SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
741 SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
742 SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
743 SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
744 SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
745 SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
746 SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
747 SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
748 SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
749 SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
750 SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
751 SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
752 SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
753 SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
754 SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
755 SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
756 SRC(7,0)= (t6 + t7 + 1) >> 1;
758 static void predict_8x8_hd( uint8_t *src, uint8_t edge[33] )
761 PREDICT_8x8_LOAD_LEFT
762 PREDICT_8x8_LOAD_TOPLEFT
763 SRC(0,7)= (l6 + l7 + 1) >> 1;
764 SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
765 SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
766 SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
767 SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
768 SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
769 SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
770 SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
771 SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
772 SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
773 SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
774 SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
775 SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
776 SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
777 SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
778 SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
779 SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
780 SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
781 SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
782 SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
783 SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
784 SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
786 static void predict_8x8_vl( uint8_t *src, uint8_t edge[33] )
789 PREDICT_8x8_LOAD_TOPRIGHT
790 SRC(0,0)= (t0 + t1 + 1) >> 1;
791 SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
792 SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
793 SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
794 SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
795 SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
796 SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
797 SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
798 SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
799 SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
800 SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
801 SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
802 SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
803 SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
804 SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
805 SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
806 SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
807 SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
808 SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
809 SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
810 SRC(7,6)= (t10 + t11 + 1) >> 1;
811 SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
813 static void predict_8x8_hu( uint8_t *src, uint8_t edge[33] )
815 PREDICT_8x8_LOAD_LEFT
816 SRC(0,0)= (l0 + l1 + 1) >> 1;
817 SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
818 SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
819 SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
820 SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
821 SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
822 SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
823 SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
824 SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
825 SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
826 SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
827 SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
828 SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
829 SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
830 SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
831 SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
832 SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
833 SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
836 /****************************************************************************
837 * Exported functions:
838 ****************************************************************************/
839 void x264_predict_16x16_init( int cpu, x264_predict_t pf[7] )
841 pf[I_PRED_16x16_V ] = predict_16x16_v;
842 pf[I_PRED_16x16_H ] = predict_16x16_h;
843 pf[I_PRED_16x16_DC] = predict_16x16_dc;
844 pf[I_PRED_16x16_P ] = predict_16x16_p;
845 pf[I_PRED_16x16_DC_LEFT]= predict_16x16_dc_left;
846 pf[I_PRED_16x16_DC_TOP ]= predict_16x16_dc_top;
847 pf[I_PRED_16x16_DC_128 ]= predict_16x16_dc_128;
850 x264_predict_16x16_init_mmx( cpu, pf );
854 if( cpu&X264_CPU_ALTIVEC )
856 x264_predict_16x16_init_altivec( pf );
861 void x264_predict_8x8c_init( int cpu, x264_predict_t pf[7] )
863 pf[I_PRED_CHROMA_V ] = predict_8x8c_v;
864 pf[I_PRED_CHROMA_H ] = predict_8x8c_h;
865 pf[I_PRED_CHROMA_DC] = predict_8x8c_dc;
866 pf[I_PRED_CHROMA_P ] = predict_8x8c_p;
867 pf[I_PRED_CHROMA_DC_LEFT]= predict_8x8c_dc_left;
868 pf[I_PRED_CHROMA_DC_TOP ]= predict_8x8c_dc_top;
869 pf[I_PRED_CHROMA_DC_128 ]= predict_8x8c_dc_128;
872 x264_predict_8x8c_init_mmx( cpu, pf );
876 void x264_predict_8x8_init( int cpu, x264_predict8x8_t pf[12] )
878 pf[I_PRED_8x8_V] = predict_8x8_v;
879 pf[I_PRED_8x8_H] = predict_8x8_h;
880 pf[I_PRED_8x8_DC] = predict_8x8_dc;
881 pf[I_PRED_8x8_DDL] = predict_8x8_ddl;
882 pf[I_PRED_8x8_DDR] = predict_8x8_ddr;
883 pf[I_PRED_8x8_VR] = predict_8x8_vr;
884 pf[I_PRED_8x8_HD] = predict_8x8_hd;
885 pf[I_PRED_8x8_VL] = predict_8x8_vl;
886 pf[I_PRED_8x8_HU] = predict_8x8_hu;
887 pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left;
888 pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top;
889 pf[I_PRED_8x8_DC_128] = predict_8x8_dc_128;
892 x264_predict_8x8_init_mmx( cpu, pf );
896 void x264_predict_4x4_init( int cpu, x264_predict_t pf[12] )
898 pf[I_PRED_4x4_V] = predict_4x4_v;
899 pf[I_PRED_4x4_H] = predict_4x4_h;
900 pf[I_PRED_4x4_DC] = predict_4x4_dc;
901 pf[I_PRED_4x4_DDL] = predict_4x4_ddl;
902 pf[I_PRED_4x4_DDR] = predict_4x4_ddr;
903 pf[I_PRED_4x4_VR] = predict_4x4_vr;
904 pf[I_PRED_4x4_HD] = predict_4x4_hd;
905 pf[I_PRED_4x4_VL] = predict_4x4_vl;
906 pf[I_PRED_4x4_HU] = predict_4x4_hu;
907 pf[I_PRED_4x4_DC_LEFT]= predict_4x4_dc_left;
908 pf[I_PRED_4x4_DC_TOP] = predict_4x4_dc_top;
909 pf[I_PRED_4x4_DC_128] = predict_4x4_dc_128;
912 x264_predict_4x4_init_mmx( cpu, pf );