1 /*****************************************************************************
2 * predict.c: h264 encoder
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: predict.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
8 * Loren Merritt <lorenm@u.washington.edu>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
23 *****************************************************************************/
25 /* predict4x4 are inspired from ffmpeg h264 decoder */
#include "common/common.h"

#undef HAVE_MMX /* not finished now */
#ifdef HAVE_MMX
#   include "x86/predict.h"
#endif
#ifdef ARCH_PPC
#   include "ppc/predict.h"
#endif
40 /****************************************************************************
41 * 16x16 prediction for intra luma block
42 ****************************************************************************/
/* Fill the 16x16 block with the packed pixel value v (4 bytes per store).
 * Requires a local `int i` in the enclosing function. */
#define PREDICT_16x16_DC(v) \
    for( i = 0; i < 16; i++ )\
    {\
        uint32_t *p = (uint32_t*)src;\
        *p++ = v;\
        *p++ = v;\
        *p++ = v;\
        *p++ = v;\
        src += FDEC_STRIDE;\
    }
55 static void predict_16x16_dc( uint8_t *src )
60 for( i = 0; i < 16; i++ )
62 dc += src[-1 + i * FDEC_STRIDE];
63 dc += src[i - FDEC_STRIDE];
65 dc = (( dc + 16 ) >> 5) * 0x01010101;
69 static void predict_16x16_dc_left( uint8_t *src )
74 for( i = 0; i < 16; i++ )
76 dc += src[-1 + i * FDEC_STRIDE];
78 dc = (( dc + 8 ) >> 4) * 0x01010101;
82 static void predict_16x16_dc_top( uint8_t *src )
87 for( i = 0; i < 16; i++ )
89 dc += src[i - FDEC_STRIDE];
91 dc = (( dc + 8 ) >> 4) * 0x01010101;
/* 16x16 DC prediction when no neighbors are available: constant 128. */
static void predict_16x16_dc_128( uint8_t *src )
{
    int i;
    PREDICT_16x16_DC(0x80808080);
}
100 static void predict_16x16_h( uint8_t *src )
104 for( i = 0; i < 16; i++ )
106 const uint32_t v = 0x01010101 * src[-1];
107 uint32_t *p = (uint32_t*)src;
118 static void predict_16x16_v( uint8_t *src )
120 uint32_t v0 = *(uint32_t*)&src[ 0-FDEC_STRIDE];
121 uint32_t v1 = *(uint32_t*)&src[ 4-FDEC_STRIDE];
122 uint32_t v2 = *(uint32_t*)&src[ 8-FDEC_STRIDE];
123 uint32_t v3 = *(uint32_t*)&src[12-FDEC_STRIDE];
126 for( i = 0; i < 16; i++ )
128 uint32_t *p = (uint32_t*)src;
136 static void predict_16x16_p( uint8_t *src )
144 /* calculate H and V */
145 for( i = 0; i <= 7; i++ )
147 H += ( i + 1 ) * ( src[ 8 + i - FDEC_STRIDE ] - src[6 -i -FDEC_STRIDE] );
148 V += ( i + 1 ) * ( src[-1 + (8+i)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );
151 a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[15 - FDEC_STRIDE] );
152 b = ( 5 * H + 32 ) >> 6;
153 c = ( 5 * V + 32 ) >> 6;
155 i00 = a - b * 7 - c * 7 + 16;
157 for( y = 0; y < 16; y++ )
160 for( x = 0; x < 16; x++ )
162 src[x] = x264_clip_uint8( pix>>5 );
171 /****************************************************************************
172 * 8x8 prediction for intra chroma block
173 ****************************************************************************/
175 static void predict_8x8c_dc_128( uint8_t *src )
179 for( y = 0; y < 8; y++ )
181 uint32_t *p = (uint32_t*)src;
187 static void predict_8x8c_dc_left( uint8_t *src )
190 uint32_t dc0 = 0, dc1 = 0;
192 for( y = 0; y < 4; y++ )
194 dc0 += src[y * FDEC_STRIDE - 1];
195 dc1 += src[(y+4) * FDEC_STRIDE - 1];
197 dc0 = (( dc0 + 2 ) >> 2)*0x01010101;
198 dc1 = (( dc1 + 2 ) >> 2)*0x01010101;
200 for( y = 0; y < 4; y++ )
202 uint32_t *p = (uint32_t*)src;
207 for( y = 0; y < 4; y++ )
209 uint32_t *p = (uint32_t*)src;
216 static void predict_8x8c_dc_top( uint8_t *src )
219 uint32_t dc0 = 0, dc1 = 0;
221 for( x = 0; x < 4; x++ )
223 dc0 += src[x - FDEC_STRIDE];
224 dc1 += src[x + 4 - FDEC_STRIDE];
226 dc0 = (( dc0 + 2 ) >> 2)*0x01010101;
227 dc1 = (( dc1 + 2 ) >> 2)*0x01010101;
229 for( y = 0; y < 8; y++ )
231 uint32_t *p = (uint32_t*)src;
237 static void predict_8x8c_dc( uint8_t *src )
240 int s0 = 0, s1 = 0, s2 = 0, s3 = 0;
241 uint32_t dc0, dc1, dc2, dc3;
249 for( i = 0; i < 4; i++ )
251 s0 += src[i - FDEC_STRIDE];
252 s1 += src[i + 4 - FDEC_STRIDE];
253 s2 += src[-1 + i * FDEC_STRIDE];
254 s3 += src[-1 + (i+4)*FDEC_STRIDE];
260 dc0 = (( s0 + s2 + 4 ) >> 3)*0x01010101;
261 dc1 = (( s1 + 2 ) >> 2)*0x01010101;
262 dc2 = (( s3 + 2 ) >> 2)*0x01010101;
263 dc3 = (( s1 + s3 + 4 ) >> 3)*0x01010101;
265 for( y = 0; y < 4; y++ )
267 uint32_t *p = (uint32_t*)src;
273 for( y = 0; y < 4; y++ )
275 uint32_t *p = (uint32_t*)src;
281 static void predict_8x8c_h( uint8_t *src )
285 for( i = 0; i < 8; i++ )
287 uint32_t v = 0x01010101 * src[-1];
288 uint32_t *p = (uint32_t*)src;
294 static void predict_8x8c_v( uint8_t *src )
296 uint32_t v0 = *(uint32_t*)&src[0-FDEC_STRIDE];
297 uint32_t v1 = *(uint32_t*)&src[4-FDEC_STRIDE];
300 for( i = 0; i < 8; i++ )
302 uint32_t *p = (uint32_t*)src;
308 static void predict_8x8c_p( uint8_t *src )
317 for( i = 0; i < 4; i++ )
319 H += ( i + 1 ) * ( src[4+i - FDEC_STRIDE] - src[2 - i -FDEC_STRIDE] );
320 V += ( i + 1 ) * ( src[-1 +(i+4)*FDEC_STRIDE] - src[-1+(2-i)*FDEC_STRIDE] );
323 a = 16 * ( src[-1+7*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
324 b = ( 17 * H + 16 ) >> 5;
325 c = ( 17 * V + 16 ) >> 5;
326 i00 = a -3*b -3*c + 16;
328 for( y = 0; y < 8; y++ )
331 for( x = 0; x < 8; x++ )
333 src[x] = x264_clip_uint8( pix>>5 );
341 /****************************************************************************
342 * 4x4 prediction for intra luma block
343 ****************************************************************************/
/* Fill the 4x4 block with the packed pixel value v (one word per row). */
#define PREDICT_4x4_DC(v) \
{\
    *(uint32_t*)&src[0*FDEC_STRIDE] =\
    *(uint32_t*)&src[1*FDEC_STRIDE] =\
    *(uint32_t*)&src[2*FDEC_STRIDE] =\
    *(uint32_t*)&src[3*FDEC_STRIDE] = v;\
}
/* 4x4 DC prediction with no neighbors: constant 128. */
static void predict_4x4_dc_128( uint8_t *src )
{
    PREDICT_4x4_DC(0x80808080);
}
357 static void predict_4x4_dc_left( uint8_t *src )
359 uint32_t dc = (( src[-1+0*FDEC_STRIDE] + src[-1+FDEC_STRIDE]+
360 src[-1+2*FDEC_STRIDE] + src[-1+3*FDEC_STRIDE] + 2 ) >> 2)*0x01010101;
363 static void predict_4x4_dc_top( uint8_t *src )
365 uint32_t dc = (( src[0 - FDEC_STRIDE] + src[1 - FDEC_STRIDE] +
366 src[2 - FDEC_STRIDE] + src[3 - FDEC_STRIDE] + 2 ) >> 2)*0x01010101;
369 static void predict_4x4_dc( uint8_t *src )
371 uint32_t dc = (( src[-1+0*FDEC_STRIDE] + src[-1+FDEC_STRIDE] +
372 src[-1+2*FDEC_STRIDE] + src[-1+3*FDEC_STRIDE] +
373 src[0 - FDEC_STRIDE] + src[1 - FDEC_STRIDE] +
374 src[2 - FDEC_STRIDE] + src[3 - FDEC_STRIDE] + 4 ) >> 3)*0x01010101;
377 static void predict_4x4_h( uint8_t *src )
379 *(uint32_t*)&src[0*FDEC_STRIDE] = src[0*FDEC_STRIDE-1] * 0x01010101;
380 *(uint32_t*)&src[1*FDEC_STRIDE] = src[1*FDEC_STRIDE-1] * 0x01010101;
381 *(uint32_t*)&src[2*FDEC_STRIDE] = src[2*FDEC_STRIDE-1] * 0x01010101;
382 *(uint32_t*)&src[3*FDEC_STRIDE] = src[3*FDEC_STRIDE-1] * 0x01010101;
384 static void predict_4x4_v( uint8_t *src )
386 uint32_t top = *((uint32_t*)&src[-FDEC_STRIDE]);
/* Load the 4 left neighbor pixels into l0..l3. */
#define PREDICT_4x4_LOAD_LEFT \
    const int l0 = src[-1+0*FDEC_STRIDE];   \
    const int l1 = src[-1+1*FDEC_STRIDE];   \
    const int l2 = src[-1+2*FDEC_STRIDE];   \
    UNUSED const int l3 = src[-1+3*FDEC_STRIDE];
/* Load the 4 top neighbor pixels into t0..t3. */
#define PREDICT_4x4_LOAD_TOP \
    const int t0 = src[0-1*FDEC_STRIDE];   \
    const int t1 = src[1-1*FDEC_STRIDE];   \
    const int t2 = src[2-1*FDEC_STRIDE];   \
    UNUSED const int t3 = src[3-1*FDEC_STRIDE];
/* Load the 4 top-right neighbor pixels into t4..t7. */
#define PREDICT_4x4_LOAD_TOP_RIGHT \
    const int t4 = src[4-1*FDEC_STRIDE];   \
    const int t5 = src[5-1*FDEC_STRIDE];   \
    const int t6 = src[6-1*FDEC_STRIDE];   \
    UNUSED const int t7 = src[7-1*FDEC_STRIDE];
408 static void predict_4x4_ddl( uint8_t *src )
411 PREDICT_4x4_LOAD_TOP_RIGHT
413 src[0*FDEC_STRIDE+0] = ( t0 + 2*t1 + t2 + 2 ) >> 2;
415 src[0*FDEC_STRIDE+1] =
416 src[1*FDEC_STRIDE+0] = ( t1 + 2*t2 + t3 + 2 ) >> 2;
418 src[0*FDEC_STRIDE+2] =
419 src[1*FDEC_STRIDE+1] =
420 src[2*FDEC_STRIDE+0] = ( t2 + 2*t3 + t4 + 2 ) >> 2;
422 src[0*FDEC_STRIDE+3] =
423 src[1*FDEC_STRIDE+2] =
424 src[2*FDEC_STRIDE+1] =
425 src[3*FDEC_STRIDE+0] = ( t3 + 2*t4 + t5 + 2 ) >> 2;
427 src[1*FDEC_STRIDE+3] =
428 src[2*FDEC_STRIDE+2] =
429 src[3*FDEC_STRIDE+1] = ( t4 + 2*t5 + t6 + 2 ) >> 2;
431 src[2*FDEC_STRIDE+3] =
432 src[3*FDEC_STRIDE+2] = ( t5 + 2*t6 + t7 + 2 ) >> 2;
434 src[3*FDEC_STRIDE+3] = ( t6 + 3*t7 + 2 ) >> 2;
436 static void predict_4x4_ddr( uint8_t *src )
438 const int lt = src[-1-FDEC_STRIDE];
439 PREDICT_4x4_LOAD_LEFT
442 src[0*FDEC_STRIDE+0] =
443 src[1*FDEC_STRIDE+1] =
444 src[2*FDEC_STRIDE+2] =
445 src[3*FDEC_STRIDE+3] = ( t0 + 2 * lt + l0 + 2 ) >> 2;
447 src[0*FDEC_STRIDE+1] =
448 src[1*FDEC_STRIDE+2] =
449 src[2*FDEC_STRIDE+3] = ( lt + 2 * t0 + t1 + 2 ) >> 2;
451 src[0*FDEC_STRIDE+2] =
452 src[1*FDEC_STRIDE+3] = ( t0 + 2 * t1 + t2 + 2 ) >> 2;
454 src[0*FDEC_STRIDE+3] = ( t1 + 2 * t2 + t3 + 2 ) >> 2;
456 src[1*FDEC_STRIDE+0] =
457 src[2*FDEC_STRIDE+1] =
458 src[3*FDEC_STRIDE+2] = ( lt + 2 * l0 + l1 + 2 ) >> 2;
460 src[2*FDEC_STRIDE+0] =
461 src[3*FDEC_STRIDE+1] = ( l0 + 2 * l1 + l2 + 2 ) >> 2;
463 src[3*FDEC_STRIDE+0] = ( l1 + 2 * l2 + l3 + 2 ) >> 2;
466 static void predict_4x4_vr( uint8_t *src )
468 const int lt = src[-1-FDEC_STRIDE];
469 PREDICT_4x4_LOAD_LEFT
472 src[0*FDEC_STRIDE+0]=
473 src[2*FDEC_STRIDE+1]= ( lt + t0 + 1 ) >> 1;
475 src[0*FDEC_STRIDE+1]=
476 src[2*FDEC_STRIDE+2]= ( t0 + t1 + 1 ) >> 1;
478 src[0*FDEC_STRIDE+2]=
479 src[2*FDEC_STRIDE+3]= ( t1 + t2 + 1 ) >> 1;
481 src[0*FDEC_STRIDE+3]= ( t2 + t3 + 1 ) >> 1;
483 src[1*FDEC_STRIDE+0]=
484 src[3*FDEC_STRIDE+1]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
486 src[1*FDEC_STRIDE+1]=
487 src[3*FDEC_STRIDE+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
489 src[1*FDEC_STRIDE+2]=
490 src[3*FDEC_STRIDE+3]= ( t0 + 2 * t1 + t2 + 2) >> 2;
492 src[1*FDEC_STRIDE+3]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
493 src[2*FDEC_STRIDE+0]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
494 src[3*FDEC_STRIDE+0]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
497 static void predict_4x4_hd( uint8_t *src )
499 const int lt= src[-1-1*FDEC_STRIDE];
500 PREDICT_4x4_LOAD_LEFT
503 src[0*FDEC_STRIDE+0]=
504 src[1*FDEC_STRIDE+2]= ( lt + l0 + 1 ) >> 1;
505 src[0*FDEC_STRIDE+1]=
506 src[1*FDEC_STRIDE+3]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
507 src[0*FDEC_STRIDE+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
508 src[0*FDEC_STRIDE+3]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
509 src[1*FDEC_STRIDE+0]=
510 src[2*FDEC_STRIDE+2]= ( l0 + l1 + 1 ) >> 1;
511 src[1*FDEC_STRIDE+1]=
512 src[2*FDEC_STRIDE+3]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
513 src[2*FDEC_STRIDE+0]=
514 src[3*FDEC_STRIDE+2]= ( l1 + l2+ 1 ) >> 1;
515 src[2*FDEC_STRIDE+1]=
516 src[3*FDEC_STRIDE+3]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
517 src[3*FDEC_STRIDE+0]= ( l2 + l3 + 1 ) >> 1;
518 src[3*FDEC_STRIDE+1]= ( l1 + 2 * l2 + l3 + 2 ) >> 2;
521 static void predict_4x4_vl( uint8_t *src )
524 PREDICT_4x4_LOAD_TOP_RIGHT
526 src[0*FDEC_STRIDE+0]= ( t0 + t1 + 1 ) >> 1;
527 src[0*FDEC_STRIDE+1]=
528 src[2*FDEC_STRIDE+0]= ( t1 + t2 + 1 ) >> 1;
529 src[0*FDEC_STRIDE+2]=
530 src[2*FDEC_STRIDE+1]= ( t2 + t3 + 1 ) >> 1;
531 src[0*FDEC_STRIDE+3]=
532 src[2*FDEC_STRIDE+2]= ( t3 + t4 + 1 ) >> 1;
533 src[2*FDEC_STRIDE+3]= ( t4 + t5 + 1 ) >> 1;
534 src[1*FDEC_STRIDE+0]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
535 src[1*FDEC_STRIDE+1]=
536 src[3*FDEC_STRIDE+0]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
537 src[1*FDEC_STRIDE+2]=
538 src[3*FDEC_STRIDE+1]= ( t2 + 2 * t3 + t4 + 2 ) >> 2;
539 src[1*FDEC_STRIDE+3]=
540 src[3*FDEC_STRIDE+2]= ( t3 + 2 * t4 + t5 + 2 ) >> 2;
541 src[3*FDEC_STRIDE+3]= ( t4 + 2 * t5 + t6 + 2 ) >> 2;
544 static void predict_4x4_hu( uint8_t *src )
546 PREDICT_4x4_LOAD_LEFT
548 src[0*FDEC_STRIDE+0]= ( l0 + l1 + 1 ) >> 1;
549 src[0*FDEC_STRIDE+1]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
551 src[0*FDEC_STRIDE+2]=
552 src[1*FDEC_STRIDE+0]= ( l1 + l2 + 1 ) >> 1;
554 src[0*FDEC_STRIDE+3]=
555 src[1*FDEC_STRIDE+1]= ( l1 + 2*l2 + l3 + 2 ) >> 2;
557 src[1*FDEC_STRIDE+2]=
558 src[2*FDEC_STRIDE+0]= ( l2 + l3 + 1 ) >> 1;
560 src[1*FDEC_STRIDE+3]=
561 src[2*FDEC_STRIDE+1]= ( l2 + 2 * l3 + l3 + 2 ) >> 2;
563 src[2*FDEC_STRIDE+3]=
564 src[3*FDEC_STRIDE+1]=
565 src[3*FDEC_STRIDE+0]=
566 src[2*FDEC_STRIDE+2]=
567 src[3*FDEC_STRIDE+2]=
568 src[3*FDEC_STRIDE+3]= l3;
571 /****************************************************************************
572 * 8x8 prediction for intra luma block
573 ****************************************************************************/
/* Pixel accessor relative to the block origin. */
#define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]
/* Low-pass filter one left-edge sample into the edge buffer. */
#define PL(y) \
    edge[14-y] = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
/* Low-pass filter one top-edge sample into the edge buffer. */
#define PT(x) \
    edge[16+x] = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
581 void x264_predict_8x8_filter( uint8_t *src, uint8_t edge[33], int i_neighbor, int i_filters )
583 /* edge[7..14] = l7..l0
585 * edge[16..31] = t0 .. t15
588 int have_lt = i_neighbor & MB_TOPLEFT;
589 if( i_filters & MB_LEFT )
591 edge[15] = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2;
592 edge[14] = ((have_lt ? SRC(-1,-1) : SRC(-1,0))
593 + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2;
594 PL(1) PL(2) PL(3) PL(4) PL(5) PL(6)
595 edge[7] = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2;
598 if( i_filters & MB_TOP )
600 int have_tr = i_neighbor & MB_TOPRIGHT;
601 edge[16] = ((have_lt ? SRC(-1,-1) : SRC(0,-1))
602 + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2;
603 PT(1) PT(2) PT(3) PT(4) PT(5) PT(6)
604 edge[23] = ((have_tr ? SRC(8,-1) : SRC(7,-1))
605 + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2;
607 if( i_filters & MB_TOPRIGHT )
611 PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14)
613 edge[32] = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2;
617 *(uint64_t*)(edge+24) = SRC(7,-1) * 0x0101010101010101ULL;
618 edge[32] = SRC(7,-1);
/* Redefine PL/PT as loads of the (already filtered) edge samples into
 * local const ints l0..l7 / t0..t15 for the predictors below. */
#undef PL
#undef PT
#define PL(y) \
    UNUSED const int l##y = edge[14-y];
#define PT(x) \
    UNUSED const int t##x = edge[16+x];
#define PREDICT_8x8_LOAD_TOPLEFT \
    const int lt = edge[15];
#define PREDICT_8x8_LOAD_LEFT \
    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
#define PREDICT_8x8_LOAD_TOP \
    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)
#define PREDICT_8x8_LOAD_TOPRIGHT \
    PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14) PT(15)
/* Fill the 8x8 block with the packed pixel value v (two words per row). */
#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += FDEC_STRIDE; \
    }
/* 8x8 luma DC prediction with no neighbors: constant 128. */
static void predict_8x8_dc_128( uint8_t *src, uint8_t edge[33] )
{
    PREDICT_8x8_DC(0x80808080);
}
652 static void predict_8x8_dc_left( uint8_t *src, uint8_t edge[33] )
654 PREDICT_8x8_LOAD_LEFT
655 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
658 static void predict_8x8_dc_top( uint8_t *src, uint8_t edge[33] )
661 const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
664 static void predict_8x8_dc( uint8_t *src, uint8_t edge[33] )
666 PREDICT_8x8_LOAD_LEFT
668 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
669 +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
672 static void predict_8x8_h( uint8_t *src, uint8_t edge[33] )
674 PREDICT_8x8_LOAD_LEFT
675 #define ROW(y) ((uint32_t*)(src+y*FDEC_STRIDE))[0] =\
676 ((uint32_t*)(src+y*FDEC_STRIDE))[1] = 0x01010101U * l##y
677 ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
680 static void predict_8x8_v( uint8_t *src, uint8_t edge[33] )
682 const uint64_t top = *(uint64_t*)(edge+16);
684 for( y = 0; y < 8; y++ )
685 *(uint64_t*)(src+y*FDEC_STRIDE) = top;
687 static void predict_8x8_ddl( uint8_t *src, uint8_t edge[33] )
690 PREDICT_8x8_LOAD_TOPRIGHT
691 SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
692 SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
693 SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
694 SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
695 SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
696 SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
697 SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
698 SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
699 SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
700 SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
701 SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
702 SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
703 SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
704 SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
705 SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
707 static void predict_8x8_ddr( uint8_t *src, uint8_t edge[33] )
710 PREDICT_8x8_LOAD_LEFT
711 PREDICT_8x8_LOAD_TOPLEFT
712 SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
713 SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
714 SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
715 SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
716 SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
717 SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
718 SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
719 SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
720 SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
721 SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
722 SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
723 SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
724 SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
725 SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
726 SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
729 static void predict_8x8_vr( uint8_t *src, uint8_t edge[33] )
732 PREDICT_8x8_LOAD_LEFT
733 PREDICT_8x8_LOAD_TOPLEFT
734 SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
735 SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
736 SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
737 SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
738 SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
739 SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
740 SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
741 SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
742 SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
743 SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
744 SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
745 SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
746 SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
747 SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
748 SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
749 SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
750 SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
751 SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
752 SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
753 SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
754 SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
755 SRC(7,0)= (t6 + t7 + 1) >> 1;
757 static void predict_8x8_hd( uint8_t *src, uint8_t edge[33] )
760 PREDICT_8x8_LOAD_LEFT
761 PREDICT_8x8_LOAD_TOPLEFT
762 SRC(0,7)= (l6 + l7 + 1) >> 1;
763 SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
764 SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
765 SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
766 SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
767 SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
768 SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
769 SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
770 SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
771 SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
772 SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
773 SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
774 SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
775 SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
776 SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
777 SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
778 SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
779 SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
780 SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
781 SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
782 SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
783 SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
785 static void predict_8x8_vl( uint8_t *src, uint8_t edge[33] )
788 PREDICT_8x8_LOAD_TOPRIGHT
789 SRC(0,0)= (t0 + t1 + 1) >> 1;
790 SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
791 SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
792 SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
793 SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
794 SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
795 SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
796 SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
797 SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
798 SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
799 SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
800 SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
801 SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
802 SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
803 SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
804 SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
805 SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
806 SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
807 SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
808 SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
809 SRC(7,6)= (t10 + t11 + 1) >> 1;
810 SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
812 static void predict_8x8_hu( uint8_t *src, uint8_t edge[33] )
814 PREDICT_8x8_LOAD_LEFT
815 SRC(0,0)= (l0 + l1 + 1) >> 1;
816 SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
817 SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
818 SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
819 SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
820 SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
821 SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
822 SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
823 SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
824 SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
825 SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
826 SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
827 SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
828 SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
829 SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
830 SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
831 SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
832 SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
835 /****************************************************************************
836 * Exported functions:
837 ****************************************************************************/
838 void x264_predict_16x16_init( int cpu, x264_predict_t pf[7] )
840 pf[I_PRED_16x16_V ] = predict_16x16_v;
841 pf[I_PRED_16x16_H ] = predict_16x16_h;
842 pf[I_PRED_16x16_DC] = predict_16x16_dc;
843 pf[I_PRED_16x16_P ] = predict_16x16_p;
844 pf[I_PRED_16x16_DC_LEFT]= predict_16x16_dc_left;
845 pf[I_PRED_16x16_DC_TOP ]= predict_16x16_dc_top;
846 pf[I_PRED_16x16_DC_128 ]= predict_16x16_dc_128;
849 x264_predict_16x16_init_mmx( cpu, pf );
853 if( cpu&X264_CPU_ALTIVEC )
855 x264_predict_16x16_init_altivec( pf );
860 void x264_predict_8x8c_init( int cpu, x264_predict_t pf[7] )
862 pf[I_PRED_CHROMA_V ] = predict_8x8c_v;
863 pf[I_PRED_CHROMA_H ] = predict_8x8c_h;
864 pf[I_PRED_CHROMA_DC] = predict_8x8c_dc;
865 pf[I_PRED_CHROMA_P ] = predict_8x8c_p;
866 pf[I_PRED_CHROMA_DC_LEFT]= predict_8x8c_dc_left;
867 pf[I_PRED_CHROMA_DC_TOP ]= predict_8x8c_dc_top;
868 pf[I_PRED_CHROMA_DC_128 ]= predict_8x8c_dc_128;
871 x264_predict_8x8c_init_mmx( cpu, pf );
875 void x264_predict_8x8_init( int cpu, x264_predict8x8_t pf[12] )
877 pf[I_PRED_8x8_V] = predict_8x8_v;
878 pf[I_PRED_8x8_H] = predict_8x8_h;
879 pf[I_PRED_8x8_DC] = predict_8x8_dc;
880 pf[I_PRED_8x8_DDL] = predict_8x8_ddl;
881 pf[I_PRED_8x8_DDR] = predict_8x8_ddr;
882 pf[I_PRED_8x8_VR] = predict_8x8_vr;
883 pf[I_PRED_8x8_HD] = predict_8x8_hd;
884 pf[I_PRED_8x8_VL] = predict_8x8_vl;
885 pf[I_PRED_8x8_HU] = predict_8x8_hu;
886 pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left;
887 pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top;
888 pf[I_PRED_8x8_DC_128] = predict_8x8_dc_128;
891 x264_predict_8x8_init_mmx( cpu, pf );
895 void x264_predict_4x4_init( int cpu, x264_predict_t pf[12] )
897 pf[I_PRED_4x4_V] = predict_4x4_v;
898 pf[I_PRED_4x4_H] = predict_4x4_h;
899 pf[I_PRED_4x4_DC] = predict_4x4_dc;
900 pf[I_PRED_4x4_DDL] = predict_4x4_ddl;
901 pf[I_PRED_4x4_DDR] = predict_4x4_ddr;
902 pf[I_PRED_4x4_VR] = predict_4x4_vr;
903 pf[I_PRED_4x4_HD] = predict_4x4_hd;
904 pf[I_PRED_4x4_VL] = predict_4x4_vl;
905 pf[I_PRED_4x4_HU] = predict_4x4_hu;
906 pf[I_PRED_4x4_DC_LEFT]= predict_4x4_dc_left;
907 pf[I_PRED_4x4_DC_TOP] = predict_4x4_dc_top;
908 pf[I_PRED_4x4_DC_128] = predict_4x4_dc_128;
911 x264_predict_4x4_init_mmx( cpu, pf );