1 /*****************************************************************************
2 * predict.c: h264 encoder
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: predict.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
22 *****************************************************************************/
24 /* XXX predict4x4 are inspired from ffmpeg h264 decoder
38 #undef HAVE_MMXEXT /* not finished now */
41 # include "i386/predict.h"
44 static inline int clip_uint8( int a )
52 /****************************************************************************
53 * 16x16 prediction for intra block DC, H, V, P
54 ****************************************************************************/
/* 16x16 DC prediction: fill the block with the rounded average of the
 * 16 left-column and 16 top-row neighbour pixels. */
static void predict_16x16_dc( uint8_t *src, int i_stride )
{
    int dc = 0;
    int i, j;

    /* calculate DC value: sum of 32 border pixels */
    for( i = 0; i < 16; i++ )
    {
        dc += src[-1 + i * i_stride];   /* left column */
        dc += src[i - i_stride];        /* top row */
    }
    dc = ( dc + 16 ) >> 5;              /* round( sum / 32 ) */

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = dc;
        }
    }
}
/* 16x16 DC prediction using only the left-column neighbours
 * (used when the top row is unavailable). */
static void predict_16x16_dc_left( uint8_t *src, int i_stride )
{
    int dc = 0;
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        dc += src[-1 + i * i_stride];
    }
    dc = ( dc + 8 ) >> 4;               /* round( sum / 16 ) */

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = dc;
        }
    }
}
/* 16x16 DC prediction using only the top-row neighbours
 * (used when the left column is unavailable). */
static void predict_16x16_dc_top( uint8_t *src, int i_stride )
{
    int dc = 0;
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        dc += src[i - i_stride];
    }
    dc = ( dc + 8 ) >> 4;               /* round( sum / 16 ) */

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = dc;
        }
    }
}
/* 16x16 DC prediction with no available neighbours: fill with the
 * mid-grey value 128 (2^(bitdepth-1)). */
static void predict_16x16_dc_128( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = 128;
        }
    }
}
/* 16x16 horizontal prediction: each row is filled with its
 * left-neighbour pixel. */
static void predict_16x16_h( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        const uint8_t v = src[-1];      /* left neighbour of this row */

        for( j = 0; j < 16; j++ )
        {
            src[j] = v;
        }
        src += i_stride;
    }
}
/* 16x16 vertical prediction: each column is filled with its
 * top-neighbour pixel. */
static void predict_16x16_v( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride +j] = src[j - i_stride];
        }
    }
}
/* 16x16 plane prediction: fit a linear gradient through the border
 * pixels (H.264 Intra_16x16_Plane). Result is clipped to [0,255]. */
static void predict_16x16_p( uint8_t *src, int i_stride )
{
    int x, y, i;
    int a, b, c;
    int H = 0;
    int V = 0;
    int i00;

    /* calcule H and V: weighted horizontal/vertical border gradients */
    for( i = 0; i <= 7; i++ )
    {
        H += ( i + 1 ) * ( src[ 8 + i - i_stride ] - src[6 -i -i_stride] );
        V += ( i + 1 ) * ( src[-1 + (8+i)*i_stride] - src[-1 + (6-i)*i_stride] );
    }

    a = 16 * ( src[-1 + 15*i_stride] + src[15 - i_stride] );
    b = ( 5 * H + 32 ) >> 6;
    c = ( 5 * V + 32 ) >> 6;

    /* value at (0,0) in 1/32 pixel units, +16 for rounding */
    i00 = a - b * 7 - c * 7 + 16;

    for( y = 0; y < 16; y++ )
    {
        for( x = 0; x < 16; x++ )
        {
            int pix;

            pix = ( i00 + b * x ) >> 5;
            src[x] = clip_uint8( pix );
        }
        src += i_stride;
        i00 += c;                       /* advance plane by one row */
    }
}
196 /****************************************************************************
197 * 8x8 prediction for intra chroma block DC, H, V, P
198 ****************************************************************************/
/* 8x8 chroma DC prediction with no available neighbours:
 * fill with mid-grey 128. */
static void predict_8x8c_dc_128( uint8_t *src, int i_stride )
{
    int x, y;

    for( y = 0; y < 8; y++ )
    {
        for( x = 0; x < 8; x++ )
        {
            src[x] = 128;
        }
        src += i_stride;
    }
}
/* 8x8 chroma DC prediction from the left column only:
 * rows 0-3 get the average of left pixels 0-3, rows 4-7 that of 4-7. */
static void predict_8x8c_dc_left( uint8_t *src, int i_stride )
{
    int x, y;
    int dc0 = 0, dc1 = 0;

    for( y = 0; y < 4; y++ )
    {
        dc0 += src[y * i_stride - 1];
        dc1 += src[(y+4) * i_stride - 1];
    }
    dc0 = ( dc0 + 2 ) >> 2;
    dc1 = ( dc1 + 2 ) >> 2;

    /* src advances one row per iteration, so the 4*i_stride offset
     * covers rows 4-7 while x covers rows 0-3 */
    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 8; x++ )
        {
            src[           x] = dc0;
            src[4*i_stride+x] = dc1;
        }
        src += i_stride;
    }
}
/* 8x8 chroma DC prediction from the top row only:
 * columns 0-3 get the average of top pixels 0-3, columns 4-7 that of 4-7. */
static void predict_8x8c_dc_top( uint8_t *src, int i_stride )
{
    int x, y;
    int dc0 = 0, dc1 = 0;

    for( x = 0; x < 4; x++ )
    {
        dc0 += src[x     - i_stride];
        dc1 += src[x + 4 - i_stride];
    }
    dc0 = ( dc0 + 2 ) >> 2;
    dc1 = ( dc1 + 2 ) >> 2;

    for( y = 0; y < 8; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x    ] = dc0;
            src[x + 4] = dc1;
        }
        src += i_stride;
    }
}
/* 8x8 chroma DC prediction: the block is split into four 4x4 quadrants,
 * each filled with a DC value derived from the adjacent border sums
 * (per the H.264 chroma DC rules: corner quadrants that touch only one
 * border use that border alone). */
static void predict_8x8c_dc( uint8_t *src, int i_stride )
{
    int x, y, i;
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0;
    int dc0, dc1, dc2, dc3;

    /*
          s0 s1
       s2
       s3
    */
    for( i = 0; i < 4; i++ )
    {
        s0 += src[i - i_stride];
        s1 += src[i + 4 - i_stride];
        s2 += src[-1 + i * i_stride];
        s3 += src[-1 + (i+4)*i_stride];
    }
    /*
       dc0 dc1
       dc2 dc3
     */
    dc0 = ( s0 + s2 + 4 ) >> 3;     /* top-left: both borders */
    dc1 = ( s1 + 2 ) >> 2;          /* top-right: top border only */
    dc2 = ( s3 + 2 ) >> 2;          /* bottom-left: left border only */
    dc3 = ( s1 + s3 + 4 ) >> 3;     /* bottom-right: both remaining sums */

    /* src advances one row per iteration; 4*i_stride covers rows 4-7 */
    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[             x    ] = dc0;
            src[             x + 4] = dc1;
            src[4*i_stride + x    ] = dc2;
            src[4*i_stride + x + 4] = dc3;
        }
        src += i_stride;
    }
}
/* 8x8 chroma horizontal prediction: each row replicates its
 * left-neighbour pixel. */
static void predict_8x8c_h( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 8; i++ )
    {
        const uint8_t v = src[-1];

        for( j = 0; j < 8; j++ )
        {
            src[j] = v;
        }
        src += i_stride;
    }
}
/* 8x8 chroma vertical prediction: each column replicates its
 * top-neighbour pixel. */
static void predict_8x8c_v( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 8; i++ )
    {
        for( j = 0; j < 8; j++ )
        {
            src[i * i_stride +j] = src[j - i_stride];
        }
    }
}
/* 8x8 chroma plane prediction: linear gradient fitted through the
 * border pixels (H.264 Intra_Chroma_Plane). Result clipped to [0,255]. */
static void predict_8x8c_p( uint8_t *src, int i_stride )
{
    int x, y, i;
    int a, b, c;
    int H = 0;
    int V = 0;
    int i00;

    /* weighted horizontal/vertical border gradients */
    for( i = 0; i < 4; i++ )
    {
        H += ( i + 1 ) * ( src[4+i - i_stride] - src[2 - i -i_stride] );
        V += ( i + 1 ) * ( src[-1 +(i+4)*i_stride] - src[-1+(2-i)*i_stride] );
    }

    a = 16 * ( src[-1+7*i_stride] + src[7 - i_stride] );
    b = ( 17 * H + 16 ) >> 5;
    c = ( 17 * V + 16 ) >> 5;
    i00 = a -3*b -3*c + 16;     /* plane value at (0,0), 1/32 px units */

    for( y = 0; y < 8; y++ )
    {
        for( x = 0; x < 8; x++ )
        {
            int pix;

            pix = (i00 +b*x) >> 5;
            src[x] = clip_uint8( pix );
        }
        src += i_stride;
        i00 += c;
    }
}
363 /****************************************************************************
364 * 4x4 prediction for intra luma block
365 ****************************************************************************/
/* 4x4 DC prediction with no available neighbours: fill with mid-grey 128. */
static void predict_4x4_dc_128( uint8_t *src, int i_stride )
{
    int x, y;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = 128;
        }
        src += i_stride;
    }
}
/* 4x4 DC prediction from the left column only. */
static void predict_4x4_dc_left( uint8_t *src, int i_stride )
{
    int x, y;
    int dc = ( src[-1+0*i_stride] + src[-1+i_stride]+
               src[-1+2*i_stride] + src[-1+3*i_stride] + 2 ) >> 2;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = dc;
        }
        src += i_stride;
    }
}
/* 4x4 DC prediction from the top row only. */
static void predict_4x4_dc_top( uint8_t *src, int i_stride )
{
    int x, y;
    int dc = ( src[0 - i_stride] + src[1 - i_stride] +
               src[2 - i_stride] + src[3 - i_stride] + 2 ) >> 2;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = dc;
        }
        src += i_stride;
    }
}
/* 4x4 DC prediction: rounded average of the 4 left and 4 top neighbours. */
static void predict_4x4_dc( uint8_t *src, int i_stride )
{
    int x, y;
    int dc = ( src[-1+0*i_stride] + src[-1+i_stride]+
               src[-1+2*i_stride] + src[-1+3*i_stride] +
               src[0 - i_stride]  + src[1 - i_stride] +
               src[2 - i_stride]  + src[3 - i_stride] + 4 ) >> 3;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = dc;
        }
        src += i_stride;
    }
}
/* 4x4 horizontal prediction: each row replicates its left neighbour. */
static void predict_4x4_h( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 4; i++ )
    {
        const uint8_t v = src[-1];

        for( j = 0; j < 4; j++ )
        {
            src[j] = v;
        }
        src += i_stride;
    }
}
/* 4x4 vertical prediction: each column replicates its top neighbour. */
static void predict_4x4_v( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 4; i++ )
    {
        for( j = 0; j < 4; j++ )
        {
            src[i * i_stride +j] = src[j - i_stride];
        }
    }
}
/* Load the 4 left-neighbour pixels (column x = -1) into l0..l3. */
#define PREDICT_4x4_LOAD_LEFT \
    const int l0 = src[-1+0*i_stride]; \
    const int l1 = src[-1+1*i_stride]; \
    const int l2 = src[-1+2*i_stride]; \
    const int l3 = src[-1+3*i_stride];

/* Load the 4 top-neighbour pixels (row y = -1) into t0..t3. */
#define PREDICT_4x4_LOAD_TOP \
    const int t0 = src[0-1*i_stride]; \
    const int t1 = src[1-1*i_stride]; \
    const int t2 = src[2-1*i_stride]; \
    const int t3 = src[3-1*i_stride];

/* Load the 4 top-right neighbour pixels (row y = -1, x = 4..7) into t4..t7. */
#define PREDICT_4x4_LOAD_TOP_RIGHT \
    const int t4 = src[4-1*i_stride]; \
    const int t5 = src[5-1*i_stride]; \
    const int t6 = src[6-1*i_stride]; \
    const int t7 = src[7-1*i_stride];
474 static void predict_4x4_ddl( uint8_t *src, int i_stride )
477 PREDICT_4x4_LOAD_TOP_RIGHT
479 src[0*i_stride+0] = ( t0 + 2*t1+ t2 + 2 ) >> 2;
482 src[1*i_stride+0] = ( t1 + 2*t2+ t3 + 2 ) >> 2;
486 src[2*i_stride+0] = ( t2 + 2*t3+ t4 + 2 ) >> 2;
491 src[3*i_stride+0] = ( t3 + 2*t4+ t5 + 2 ) >> 2;
495 src[3*i_stride+1] = ( t4 + 2*t5+ t6 + 2 ) >> 2;
498 src[3*i_stride+2] = ( t5 + 2*t6+ t7 + 2 ) >> 2;
500 src[3*i_stride+3] = ( t6 + 3 * t7 + 2 ) >> 2;
502 static void predict_4x4_ddr( uint8_t *src, int i_stride )
504 const int lt = src[-1-i_stride];
505 PREDICT_4x4_LOAD_LEFT
511 src[3*i_stride+3] = ( t0 + 2*lt +l0 + 2 ) >> 2;
515 src[2*i_stride+3] = ( lt + 2 * t0 + t1 + 2 ) >> 2;
518 src[1*i_stride+3] = ( t0 + 2 * t1 + t2 + 2 ) >> 2;
520 src[0*i_stride+3] = ( t1 + 2 * t2 + t3 + 2 ) >> 2;
524 src[3*i_stride+2] = ( lt + 2 * l0 + l1 + 2 ) >> 2;
527 src[3*i_stride+1] = ( l0 + 2 * l1 + l2 + 2 ) >> 2;
529 src[3*i_stride+0] = ( l1 + 2 * l2 + l3 + 2 ) >> 2;
532 static void predict_4x4_vr( uint8_t *src, int i_stride )
534 const int lt = src[-1-i_stride];
535 PREDICT_4x4_LOAD_LEFT
537 /* produce warning as l3 is unused */
540 src[2*i_stride+1]= ( lt + t0 + 1 ) >> 1;
543 src[2*i_stride+2]= ( t0 + t1 + 1 ) >> 1;
546 src[2*i_stride+3]= ( t1 + t2 + 1 ) >> 1;
548 src[0*i_stride+3]= ( t2 + t3 + 1 ) >> 1;
551 src[3*i_stride+1]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
554 src[3*i_stride+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
557 src[3*i_stride+3]= ( t0 + 2 * t1 + t2 + 2) >> 2;
559 src[1*i_stride+3]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
560 src[2*i_stride+0]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
561 src[3*i_stride+0]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
564 static void predict_4x4_hd( uint8_t *src, int i_stride )
566 const int lt= src[-1-1*i_stride];
567 PREDICT_4x4_LOAD_LEFT
569 /* produce warning as t3 is unused */
572 src[1*i_stride+2]= ( lt + l0 + 1 ) >> 1;
574 src[1*i_stride+3]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
575 src[0*i_stride+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
576 src[0*i_stride+3]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
578 src[2*i_stride+2]= ( l0 + l1 + 1 ) >> 1;
580 src[2*i_stride+3]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
582 src[3*i_stride+2]= ( l1 + l2+ 1 ) >> 1;
584 src[3*i_stride+3]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
585 src[3*i_stride+0]= ( l2 + l3 + 1 ) >> 1;
586 src[3*i_stride+1]= ( l1 + 2 * l2 + l3 + 2 ) >> 2;
589 static void predict_4x4_vl( uint8_t *src, int i_stride )
592 PREDICT_4x4_LOAD_TOP_RIGHT
593 /* produce warning as t7 is unused */
595 src[0*i_stride+0]= ( t0 + t1 + 1 ) >> 1;
597 src[2*i_stride+0]= ( t1 + t2 + 1 ) >> 1;
599 src[2*i_stride+1]= ( t2 + t3 + 1 ) >> 1;
601 src[2*i_stride+2]= ( t3 + t4+ 1 ) >> 1;
602 src[2*i_stride+3]= ( t4 + t5+ 1 ) >> 1;
603 src[1*i_stride+0]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
605 src[3*i_stride+0]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
607 src[3*i_stride+1]= ( t2 + 2 * t3 + t4 + 2 ) >> 2;
609 src[3*i_stride+2]= ( t3 + 2 * t4 + t5 + 2 ) >> 2;
610 src[3*i_stride+3]= ( t4 + 2 * t5 + t6 + 2 ) >> 2;
613 static void predict_4x4_hu( uint8_t *src, int i_stride )
615 PREDICT_4x4_LOAD_LEFT
617 src[0*i_stride+0]= ( l0 + l1 + 1 ) >> 1;
618 src[0*i_stride+1]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
621 src[1*i_stride+0]= ( l1 + l2 + 1 ) >> 1;
624 src[1*i_stride+1]= ( l1 + 2*l2 + l3 + 2 ) >> 2;
627 src[2*i_stride+0]= ( l2 + l3 + 1 ) >> 1;
630 src[2*i_stride+1]= ( l2 + 2 * l3 + l3 + 2 ) >> 2;
637 src[3*i_stride+3]= l3;
640 /****************************************************************************
641 * 8x8 prediction for intra luma block
642 ****************************************************************************/
/* Pixel accessor relative to the top-left corner of the 8x8 block. */
#define SRC(x,y) src[(x)+(y)*i_stride]

/* Filtered left neighbour y ([1 2 1] smoothing, H.264 8x8 intra). */
#define PL(y) \
    const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
/* Load the filtered left-edge samples l0..l7; l0 substitutes the
 * left pixel itself when the top-left neighbour is unavailable. */
#define PREDICT_8x8_LOAD_LEFT \
    const int l0 = ((i_neighbor&MB_TOPLEFT ? SRC(-1,-1) : SRC(-1,0)) \
                     + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
    PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
    const int l7 = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2;

/* Filtered top neighbour x ([1 2 1] smoothing). */
#define PT(x) \
    const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
/* Load the filtered top-edge samples t0..t7; the end taps substitute the
 * edge pixel when the top-left / top-right neighbours are unavailable. */
#define PREDICT_8x8_LOAD_TOP \
    const int t0 = ((i_neighbor&MB_TOPLEFT ? SRC(-1,-1) : SRC(0,-1)) \
                     + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
    PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
    const int t7 = ((i_neighbor&MB_TOPRIGHT ? SRC(8,-1) : SRC(7,-1)) \
                     + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2;

/* Filtered top-right neighbour x (assignment form; t8..t15 declared in
 * PREDICT_8x8_LOAD_TOPRIGHT). */
#define PTR(x) \
    t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
/* Load t8..t15; if the top-right block is unavailable, replicate the
 * last top pixel SRC(7,-1) as the spec requires. */
#define PREDICT_8x8_LOAD_TOPRIGHT \
    int t8, t9, t10, t11, t12, t13, t14, t15; \
    if(i_neighbor&MB_TOPRIGHT) { \
        PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
        t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
    } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);

/* Filtered top-left corner sample. */
#define PREDICT_8x8_LOAD_TOPLEFT \
    const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2;

/* Fill the 8x8 block with the replicated 32-bit DC pattern v
 * (two aligned 32-bit stores per row). */
#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += i_stride; \
    }
/* 8x8 luma DC prediction with no available neighbours: mid-grey fill. */
static void predict_8x8_dc_128( uint8_t *src, int i_stride, int i_neighbor )
{
    PREDICT_8x8_DC(0x80808080);
}
686 static void predict_8x8_dc_left( uint8_t *src, int i_stride, int i_neighbor )
688 PREDICT_8x8_LOAD_LEFT
689 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
692 static void predict_8x8_dc_top( uint8_t *src, int i_stride, int i_neighbor )
695 const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
698 static void predict_8x8_dc( uint8_t *src, int i_stride, int i_neighbor )
700 PREDICT_8x8_LOAD_LEFT
702 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
703 +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
706 static void predict_8x8_h( uint8_t *src, int i_stride, int i_neighbor )
708 PREDICT_8x8_LOAD_LEFT
709 #define ROW(y) ((uint32_t*)(src+y*i_stride))[0] =\
710 ((uint32_t*)(src+y*i_stride))[1] = 0x01010101U * l##y
711 ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
714 static void predict_8x8_v( uint8_t *src, int i_stride, int i_neighbor )
717 PREDICT_8x8_LOAD_TOP;
726 for( y = 1; y < 8; y++ )
727 *(uint64_t*)(src+y*i_stride) = *(uint64_t*)src;
729 static void predict_8x8_ddl( uint8_t *src, int i_stride, int i_neighbor )
732 PREDICT_8x8_LOAD_TOPRIGHT
733 SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
734 SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
735 SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
736 SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
737 SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
738 SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
739 SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
740 SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
741 SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
742 SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
743 SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
744 SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
745 SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
746 SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
747 SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
749 static void predict_8x8_ddr( uint8_t *src, int i_stride, int i_neighbor )
752 PREDICT_8x8_LOAD_LEFT
753 PREDICT_8x8_LOAD_TOPLEFT
754 SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
755 SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
756 SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
757 SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
758 SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
759 SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
760 SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
761 SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
762 SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
763 SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
764 SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
765 SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
766 SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
767 SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
768 SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
771 static void predict_8x8_vr( uint8_t *src, int i_stride, int i_neighbor )
774 PREDICT_8x8_LOAD_LEFT
775 PREDICT_8x8_LOAD_TOPLEFT
776 /* produce warning as l7 is unused */
777 SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
778 SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
779 SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
780 SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
781 SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
782 SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
783 SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
784 SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
785 SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
786 SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
787 SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
788 SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
789 SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
790 SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
791 SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
792 SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
793 SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
794 SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
795 SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
796 SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
797 SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
798 SRC(7,0)= (t6 + t7 + 1) >> 1;
800 static void predict_8x8_hd( uint8_t *src, int i_stride, int i_neighbor )
803 PREDICT_8x8_LOAD_LEFT
804 PREDICT_8x8_LOAD_TOPLEFT
805 /* produce warning as t7 is unused */
806 SRC(0,7)= (l6 + l7 + 1) >> 1;
807 SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
808 SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
809 SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
810 SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
811 SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
812 SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
813 SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
814 SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
815 SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
816 SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
817 SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
818 SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
819 SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
820 SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
821 SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
822 SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
823 SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
824 SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
825 SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
826 SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
827 SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
829 static void predict_8x8_vl( uint8_t *src, int i_stride, int i_neighbor )
832 PREDICT_8x8_LOAD_TOPRIGHT
833 SRC(0,0)= (t0 + t1 + 1) >> 1;
834 SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
835 SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
836 SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
837 SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
838 SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
839 SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
840 SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
841 SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
842 SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
843 SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
844 SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
845 SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
846 SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
847 SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
848 SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
849 SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
850 SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
851 SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
852 SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
853 SRC(7,6)= (t10 + t11 + 1) >> 1;
854 SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
856 static void predict_8x8_hu( uint8_t *src, int i_stride, int i_neighbor )
858 PREDICT_8x8_LOAD_LEFT
859 SRC(0,0)= (l0 + l1 + 1) >> 1;
860 SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
861 SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
862 SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
863 SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
864 SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
865 SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
866 SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
867 SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
868 SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
869 SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
870 SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
871 SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
872 SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
873 SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
874 SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
875 SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
876 SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
879 /****************************************************************************
880 * Exported functions:
881 ****************************************************************************/
882 void x264_predict_16x16_init( int cpu, x264_predict_t pf[7] )
884 pf[I_PRED_16x16_V ] = predict_16x16_v;
885 pf[I_PRED_16x16_H ] = predict_16x16_h;
886 pf[I_PRED_16x16_DC] = predict_16x16_dc;
887 pf[I_PRED_16x16_P ] = predict_16x16_p;
888 pf[I_PRED_16x16_DC_LEFT]= predict_16x16_dc_left;
889 pf[I_PRED_16x16_DC_TOP ]= predict_16x16_dc_top;
890 pf[I_PRED_16x16_DC_128 ]= predict_16x16_dc_128;
893 if( cpu&X264_CPU_MMXEXT )
895 x264_predict_16x16_init_mmxext( pf );
900 void x264_predict_8x8c_init( int cpu, x264_predict_t pf[7] )
902 pf[I_PRED_CHROMA_V ] = predict_8x8c_v;
903 pf[I_PRED_CHROMA_H ] = predict_8x8c_h;
904 pf[I_PRED_CHROMA_DC] = predict_8x8c_dc;
905 pf[I_PRED_CHROMA_P ] = predict_8x8c_p;
906 pf[I_PRED_CHROMA_DC_LEFT]= predict_8x8c_dc_left;
907 pf[I_PRED_CHROMA_DC_TOP ]= predict_8x8c_dc_top;
908 pf[I_PRED_CHROMA_DC_128 ]= predict_8x8c_dc_128;
911 if( cpu&X264_CPU_MMXEXT )
913 x264_predict_8x8c_init_mmxext( pf );
918 void x264_predict_8x8_init( int cpu, x264_predict8x8_t pf[12] )
920 pf[I_PRED_8x8_V] = predict_8x8_v;
921 pf[I_PRED_8x8_H] = predict_8x8_h;
922 pf[I_PRED_8x8_DC] = predict_8x8_dc;
923 pf[I_PRED_8x8_DDL] = predict_8x8_ddl;
924 pf[I_PRED_8x8_DDR] = predict_8x8_ddr;
925 pf[I_PRED_8x8_VR] = predict_8x8_vr;
926 pf[I_PRED_8x8_HD] = predict_8x8_hd;
927 pf[I_PRED_8x8_VL] = predict_8x8_vl;
928 pf[I_PRED_8x8_HU] = predict_8x8_hu;
929 pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left;
930 pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top;
931 pf[I_PRED_8x8_DC_128] = predict_8x8_dc_128;
934 void x264_predict_4x4_init( int cpu, x264_predict_t pf[12] )
936 pf[I_PRED_4x4_V] = predict_4x4_v;
937 pf[I_PRED_4x4_H] = predict_4x4_h;
938 pf[I_PRED_4x4_DC] = predict_4x4_dc;
939 pf[I_PRED_4x4_DDL] = predict_4x4_ddl;
940 pf[I_PRED_4x4_DDR] = predict_4x4_ddr;
941 pf[I_PRED_4x4_VR] = predict_4x4_vr;
942 pf[I_PRED_4x4_HD] = predict_4x4_hd;
943 pf[I_PRED_4x4_VL] = predict_4x4_vl;
944 pf[I_PRED_4x4_HU] = predict_4x4_hu;
945 pf[I_PRED_4x4_DC_LEFT]= predict_4x4_dc_left;
946 pf[I_PRED_4x4_DC_TOP] = predict_4x4_dc_top;
947 pf[I_PRED_4x4_DC_128] = predict_4x4_dc_128;
950 if( cpu&X264_CPU_MMXEXT )
952 x264_predict_4x4_init_mmxext( pf );