/*****************************************************************************
 * predict.c: h264 encoder
 *****************************************************************************
 * Copyright (C) 2003 Laurent Aimar
 * $Id: predict.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
 *****************************************************************************/
24 /* XXX predict4x4 are inspired from ffmpeg h264 decoder
36 #include "macroblock.h"
39 #undef HAVE_MMXEXT /* not finished now */
42 # include "i386/predict.h"
45 static inline int clip_uint8( int a )
53 /****************************************************************************
54 * 16x16 prediction for intra block DC, H, V, P
55 ****************************************************************************/
/* 16x16 DC: fill the block with the rounded average of the 16 left and
 * 16 top neighbor pixels. */
static void predict_16x16_dc( uint8_t *src, int i_stride )
{
    int dc = 0;
    int i, j;

    /* calculate DC value: sum left column + top row neighbors */
    for( i = 0; i < 16; i++ )
    {
        dc += src[-1 + i * i_stride];
        dc += src[i - i_stride];
    }
    dc = ( dc + 16 ) >> 5;  /* rounded mean of 32 samples */

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = dc;
        }
    }
}
/* 16x16 DC using only the 16 left neighbors (top row unavailable). */
static void predict_16x16_dc_left( uint8_t *src, int i_stride )
{
    int dc = 0;
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        dc += src[-1 + i * i_stride];
    }
    dc = ( dc + 8 ) >> 4;  /* rounded mean of 16 samples */

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = dc;
        }
    }
}
/* 16x16 DC using only the 16 top neighbors (left column unavailable). */
static void predict_16x16_dc_top( uint8_t *src, int i_stride )
{
    int dc = 0;
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        dc += src[i - i_stride];
    }
    dc = ( dc + 8 ) >> 4;  /* rounded mean of 16 samples */

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = dc;
        }
    }
}
/* 16x16 DC with no neighbors available: fill with mid-gray (128). */
static void predict_16x16_dc_128( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = 128;
        }
    }
}
/* 16x16 horizontal: replicate each row's left neighbor across the row. */
static void predict_16x16_h( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        const uint8_t v = src[-1 + i * i_stride];

        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = v;
        }
    }
}
/* 16x16 vertical: replicate the top neighbor row down every row. */
static void predict_16x16_v( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 16; i++ )
    {
        for( j = 0; j < 16; j++ )
        {
            src[i * i_stride + j] = src[j - i_stride];
        }
    }
}
/* 16x16 plane prediction: fit a linear gradient (H, V slopes) through the
 * neighbor pixels and fill the block with the clipped plane values. */
static void predict_16x16_p( uint8_t *src, int i_stride )
{
    int x, y, i;
    int a, b, c;
    int H = 0;
    int V = 0;
    int i00;

    /* calculate H and V gradients from the top row and left column */
    for( i = 0; i <= 7; i++ )
    {
        H += ( i + 1 ) * ( src[ 8 + i - i_stride ] - src[6 -i -i_stride] );
        V += ( i + 1 ) * ( src[-1 + (8+i)*i_stride] - src[-1 + (6-i)*i_stride] );
    }

    a = 16 * ( src[-1 + 15*i_stride] + src[15 - i_stride] );
    b = ( 5 * H + 32 ) >> 6;
    c = ( 5 * V + 32 ) >> 6;

    i00 = a - b * 7 - c * 7 + 16;

    for( y = 0; y < 16; y++ )
    {
        for( x = 0; x < 16; x++ )
        {
            int pix;

            pix = ( i00 + b * x ) >> 5;
            src[x] = clip_uint8( pix );
        }
        src += i_stride;
        i00 += c;
    }
}
197 /****************************************************************************
198 * 8x8 prediction for intra chroma block DC, H, V, P
199 ****************************************************************************/
/* 8x8 chroma DC with no neighbors available: fill with mid-gray (128). */
static void predict_8x8c_dc_128( uint8_t *src, int i_stride )
{
    int x, y;

    for( y = 0; y < 8; y++ )
    {
        for( x = 0; x < 8; x++ )
        {
            src[x] = 128;
        }
        src += i_stride;
    }
}
/* 8x8 chroma DC from the left neighbors only: the upper 4 rows use the
 * mean of left pixels 0-3, the lower 4 rows the mean of left pixels 4-7. */
static void predict_8x8c_dc_left( uint8_t *src, int i_stride )
{
    int x, y;
    int dc0 = 0, dc1 = 0;

    for( y = 0; y < 4; y++ )
    {
        dc0 += src[y * i_stride - 1];
        dc1 += src[(y+4) * i_stride - 1];
    }
    dc0 = ( dc0 + 2 ) >> 2;
    dc1 = ( dc1 + 2 ) >> 2;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 8; x++ )
        {
            src[           x] = dc0;
            src[4*i_stride+x] = dc1;
        }
        src += i_stride;
    }
}
/* 8x8 chroma DC from the top neighbors only: the left 4 columns use the
 * mean of top pixels 0-3, the right 4 columns the mean of top pixels 4-7. */
static void predict_8x8c_dc_top( uint8_t *src, int i_stride )
{
    int x, y;
    int dc0 = 0, dc1 = 0;

    for( x = 0; x < 4; x++ )
    {
        dc0 += src[x     - i_stride];
        dc1 += src[x + 4 - i_stride];
    }
    dc0 = ( dc0 + 2 ) >> 2;
    dc1 = ( dc1 + 2 ) >> 2;

    for( y = 0; y < 8; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x    ] = dc0;
            src[x + 4] = dc1;
        }
        src += i_stride;
    }
}
/* 8x8 chroma DC: each 4x4 quadrant gets its own DC value.
 *   s0/s1 = sums of top pixels 0-3 / 4-7
 *   s2/s3 = sums of left pixels 0-3 / 4-7
 * Top-left averages top+left, top-right uses top only, bottom-left uses
 * left only, bottom-right averages top-right + bottom-left sums. */
static void predict_8x8c_dc( uint8_t *src, int i_stride )
{
    int x, y, i;
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0;
    int dc0, dc1, dc2, dc3;

    for( i = 0; i < 4; i++ )
    {
        s0 += src[i - i_stride];
        s1 += src[i + 4 - i_stride];
        s2 += src[-1 + i * i_stride];
        s3 += src[-1 + (i+4)*i_stride];
    }

    /* quadrant DC values:
     *   dc0 dc1
     *   dc2 dc3 */
    dc0 = ( s0 + s2 + 4 ) >> 3;
    dc1 = ( s1 + 2 ) >> 2;
    dc2 = ( s3 + 2 ) >> 2;
    dc3 = ( s1 + s3 + 4 ) >> 3;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[             x    ] = dc0;
            src[             x + 4] = dc1;
            src[4*i_stride + x    ] = dc2;
            src[4*i_stride + x + 4] = dc3;
        }
        src += i_stride;
    }
}
/* 8x8 chroma horizontal: replicate each row's left neighbor across it. */
static void predict_8x8c_h( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 8; i++ )
    {
        const uint8_t v = src[-1 + i * i_stride];

        for( j = 0; j < 8; j++ )
        {
            src[i * i_stride + j] = v;
        }
    }
}
/* 8x8 chroma vertical: replicate the top neighbor row down every row. */
static void predict_8x8c_v( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 8; i++ )
    {
        for( j = 0; j < 8; j++ )
        {
            src[i * i_stride + j] = src[j - i_stride];
        }
    }
}
/* 8x8 chroma plane prediction: fit a linear gradient through the neighbor
 * pixels and fill the block with the clipped plane values. */
static void predict_8x8c_p( uint8_t *src, int i_stride )
{
    int x, y, i;
    int a, b, c;
    int H = 0;
    int V = 0;
    int i00;

    /* calculate H and V gradients from the top row and left column */
    for( i = 0; i < 4; i++ )
    {
        H += ( i + 1 ) * ( src[4+i - i_stride] - src[2 - i -i_stride] );
        V += ( i + 1 ) * ( src[-1 +(i+4)*i_stride] - src[-1+(2-i)*i_stride] );
    }

    a = 16 * ( src[-1+7*i_stride] + src[7 - i_stride] );
    b = ( 17 * H + 16 ) >> 5;
    c = ( 17 * V + 16 ) >> 5;
    i00 = a -3*b -3*c + 16;

    for( y = 0; y < 8; y++ )
    {
        for( x = 0; x < 8; x++ )
        {
            int pix;

            pix = (i00 +b*x) >> 5;
            src[x] = clip_uint8( pix );
        }
        src += i_stride;
        i00 += c;
    }
}
364 /****************************************************************************
365 * 4x4 prediction for intra luma block
366 ****************************************************************************/
/* 4x4 DC with no neighbors available: fill with mid-gray (128). */
static void predict_4x4_dc_128( uint8_t *src, int i_stride )
{
    int x, y;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = 128;
        }
        src += i_stride;
    }
}
/* 4x4 DC from the 4 left neighbors only. */
static void predict_4x4_dc_left( uint8_t *src, int i_stride )
{
    int x, y;
    int dc = ( src[-1+0*i_stride] + src[-1+i_stride]+
               src[-1+2*i_stride] + src[-1+3*i_stride] + 2 ) >> 2;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = dc;
        }
        src += i_stride;
    }
}
/* 4x4 DC from the 4 top neighbors only. */
static void predict_4x4_dc_top( uint8_t *src, int i_stride )
{
    int x, y;
    int dc = ( src[0 - i_stride] + src[1 - i_stride] +
               src[2 - i_stride] + src[3 - i_stride] + 2 ) >> 2;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = dc;
        }
        src += i_stride;
    }
}
/* 4x4 DC: average of the 4 left and 4 top neighbors. */
static void predict_4x4_dc( uint8_t *src, int i_stride )
{
    int x, y;
    int dc = ( src[-1+0*i_stride] + src[-1+i_stride]+
               src[-1+2*i_stride] + src[-1+3*i_stride] +
               src[0 - i_stride] + src[1 - i_stride] +
               src[2 - i_stride] + src[3 - i_stride] + 4 ) >> 3;

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            src[x] = dc;
        }
        src += i_stride;
    }
}
/* 4x4 horizontal: replicate each row's left neighbor across the row. */
static void predict_4x4_h( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 4; i++ )
    {
        const uint8_t v = src[-1 + i * i_stride];

        for( j = 0; j < 4; j++ )
        {
            src[i * i_stride + j] = v;
        }
    }
}
/* 4x4 vertical: replicate the top neighbor row down every row. */
static void predict_4x4_v( uint8_t *src, int i_stride )
{
    int i, j;

    for( i = 0; i < 4; i++ )
    {
        for( j = 0; j < 4; j++ )
        {
            src[i * i_stride + j] = src[j - i_stride];
        }
    }
}
/* Neighbor loaders for the 4x4 directional predictors:
 *   l0-l3 = left column, t0-t3 = top row, t4-t7 = top-right row. */
#define PREDICT_4x4_LOAD_LEFT \
    const int l0 = src[-1+0*i_stride];   \
    const int l1 = src[-1+1*i_stride];   \
    const int l2 = src[-1+2*i_stride];   \
    const int l3 = src[-1+3*i_stride];

#define PREDICT_4x4_LOAD_TOP \
    const int t0 = src[0-1*i_stride];   \
    const int t1 = src[1-1*i_stride];   \
    const int t2 = src[2-1*i_stride];   \
    const int t3 = src[3-1*i_stride];

#define PREDICT_4x4_LOAD_TOP_RIGHT \
    const int t4 = src[4-1*i_stride];   \
    const int t5 = src[5-1*i_stride];   \
    const int t6 = src[6-1*i_stride];   \
    const int t7 = src[7-1*i_stride];
475 static void predict_4x4_ddl( uint8_t *src, int i_stride )
478 PREDICT_4x4_LOAD_TOP_RIGHT
480 src[0*i_stride+0] = ( t0 + 2*t1+ t2 + 2 ) >> 2;
483 src[1*i_stride+0] = ( t1 + 2*t2+ t3 + 2 ) >> 2;
487 src[2*i_stride+0] = ( t2 + 2*t3+ t4 + 2 ) >> 2;
492 src[3*i_stride+0] = ( t3 + 2*t4+ t5 + 2 ) >> 2;
496 src[3*i_stride+1] = ( t4 + 2*t5+ t6 + 2 ) >> 2;
499 src[3*i_stride+2] = ( t5 + 2*t6+ t7 + 2 ) >> 2;
501 src[3*i_stride+3] = ( t6 + 3 * t7 + 2 ) >> 2;
503 static void predict_4x4_ddr( uint8_t *src, int i_stride )
505 const int lt = src[-1-i_stride];
506 PREDICT_4x4_LOAD_LEFT
512 src[3*i_stride+3] = ( t0 + 2*lt +l0 + 2 ) >> 2;
516 src[2*i_stride+3] = ( lt + 2 * t0 + t1 + 2 ) >> 2;
519 src[1*i_stride+3] = ( t0 + 2 * t1 + t2 + 2 ) >> 2;
521 src[0*i_stride+3] = ( t1 + 2 * t2 + t3 + 2 ) >> 2;
525 src[3*i_stride+2] = ( lt + 2 * l0 + l1 + 2 ) >> 2;
528 src[3*i_stride+1] = ( l0 + 2 * l1 + l2 + 2 ) >> 2;
530 src[3*i_stride+0] = ( l1 + 2 * l2 + l3 + 2 ) >> 2;
533 static void predict_4x4_vr( uint8_t *src, int i_stride )
535 const int lt = src[-1-i_stride];
536 PREDICT_4x4_LOAD_LEFT
538 /* produce warning as l3 is unused */
541 src[2*i_stride+1]= ( lt + t0 + 1 ) >> 1;
544 src[2*i_stride+2]= ( t0 + t1 + 1 ) >> 1;
547 src[2*i_stride+3]= ( t1 + t2 + 1 ) >> 1;
549 src[0*i_stride+3]= ( t2 + t3 + 1 ) >> 1;
552 src[3*i_stride+1]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
555 src[3*i_stride+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
558 src[3*i_stride+3]= ( t0 + 2 * t1 + t2 + 2) >> 2;
560 src[1*i_stride+3]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
561 src[2*i_stride+0]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
562 src[3*i_stride+0]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
565 static void predict_4x4_hd( uint8_t *src, int i_stride )
567 const int lt= src[-1-1*i_stride];
568 PREDICT_4x4_LOAD_LEFT
570 /* produce warning as t3 is unused */
573 src[1*i_stride+2]= ( lt + l0 + 1 ) >> 1;
575 src[1*i_stride+3]= ( l0 + 2 * lt + t0 + 2 ) >> 2;
576 src[0*i_stride+2]= ( lt + 2 * t0 + t1 + 2 ) >> 2;
577 src[0*i_stride+3]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
579 src[2*i_stride+2]= ( l0 + l1 + 1 ) >> 1;
581 src[2*i_stride+3]= ( lt + 2 * l0 + l1 + 2 ) >> 2;
583 src[3*i_stride+2]= ( l1 + l2+ 1 ) >> 1;
585 src[3*i_stride+3]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
586 src[3*i_stride+0]= ( l2 + l3 + 1 ) >> 1;
587 src[3*i_stride+1]= ( l1 + 2 * l2 + l3 + 2 ) >> 2;
590 static void predict_4x4_vl( uint8_t *src, int i_stride )
593 PREDICT_4x4_LOAD_TOP_RIGHT
594 /* produce warning as t7 is unused */
596 src[0*i_stride+0]= ( t0 + t1 + 1 ) >> 1;
598 src[2*i_stride+0]= ( t1 + t2 + 1 ) >> 1;
600 src[2*i_stride+1]= ( t2 + t3 + 1 ) >> 1;
602 src[2*i_stride+2]= ( t3 + t4+ 1 ) >> 1;
603 src[2*i_stride+3]= ( t4 + t5+ 1 ) >> 1;
604 src[1*i_stride+0]= ( t0 + 2 * t1 + t2 + 2 ) >> 2;
606 src[3*i_stride+0]= ( t1 + 2 * t2 + t3 + 2 ) >> 2;
608 src[3*i_stride+1]= ( t2 + 2 * t3 + t4 + 2 ) >> 2;
610 src[3*i_stride+2]= ( t3 + 2 * t4 + t5 + 2 ) >> 2;
611 src[3*i_stride+3]= ( t4 + 2 * t5 + t6 + 2 ) >> 2;
614 static void predict_4x4_hu( uint8_t *src, int i_stride )
616 PREDICT_4x4_LOAD_LEFT
618 src[0*i_stride+0]= ( l0 + l1 + 1 ) >> 1;
619 src[0*i_stride+1]= ( l0 + 2 * l1 + l2 + 2 ) >> 2;
622 src[1*i_stride+0]= ( l1 + l2 + 1 ) >> 1;
625 src[1*i_stride+1]= ( l1 + 2*l2 + l3 + 2 ) >> 2;
628 src[2*i_stride+0]= ( l2 + l3 + 1 ) >> 1;
631 src[2*i_stride+1]= ( l2 + 2 * l3 + l3 + 2 ) >> 2;
638 src[3*i_stride+3]= l3;
641 /****************************************************************************
642 * 8x8 prediction for intra luma block
643 ****************************************************************************/
/* Neighbor loaders for the 8x8 predictors.  The 8x8 neighbors are low-pass
 * filtered ([1 2 1]) before use, per the H.264 8x8 intra filtering step.
 * i_neighbor flags (MB_TOPLEFT / MB_TOPRIGHT) select the fallback sample
 * when a corner neighbor is unavailable. */
#define SRC(x,y) src[(x)+(y)*i_stride]

#define PL(y) \
    const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
#define PREDICT_8x8_LOAD_LEFT \
    const int l0 = ((i_neighbor&MB_TOPLEFT ? SRC(-1,-1) : SRC(-1,0)) \
                    + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
    PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
    const int l7 = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2;

#define PT(x) \
    const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOP \
    const int t0 = ((i_neighbor&MB_TOPLEFT ? SRC(-1,-1) : SRC(0,-1)) \
                    + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
    PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
    const int t7 = ((i_neighbor&MB_TOPRIGHT ? SRC(8,-1) : SRC(7,-1)) \
                    + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2;

#define PTR(x) \
    t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOPRIGHT \
    int t8, t9, t10, t11, t12, t13, t14, t15; \
    if(i_neighbor&MB_TOPRIGHT) { \
        PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
        t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
    } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);

#define PREDICT_8x8_LOAD_TOPLEFT \
    const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2;

/* Fill the 8x8 block with the 32-bit splatted DC value v. */
#define PREDICT_8x8_DC(v) \
    int y; \
    for( y = 0; y < 8; y++ ) { \
        ((uint32_t*)src)[0] = \
        ((uint32_t*)src)[1] = v; \
        src += i_stride; \
    }
/* 8x8 DC with no neighbors available: fill with mid-gray (128). */
static void predict_8x8_dc_128( uint8_t *src, int i_stride, int i_neighbor )
{
    PREDICT_8x8_DC(0x80808080);
}
687 static void predict_8x8_dc_left( uint8_t *src, int i_stride, int i_neighbor )
689 PREDICT_8x8_LOAD_LEFT
690 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3) * 0x01010101;
693 static void predict_8x8_dc_top( uint8_t *src, int i_stride, int i_neighbor )
696 const uint32_t dc = ((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3) * 0x01010101;
699 static void predict_8x8_dc( uint8_t *src, int i_stride, int i_neighbor )
701 PREDICT_8x8_LOAD_LEFT
703 const uint32_t dc = ((l0+l1+l2+l3+l4+l5+l6+l7
704 +t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4) * 0x01010101;
707 static void predict_8x8_h( uint8_t *src, int i_stride, int i_neighbor )
709 PREDICT_8x8_LOAD_LEFT
710 #define ROW(y) ((uint32_t*)(src+y*i_stride))[0] =\
711 ((uint32_t*)(src+y*i_stride))[1] = 0x01010101U * l##y
712 ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
715 static void predict_8x8_v( uint8_t *src, int i_stride, int i_neighbor )
718 PREDICT_8x8_LOAD_TOP;
727 for( y = 1; y < 8; y++ )
728 *(uint64_t*)(src+y*i_stride) = *(uint64_t*)src;
730 static void predict_8x8_ddl( uint8_t *src, int i_stride, int i_neighbor )
733 PREDICT_8x8_LOAD_TOPRIGHT
734 SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
735 SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
736 SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
737 SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
738 SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
739 SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
740 SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
741 SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
742 SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
743 SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
744 SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
745 SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
746 SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
747 SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
748 SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
750 static void predict_8x8_ddr( uint8_t *src, int i_stride, int i_neighbor )
753 PREDICT_8x8_LOAD_LEFT
754 PREDICT_8x8_LOAD_TOPLEFT
755 SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
756 SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
757 SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
758 SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
759 SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
760 SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
761 SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
762 SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
763 SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
764 SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
765 SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
766 SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
767 SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
768 SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
769 SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
772 static void predict_8x8_vr( uint8_t *src, int i_stride, int i_neighbor )
775 PREDICT_8x8_LOAD_LEFT
776 PREDICT_8x8_LOAD_TOPLEFT
777 /* produce warning as l7 is unused */
778 SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
779 SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
780 SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
781 SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
782 SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
783 SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
784 SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
785 SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
786 SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
787 SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
788 SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
789 SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
790 SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
791 SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
792 SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
793 SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
794 SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
795 SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
796 SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
797 SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
798 SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
799 SRC(7,0)= (t6 + t7 + 1) >> 1;
801 static void predict_8x8_hd( uint8_t *src, int i_stride, int i_neighbor )
804 PREDICT_8x8_LOAD_LEFT
805 PREDICT_8x8_LOAD_TOPLEFT
806 /* produce warning as t7 is unused */
807 SRC(0,7)= (l6 + l7 + 1) >> 1;
808 SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
809 SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
810 SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
811 SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
812 SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
813 SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
814 SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
815 SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
816 SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
817 SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
818 SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
819 SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
820 SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
821 SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
822 SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
823 SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
824 SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
825 SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
826 SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
827 SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
828 SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
830 static void predict_8x8_vl( uint8_t *src, int i_stride, int i_neighbor )
833 PREDICT_8x8_LOAD_TOPRIGHT
834 SRC(0,0)= (t0 + t1 + 1) >> 1;
835 SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
836 SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
837 SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
838 SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
839 SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
840 SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
841 SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
842 SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
843 SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
844 SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
845 SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
846 SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
847 SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
848 SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
849 SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
850 SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
851 SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
852 SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
853 SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
854 SRC(7,6)= (t10 + t11 + 1) >> 1;
855 SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
857 static void predict_8x8_hu( uint8_t *src, int i_stride, int i_neighbor )
859 PREDICT_8x8_LOAD_LEFT
860 SRC(0,0)= (l0 + l1 + 1) >> 1;
861 SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
862 SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
863 SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
864 SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
865 SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
866 SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
867 SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
868 SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
869 SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
870 SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
871 SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
872 SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
873 SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
874 SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
875 SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
876 SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
877 SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
880 /****************************************************************************
881 * Exported functions:
882 ****************************************************************************/
883 void x264_predict_16x16_init( int cpu, x264_predict_t pf[7] )
885 pf[I_PRED_16x16_V ] = predict_16x16_v;
886 pf[I_PRED_16x16_H ] = predict_16x16_h;
887 pf[I_PRED_16x16_DC] = predict_16x16_dc;
888 pf[I_PRED_16x16_P ] = predict_16x16_p;
889 pf[I_PRED_16x16_DC_LEFT]= predict_16x16_dc_left;
890 pf[I_PRED_16x16_DC_TOP ]= predict_16x16_dc_top;
891 pf[I_PRED_16x16_DC_128 ]= predict_16x16_dc_128;
894 if( cpu&X264_CPU_MMXEXT )
896 x264_predict_16x16_init_mmxext( pf );
901 void x264_predict_8x8c_init( int cpu, x264_predict_t pf[7] )
903 pf[I_PRED_CHROMA_V ] = predict_8x8c_v;
904 pf[I_PRED_CHROMA_H ] = predict_8x8c_h;
905 pf[I_PRED_CHROMA_DC] = predict_8x8c_dc;
906 pf[I_PRED_CHROMA_P ] = predict_8x8c_p;
907 pf[I_PRED_CHROMA_DC_LEFT]= predict_8x8c_dc_left;
908 pf[I_PRED_CHROMA_DC_TOP ]= predict_8x8c_dc_top;
909 pf[I_PRED_CHROMA_DC_128 ]= predict_8x8c_dc_128;
912 if( cpu&X264_CPU_MMXEXT )
914 x264_predict_8x8c_init_mmxext( pf );
919 void x264_predict_8x8_init( int cpu, x264_predict8x8_t pf[12] )
921 pf[I_PRED_8x8_V] = predict_8x8_v;
922 pf[I_PRED_8x8_H] = predict_8x8_h;
923 pf[I_PRED_8x8_DC] = predict_8x8_dc;
924 pf[I_PRED_8x8_DDL] = predict_8x8_ddl;
925 pf[I_PRED_8x8_DDR] = predict_8x8_ddr;
926 pf[I_PRED_8x8_VR] = predict_8x8_vr;
927 pf[I_PRED_8x8_HD] = predict_8x8_hd;
928 pf[I_PRED_8x8_VL] = predict_8x8_vl;
929 pf[I_PRED_8x8_HU] = predict_8x8_hu;
930 pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left;
931 pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top;
932 pf[I_PRED_8x8_DC_128] = predict_8x8_dc_128;
935 void x264_predict_4x4_init( int cpu, x264_predict_t pf[12] )
937 pf[I_PRED_4x4_V] = predict_4x4_v;
938 pf[I_PRED_4x4_H] = predict_4x4_h;
939 pf[I_PRED_4x4_DC] = predict_4x4_dc;
940 pf[I_PRED_4x4_DDL] = predict_4x4_ddl;
941 pf[I_PRED_4x4_DDR] = predict_4x4_ddr;
942 pf[I_PRED_4x4_VR] = predict_4x4_vr;
943 pf[I_PRED_4x4_HD] = predict_4x4_hd;
944 pf[I_PRED_4x4_VL] = predict_4x4_vl;
945 pf[I_PRED_4x4_HU] = predict_4x4_hu;
946 pf[I_PRED_4x4_DC_LEFT]= predict_4x4_dc_left;
947 pf[I_PRED_4x4_DC_TOP] = predict_4x4_dc_top;
948 pf[I_PRED_4x4_DC_128] = predict_4x4_dc_128;
951 if( cpu&X264_CPU_MMXEXT )
953 x264_predict_4x4_init_mmxext( pf );