/*****************************************************************************
 * predict.c: intra prediction
 *****************************************************************************
 * Copyright (C) 2003-2015 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *          Henrik Gramner <henrik@gramner.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
/* predict4x4 are inspired from ffmpeg h264 decoder */

#include "common.h"

#if HAVE_MMX
#   include "x86/predict.h"
#endif
#if ARCH_PPC
#   include "ppc/predict.h"
#endif
#if ARCH_ARM
#   include "arm/predict.h"
#endif
#if ARCH_AARCH64
#   include "aarch64/predict.h"
#endif
#if ARCH_MIPS
#   include "mips/predict.h"
#endif

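/* Note on the pixel helpers used throughout this file (they come from
 * common.h): a pixel is 8 or 16 bits depending on BIT_DEPTH, pixel4 holds
 * four packed pixels, MPIXEL_X4(p) accesses four pixels at p as one word,
 * and PIXEL_SPLAT_X4(v) replicates a single pixel value into all four
 * lanes. Every predictor below writes four pixels per store this way. */
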
/****************************************************************************
 * 16x16 prediction for intra luma block
 ****************************************************************************/

#define PREDICT_16x16_DC(v)\
    for( int i = 0; i < 16; i++ )\
    {\
        MPIXEL_X4( src+ 0 ) = v;\
        MPIXEL_X4( src+ 4 ) = v;\
        MPIXEL_X4( src+ 8 ) = v;\
        MPIXEL_X4( src+12 ) = v;\
        src += FDEC_STRIDE;\
    }

void x264_predict_16x16_dc_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
    {
        dc += src[-1 + i * FDEC_STRIDE];
        dc += src[i - FDEC_STRIDE];
    }
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 16 ) >> 5 );

    PREDICT_16x16_DC( dcsplat );
}

static void x264_predict_16x16_dc_left_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
        dc += src[-1 + i * FDEC_STRIDE];
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 8 ) >> 4 );

    PREDICT_16x16_DC( dcsplat );
}

static void x264_predict_16x16_dc_top_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
        dc += src[i - FDEC_STRIDE];
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 8 ) >> 4 );

    PREDICT_16x16_DC( dcsplat );
}

static void x264_predict_16x16_dc_128_c( pixel *src )
{
    PREDICT_16x16_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}

void x264_predict_16x16_h_c( pixel *src )
{
    for( int i = 0; i < 16; i++ )
    {
        const pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+ 0 ) = v;
        MPIXEL_X4( src+ 4 ) = v;
        MPIXEL_X4( src+ 8 ) = v;
        MPIXEL_X4( src+12 ) = v;
        src += FDEC_STRIDE;
    }
}

void x264_predict_16x16_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( &src[ 0-FDEC_STRIDE] );
    pixel4 v1 = MPIXEL_X4( &src[ 4-FDEC_STRIDE] );
    pixel4 v2 = MPIXEL_X4( &src[ 8-FDEC_STRIDE] );
    pixel4 v3 = MPIXEL_X4( &src[12-FDEC_STRIDE] );

    for( int i = 0; i < 16; i++ )
    {
        MPIXEL_X4( src+ 0 ) = v0;
        MPIXEL_X4( src+ 4 ) = v1;
        MPIXEL_X4( src+ 8 ) = v2;
        MPIXEL_X4( src+12 ) = v3;
        src += FDEC_STRIDE;
    }
}

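/* Plane (mode 3) prediction: H and V are weighted sums of horizontal and
 * vertical gradients taken across the top and left borders, b and c are the
 * resulting per-column and per-row slopes, and a anchors the plane at the
 * bottom-right border pixels. Each output pixel is
 * clip( (a + b*(x-7) + c*(y-7) + 16) >> 5 ), computed incrementally via
 * i00 and pix. */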
void x264_predict_16x16_p_c( pixel *src )
{
    int H = 0, V = 0;

    /* calculate H and V */
    for( int i = 0; i <= 7; i++ )
    {
        H += ( i + 1 ) * ( src[ 8 + i - FDEC_STRIDE ] - src[6 -i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 + (8+i)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[15 - FDEC_STRIDE] );
    int b = ( 5 * H + 32 ) >> 6;
    int c = ( 5 * V + 32 ) >> 6;

    int i00 = a - b * 7 - c * 7 + 16;

    for( int y = 0; y < 16; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 16; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}

/****************************************************************************
 * 8x8 prediction for intra chroma block (4:2:0)
 ****************************************************************************/

static void x264_predict_8x8c_dc_128_c( pixel *src )
{
    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+0 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        MPIXEL_X4( src+4 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        src += FDEC_STRIDE;
    }
}

static void x264_predict_8x8c_dc_left_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int y = 0; y < 4; y++ )
    {
        dc0 += src[y * FDEC_STRIDE     - 1];
        dc1 += src[(y+4) * FDEC_STRIDE - 1];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc0splat;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc1splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}

static void x264_predict_8x8c_dc_top_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int x = 0; x < 4; x++ )
    {
        dc0 += src[x     - FDEC_STRIDE];
        dc1 += src[x + 4 - FDEC_STRIDE];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x8c_dc_c( pixel *src )
{
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0;

    /*
          s0 s1
       s2
       s3
    */
    for( int i = 0; i < 4; i++ )
    {
        s0 += src[i - FDEC_STRIDE];
        s1 += src[i + 4 - FDEC_STRIDE];
        s2 += src[-1 + i * FDEC_STRIDE];
        s3 += src[-1 + (i+4)*FDEC_STRIDE];
    }

    /*
       dc0 dc1
       dc2 dc3
     */
    pixel4 dc0 = PIXEL_SPLAT_X4( ( s0 + s2 + 4 ) >> 3 );
    pixel4 dc1 = PIXEL_SPLAT_X4( ( s1 + 2 ) >> 2 );
    pixel4 dc2 = PIXEL_SPLAT_X4( ( s3 + 2 ) >> 2 );
    pixel4 dc3 = PIXEL_SPLAT_X4( ( s1 + s3 + 4 ) >> 3 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0;
        MPIXEL_X4( src+4 ) = dc1;
        src += FDEC_STRIDE;
    }

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc2;
        MPIXEL_X4( src+4 ) = dc3;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x8c_h_c( pixel *src )
{
    for( int i = 0; i < 8; i++ )
    {
        pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+0 ) = v;
        MPIXEL_X4( src+4 ) = v;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x8c_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( src+0-FDEC_STRIDE );
    pixel4 v1 = MPIXEL_X4( src+4-FDEC_STRIDE );

    for( int i = 0; i < 8; i++ )
    {
        MPIXEL_X4( src+0 ) = v0;
        MPIXEL_X4( src+4 ) = v1;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x8c_p_c( pixel *src )
{
    int H = 0, V = 0;

    for( int i = 0; i < 4; i++ )
    {
        H += ( i + 1 ) * ( src[4+i - FDEC_STRIDE] - src[2 - i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 +(i+4)*FDEC_STRIDE] - src[-1+(2-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1+7*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 17 * V + 16 ) >> 5;
    int i00 = a -3*b -3*c + 16;

    for( int y = 0; y < 8; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 8; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}

/****************************************************************************
 * 8x16 prediction for intra chroma block (4:2:2)
 ****************************************************************************/

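/* In 4:2:2, a macroblock's chroma plane is 8 pixels wide and 16 tall, so the
 * DC modes below compute one DC value per 4x4 sub-block (eight in total for
 * the full DC mode), and the plane mode uses 4 top and 8 left border
 * samples. */
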
static void x264_predict_8x16c_dc_128_c( pixel *src )
{
    for( int y = 0; y < 16; y++ )
    {
        MPIXEL_X4( src+0 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        MPIXEL_X4( src+4 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        src += FDEC_STRIDE;
    }
}

static void x264_predict_8x16c_dc_left_c( pixel *src )
{
    for( int i = 0; i < 4; i++ )
    {
        int dc = 0;

        for( int y = 0; y < 4; y++ )
            dc += src[y*FDEC_STRIDE - 1];

        pixel4 dcsplat = PIXEL_SPLAT_X4( (dc + 2) >> 2 );

        for( int y = 0; y < 4; y++ )
        {
            MPIXEL_X4( src+0 ) = dcsplat;
            MPIXEL_X4( src+4 ) = dcsplat;
            src += FDEC_STRIDE;
        }
    }
}

static void x264_predict_8x16c_dc_top_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int x = 0; x < 4; x++ )
    {
        dc0 += src[x     - FDEC_STRIDE];
        dc1 += src[x + 4 - FDEC_STRIDE];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 16; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x16c_dc_c( pixel *src )
{
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0;

    /*
          s0 s1
       s2
       s3
       s4
       s5
    */
    for( int i = 0; i < 4; i++ )
    {
        s0 += src[i+0 - FDEC_STRIDE];
        s1 += src[i+4 - FDEC_STRIDE];
        s2 += src[-1 + (i+0)  * FDEC_STRIDE];
        s3 += src[-1 + (i+4)  * FDEC_STRIDE];
        s4 += src[-1 + (i+8)  * FDEC_STRIDE];
        s5 += src[-1 + (i+12) * FDEC_STRIDE];
    }
    /*
       dc0 dc1
       dc2 dc3
       dc4 dc5
       dc6 dc7
    */
    pixel4 dc0 = PIXEL_SPLAT_X4( ( s0 + s2 + 4 ) >> 3 );
    pixel4 dc1 = PIXEL_SPLAT_X4( ( s1 + 2 ) >> 2 );
    pixel4 dc2 = PIXEL_SPLAT_X4( ( s3 + 2 ) >> 2 );
    pixel4 dc3 = PIXEL_SPLAT_X4( ( s1 + s3 + 4 ) >> 3 );
    pixel4 dc4 = PIXEL_SPLAT_X4( ( s4 + 2 ) >> 2 );
    pixel4 dc5 = PIXEL_SPLAT_X4( ( s1 + s4 + 4 ) >> 3 );
    pixel4 dc6 = PIXEL_SPLAT_X4( ( s5 + 2 ) >> 2 );
    pixel4 dc7 = PIXEL_SPLAT_X4( ( s1 + s5 + 4 ) >> 3 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0;
        MPIXEL_X4( src+4 ) = dc1;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc2;
        MPIXEL_X4( src+4 ) = dc3;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc4;
        MPIXEL_X4( src+4 ) = dc5;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc6;
        MPIXEL_X4( src+4 ) = dc7;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x16c_h_c( pixel *src )
{
    for( int i = 0; i < 16; i++ )
    {
        pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+0 ) = v;
        MPIXEL_X4( src+4 ) = v;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x16c_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( src+0-FDEC_STRIDE );
    pixel4 v1 = MPIXEL_X4( src+4-FDEC_STRIDE );

    for( int i = 0; i < 16; i++ )
    {
        MPIXEL_X4( src+0 ) = v0;
        MPIXEL_X4( src+4 ) = v1;
        src += FDEC_STRIDE;
    }
}

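/* Plane prediction for the 8-wide by 16-tall block: H is taken over the 4
 * top-border pairs and scaled as (17*H+16)>>5, while V is taken over the 8
 * left-border pairs and scaled as (5*V+32)>>6, matching the asymmetric block
 * dimensions; a anchors the plane at the bottom-left and top-right border
 * samples. */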
void x264_predict_8x16c_p_c( pixel *src )
{
    int H = 0;
    int V = 0;

    for( int i = 0; i < 4; i++ )
        H += ( i + 1 ) * ( src[4 + i - FDEC_STRIDE] - src[2 - i - FDEC_STRIDE] );
    for( int i = 0; i < 8; i++ )
        V += ( i + 1 ) * ( src[-1 + (i+8)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );

    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 5 * V + 32 ) >> 6;
    int i00 = a -3*b -7*c + 16;

    for( int y = 0; y < 16; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 8; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}

/****************************************************************************
 * 4x4 prediction for intra luma block
 ****************************************************************************/

#define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]
#define SRC_X4(x,y) MPIXEL_X4( &SRC(x,y) )

#define PREDICT_4x4_DC(v)\
    SRC_X4(0,0) = SRC_X4(0,1) = SRC_X4(0,2) = SRC_X4(0,3) = v;

static void x264_predict_4x4_dc_128_c( pixel *src )
{
    PREDICT_4x4_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}

static void x264_predict_4x4_dc_left_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(-1,0) + SRC(-1,1) + SRC(-1,2) + SRC(-1,3) + 2) >> 2 );
    PREDICT_4x4_DC( dc );
}

static void x264_predict_4x4_dc_top_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(0,-1) + SRC(1,-1) + SRC(2,-1) + SRC(3,-1) + 2) >> 2 );
    PREDICT_4x4_DC( dc );
}

void x264_predict_4x4_dc_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(-1,0) + SRC(-1,1) + SRC(-1,2) + SRC(-1,3) +
                                 SRC(0,-1) + SRC(1,-1) + SRC(2,-1) + SRC(3,-1) + 4) >> 3 );
    PREDICT_4x4_DC( dc );
}

void x264_predict_4x4_h_c( pixel *src )
{
    SRC_X4(0,0) = PIXEL_SPLAT_X4( SRC(-1,0) );
    SRC_X4(0,1) = PIXEL_SPLAT_X4( SRC(-1,1) );
    SRC_X4(0,2) = PIXEL_SPLAT_X4( SRC(-1,2) );
    SRC_X4(0,3) = PIXEL_SPLAT_X4( SRC(-1,3) );
}

void x264_predict_4x4_v_c( pixel *src )
{
    PREDICT_4x4_DC(SRC_X4(0,-1));
}

#define PREDICT_4x4_LOAD_LEFT\
    int l0 = SRC(-1,0);\
    int l1 = SRC(-1,1);\
    int l2 = SRC(-1,2);\
    UNUSED int l3 = SRC(-1,3);

#define PREDICT_4x4_LOAD_TOP\
    int t0 = SRC(0,-1);\
    int t1 = SRC(1,-1);\
    int t2 = SRC(2,-1);\
    UNUSED int t3 = SRC(3,-1);

#define PREDICT_4x4_LOAD_TOP_RIGHT\
    int t4 = SRC(4,-1);\
    int t5 = SRC(5,-1);\
    int t6 = SRC(6,-1);\
    UNUSED int t7 = SRC(7,-1);

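/* F1 and F2 are the standard H.264 reference-sample filters used by the
 * directional modes: F1 is the 2-tap average with rounding, F2 the 3-tap
 * [1 2 1] filter with rounding. */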
#define F1(a,b)   (((a)+(b)+1)>>1)
#define F2(a,b,c) (((a)+2*(b)+(c)+2)>>2)

static void x264_predict_4x4_ddl_c( pixel *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    SRC(0,0)= F2(t0,t1,t2);
    SRC(1,0)=SRC(0,1)= F2(t1,t2,t3);
    SRC(2,0)=SRC(1,1)=SRC(0,2)= F2(t2,t3,t4);
    SRC(3,0)=SRC(2,1)=SRC(1,2)=SRC(0,3)= F2(t3,t4,t5);
    SRC(3,1)=SRC(2,2)=SRC(1,3)= F2(t4,t5,t6);
    SRC(3,2)=SRC(2,3)= F2(t5,t6,t7);
    SRC(3,3)= F2(t6,t7,t7);
}

static void x264_predict_4x4_ddr_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    SRC(3,0)= F2(t3,t2,t1);
    SRC(2,0)=SRC(3,1)= F2(t2,t1,t0);
    SRC(1,0)=SRC(2,1)=SRC(3,2)= F2(t1,t0,lt);
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)= F2(t0,lt,l0);
    SRC(0,1)=SRC(1,2)=SRC(2,3)= F2(lt,l0,l1);
    SRC(0,2)=SRC(1,3)= F2(l0,l1,l2);
    SRC(0,3)= F2(l1,l2,l3);
}

static void x264_predict_4x4_vr_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    SRC(0,3)= F2(l2,l1,l0);
    SRC(0,2)= F2(l1,l0,lt);
    SRC(0,1)=SRC(1,3)= F2(l0,lt,t0);
    SRC(0,0)=SRC(1,2)= F1(lt,t0);
    SRC(1,1)=SRC(2,3)= F2(lt,t0,t1);
    SRC(1,0)=SRC(2,2)= F1(t0,t1);
    SRC(2,1)=SRC(3,3)= F2(t0,t1,t2);
    SRC(2,0)=SRC(3,2)= F1(t1,t2);
    SRC(3,1)= F2(t1,t2,t3);
    SRC(3,0)= F1(t2,t3);
}

static void x264_predict_4x4_hd_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP
    SRC(0,3)= F1(l2,l3);
    SRC(1,3)= F2(l1,l2,l3);
    SRC(0,2)=SRC(2,3)= F1(l1,l2);
    SRC(1,2)=SRC(3,3)= F2(l0,l1,l2);
    SRC(0,1)=SRC(2,2)= F1(l0,l1);
    SRC(1,1)=SRC(3,2)= F2(lt,l0,l1);
    SRC(0,0)=SRC(2,1)= F1(lt,l0);
    SRC(1,0)=SRC(3,1)= F2(t0,lt,l0);
    SRC(2,0)= F2(t1,t0,lt);
    SRC(3,0)= F2(t2,t1,t0);
}

static void x264_predict_4x4_vl_c( pixel *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT
    SRC(0,0)= F1(t0,t1);
    SRC(0,1)= F2(t0,t1,t2);
    SRC(1,0)=SRC(0,2)= F1(t1,t2);
    SRC(1,1)=SRC(0,3)= F2(t1,t2,t3);
    SRC(2,0)=SRC(1,2)= F1(t2,t3);
    SRC(2,1)=SRC(1,3)= F2(t2,t3,t4);
    SRC(3,0)=SRC(2,2)= F1(t3,t4);
    SRC(3,1)=SRC(2,3)= F2(t3,t4,t5);
    SRC(3,2)= F1(t4,t5);
    SRC(3,3)= F2(t4,t5,t6);
}

static void x264_predict_4x4_hu_c( pixel *src )
{
    PREDICT_4x4_LOAD_LEFT
    SRC(0,0)= F1(l0,l1);
    SRC(1,0)= F2(l0,l1,l2);
    SRC(2,0)=SRC(0,1)= F1(l1,l2);
    SRC(3,0)=SRC(1,1)= F2(l1,l2,l3);
    SRC(2,1)=SRC(0,2)= F1(l2,l3);
    SRC(3,1)=SRC(1,2)= F2(l2,l3,l3);
    SRC(3,2)=SRC(1,3)=SRC(0,3)=
    SRC(2,2)=SRC(2,3)=SRC(3,3)= l3;
}

/****************************************************************************
 * 8x8 prediction for intra luma block
 ****************************************************************************/

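/* Unlike the 4x4 and 16x16 modes, 8x8 luma prediction does not read its
 * neighbours directly from the frame: x264_predict_8x8_filter_c first builds
 * a 36-entry edge[] array of (optionally 3-tap smoothed) border samples, and
 * every 8x8 predictor reads from that array. The PL/PT macros below apply
 * the [1 2 1] filter to one left/top sample. */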
#define PL(y) \
    edge[14-y] = F2(SRC(-1,y-1), SRC(-1,y), SRC(-1,y+1));
#define PT(x) \
    edge[16+x] = F2(SRC(x-1,-1), SRC(x,-1), SRC(x+1,-1));

static void x264_predict_8x8_filter_c( pixel *src, pixel edge[36], int i_neighbor, int i_filters )
{
    /* edge[7..14] = l7..l0
     * edge[15] = lt
     * edge[16..31] = t0 .. t15
     * edge[32] = t15 */

    int have_lt = i_neighbor & MB_TOPLEFT;
    if( i_filters & MB_LEFT )
    {
        edge[15] = (SRC(0,-1) + 2*SRC(-1,-1) + SRC(-1,0) + 2) >> 2;
        edge[14] = ((have_lt ? SRC(-1,-1) : SRC(-1,0))
                    + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2;
        PL(1) PL(2) PL(3) PL(4) PL(5) PL(6)
        edge[6] =
        edge[7] = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2;
    }

    if( i_filters & MB_TOP )
    {
        int have_tr = i_neighbor & MB_TOPRIGHT;
        edge[16] = ((have_lt ? SRC(-1,-1) : SRC(0,-1))
                    + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2;
        PT(1) PT(2) PT(3) PT(4) PT(5) PT(6)
        edge[23] = (SRC(6,-1) + 2*SRC(7,-1)
                    + (have_tr ? SRC(8,-1) : SRC(7,-1)) + 2) >> 2;

        if( i_filters & MB_TOPRIGHT )
        {
            if( have_tr )
            {
                PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14)
                edge[31] =
                edge[32] = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2;
            }
            else
            {
                MPIXEL_X4( edge+24 ) = PIXEL_SPLAT_X4( SRC(7,-1) );
                MPIXEL_X4( edge+28 ) = PIXEL_SPLAT_X4( SRC(7,-1) );
                edge[32] = SRC(7,-1);
            }
        }
    }
}

#undef PL
#undef PT
#define PL(y) \
    UNUSED int l##y = edge[14-y];
#define PT(x) \
    UNUSED int t##x = edge[16+x];
#define PREDICT_8x8_LOAD_TOPLEFT \
    int lt = edge[15];
#define PREDICT_8x8_LOAD_LEFT \
    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
#define PREDICT_8x8_LOAD_TOP \
    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)
#define PREDICT_8x8_LOAD_TOPRIGHT \
    PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14) PT(15)

#define PREDICT_8x8_DC(v) \
    for( int y = 0; y < 8; y++ ) { \
        MPIXEL_X4( src+0 ) = v; \
        MPIXEL_X4( src+4 ) = v; \
        src += FDEC_STRIDE; \
    }

static void x264_predict_8x8_dc_128_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}

static void x264_predict_8x8_dc_left_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    pixel4 dc = PIXEL_SPLAT_X4( (l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3 );
    PREDICT_8x8_DC( dc );
}

static void x264_predict_8x8_dc_top_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    pixel4 dc = PIXEL_SPLAT_X4( (t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3 );
    PREDICT_8x8_DC( dc );
}

void x264_predict_8x8_dc_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOP
    pixel4 dc = PIXEL_SPLAT_X4( (l0+l1+l2+l3+l4+l5+l6+l7+t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4 );
    PREDICT_8x8_DC( dc );
}

void x264_predict_8x8_h_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
#define ROW(y) MPIXEL_X4( src+y*FDEC_STRIDE+0 ) =\
               MPIXEL_X4( src+y*FDEC_STRIDE+4 ) = PIXEL_SPLAT_X4( l##y );
    ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
#undef ROW
}

void x264_predict_8x8_v_c( pixel *src, pixel edge[36] )
{
    pixel4 top[2] = { MPIXEL_X4( edge+16 ),
                      MPIXEL_X4( edge+20 ) };
    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+y*FDEC_STRIDE+0 ) = top[0];
        MPIXEL_X4( src+y*FDEC_STRIDE+4 ) = top[1];
    }
}

static void x264_predict_8x8_ddl_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_TOPRIGHT
    SRC(0,0)= F2(t0,t1,t2);
    SRC(0,1)=SRC(1,0)= F2(t1,t2,t3);
    SRC(0,2)=SRC(1,1)=SRC(2,0)= F2(t2,t3,t4);
    SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= F2(t3,t4,t5);
    SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= F2(t4,t5,t6);
    SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= F2(t5,t6,t7);
    SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= F2(t6,t7,t8);
    SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= F2(t7,t8,t9);
    SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= F2(t8,t9,t10);
    SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= F2(t9,t10,t11);
    SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= F2(t10,t11,t12);
    SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= F2(t11,t12,t13);
    SRC(5,7)=SRC(6,6)=SRC(7,5)= F2(t12,t13,t14);
    SRC(6,7)=SRC(7,6)= F2(t13,t14,t15);
    SRC(7,7)= F2(t14,t15,t15);
}

static void x264_predict_8x8_ddr_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT
    SRC(0,7)= F2(l7,l6,l5);
    SRC(0,6)=SRC(1,7)= F2(l6,l5,l4);
    SRC(0,5)=SRC(1,6)=SRC(2,7)= F2(l5,l4,l3);
    SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= F2(l4,l3,l2);
    SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= F2(l3,l2,l1);
    SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= F2(l2,l1,l0);
    SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= F2(l1,l0,lt);
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= F2(l0,lt,t0);
    SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= F2(lt,t0,t1);
    SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= F2(t0,t1,t2);
    SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= F2(t1,t2,t3);
    SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= F2(t2,t3,t4);
    SRC(5,0)=SRC(6,1)=SRC(7,2)= F2(t3,t4,t5);
    SRC(6,0)=SRC(7,1)= F2(t4,t5,t6);
    SRC(7,0)= F2(t5,t6,t7);
}

static void x264_predict_8x8_vr_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT
    SRC(0,6)= F2(l5,l4,l3);
    SRC(0,7)= F2(l6,l5,l4);
    SRC(0,4)=SRC(1,6)= F2(l3,l2,l1);
    SRC(0,5)=SRC(1,7)= F2(l4,l3,l2);
    SRC(0,2)=SRC(1,4)=SRC(2,6)= F2(l1,l0,lt);
    SRC(0,3)=SRC(1,5)=SRC(2,7)= F2(l2,l1,l0);
    SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= F2(l0,lt,t0);
    SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= F1(lt,t0);
    SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= F2(lt,t0,t1);
    SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= F1(t0,t1);
    SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= F2(t0,t1,t2);
    SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= F1(t1,t2);
    SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= F2(t1,t2,t3);
    SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= F1(t2,t3);
    SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= F2(t2,t3,t4);
    SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= F1(t3,t4);
    SRC(5,1)=SRC(6,3)=SRC(7,5)= F2(t3,t4,t5);
    SRC(5,0)=SRC(6,2)=SRC(7,4)= F1(t4,t5);
    SRC(6,1)=SRC(7,3)= F2(t4,t5,t6);
    SRC(6,0)=SRC(7,2)= F1(t5,t6);
    SRC(7,1)= F2(t5,t6,t7);
    SRC(7,0)= F1(t6,t7);
}

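/* The HD and HU modes below assemble their output with pack_pixel_1to2 /
 * pack_pixel_2to4 (pixel-packing helpers used throughout x264), which pack
 * two pixels, then two pairs, into a single pixel4 so that each row can be
 * written as two 4-pixel stores instead of eight scalar ones. */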
static void x264_predict_8x8_hd_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT
    int p1 = pack_pixel_1to2(F1(l6,l7), F2(l5,l6,l7));
    int p2 = pack_pixel_1to2(F1(l5,l6), F2(l4,l5,l6));
    int p3 = pack_pixel_1to2(F1(l4,l5), F2(l3,l4,l5));
    int p4 = pack_pixel_1to2(F1(l3,l4), F2(l2,l3,l4));
    int p5 = pack_pixel_1to2(F1(l2,l3), F2(l1,l2,l3));
    int p6 = pack_pixel_1to2(F1(l1,l2), F2(l0,l1,l2));
    int p7 = pack_pixel_1to2(F1(l0,l1), F2(lt,l0,l1));
    int p8 = pack_pixel_1to2(F1(lt,l0), F2(l0,lt,t0));
    int p9 = pack_pixel_1to2(F2(t1,t0,lt), F2(t2,t1,t0));
    int p10 = pack_pixel_1to2(F2(t3,t2,t1), F2(t4,t3,t2));
    int p11 = pack_pixel_1to2(F2(t5,t4,t3), F2(t6,t5,t4));
    SRC_X4(0,7)= pack_pixel_2to4(p1,p2);
    SRC_X4(0,6)= pack_pixel_2to4(p2,p3);
    SRC_X4(4,7)=SRC_X4(0,5)= pack_pixel_2to4(p3,p4);
    SRC_X4(4,6)=SRC_X4(0,4)= pack_pixel_2to4(p4,p5);
    SRC_X4(4,5)=SRC_X4(0,3)= pack_pixel_2to4(p5,p6);
    SRC_X4(4,4)=SRC_X4(0,2)= pack_pixel_2to4(p6,p7);
    SRC_X4(4,3)=SRC_X4(0,1)= pack_pixel_2to4(p7,p8);
    SRC_X4(4,2)=SRC_X4(0,0)= pack_pixel_2to4(p8,p9);
    SRC_X4(4,1)= pack_pixel_2to4(p9,p10);
    SRC_X4(4,0)= pack_pixel_2to4(p10,p11);
}

static void x264_predict_8x8_vl_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_TOPRIGHT
    SRC(0,0)= F1(t0,t1);
    SRC(0,1)= F2(t0,t1,t2);
    SRC(0,2)=SRC(1,0)= F1(t1,t2);
    SRC(0,3)=SRC(1,1)= F2(t1,t2,t3);
    SRC(0,4)=SRC(1,2)=SRC(2,0)= F1(t2,t3);
    SRC(0,5)=SRC(1,3)=SRC(2,1)= F2(t2,t3,t4);
    SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= F1(t3,t4);
    SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= F2(t3,t4,t5);
    SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= F1(t4,t5);
    SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= F2(t4,t5,t6);
    SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= F1(t5,t6);
    SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= F2(t5,t6,t7);
    SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= F1(t6,t7);
    SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= F2(t6,t7,t8);
    SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= F1(t7,t8);
    SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= F2(t7,t8,t9);
    SRC(5,6)=SRC(6,4)=SRC(7,2)= F1(t8,t9);
    SRC(5,7)=SRC(6,5)=SRC(7,3)= F2(t8,t9,t10);
    SRC(6,6)=SRC(7,4)= F1(t9,t10);
    SRC(6,7)=SRC(7,5)= F2(t9,t10,t11);
    SRC(7,6)= F1(t10,t11);
    SRC(7,7)= F2(t10,t11,t12);
}

static void x264_predict_8x8_hu_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    int p1 = pack_pixel_1to2(F1(l0,l1), F2(l0,l1,l2));
    int p2 = pack_pixel_1to2(F1(l1,l2), F2(l1,l2,l3));
    int p3 = pack_pixel_1to2(F1(l2,l3), F2(l2,l3,l4));
    int p4 = pack_pixel_1to2(F1(l3,l4), F2(l3,l4,l5));
    int p5 = pack_pixel_1to2(F1(l4,l5), F2(l4,l5,l6));
    int p6 = pack_pixel_1to2(F1(l5,l6), F2(l5,l6,l7));
    int p7 = pack_pixel_1to2(F1(l6,l7), F2(l6,l7,l7));
    int p8 = pack_pixel_1to2(l7,l7);
    SRC_X4(0,0)= pack_pixel_2to4(p1,p2);
    SRC_X4(0,1)= pack_pixel_2to4(p2,p3);
    SRC_X4(4,0)=SRC_X4(0,2)= pack_pixel_2to4(p3,p4);
    SRC_X4(4,1)=SRC_X4(0,3)= pack_pixel_2to4(p4,p5);
    SRC_X4(4,2)=SRC_X4(0,4)= pack_pixel_2to4(p5,p6);
    SRC_X4(4,3)=SRC_X4(0,5)= pack_pixel_2to4(p6,p7);
    SRC_X4(4,4)=SRC_X4(0,6)= pack_pixel_2to4(p7,p8);
    SRC_X4(4,5)=SRC_X4(4,6)=SRC_X4(0,7)=SRC_X4(4,7)= pack_pixel_2to4(p8,p8);
}

/****************************************************************************
 * Exported functions:
 ****************************************************************************/

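/* Illustrative usage (not part of this file): callers fill a table of
 * function pointers once at init time and then invoke predictors through it,
 * e.g.
 *
 *     x264_predict_t predict_16x16[7];
 *     x264_predict_16x16_init( cpu_flags, predict_16x16 );
 *     predict_16x16[I_PRED_16x16_DC]( dst );
 *
 * where cpu_flags is the detected X264_CPU_* mask and dst points at the
 * top-left pixel of the block inside a buffer with stride FDEC_STRIDE whose
 * left/top neighbours (dst[-1], dst[-FDEC_STRIDE]) are already
 * reconstructed; the variable names here are hypothetical. */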
void x264_predict_16x16_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_16x16_V ]     = x264_predict_16x16_v_c;
    pf[I_PRED_16x16_H ]     = x264_predict_16x16_h_c;
    pf[I_PRED_16x16_DC]     = x264_predict_16x16_dc_c;
    pf[I_PRED_16x16_P ]     = x264_predict_16x16_p_c;
    pf[I_PRED_16x16_DC_LEFT]= x264_predict_16x16_dc_left_c;
    pf[I_PRED_16x16_DC_TOP ]= x264_predict_16x16_dc_top_c;
    pf[I_PRED_16x16_DC_128 ]= x264_predict_16x16_dc_128_c;

#if HAVE_MMX
    x264_predict_16x16_init_mmx( cpu, pf );
#endif

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_predict_16x16_init_altivec( pf );
#endif

#if HAVE_ARMV6
    x264_predict_16x16_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_16x16_init_aarch64( cpu, pf );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_16x16_V ]     = x264_intra_predict_vert_16x16_msa;
        pf[I_PRED_16x16_H ]     = x264_intra_predict_hor_16x16_msa;
        pf[I_PRED_16x16_DC]     = x264_intra_predict_dc_16x16_msa;
        pf[I_PRED_16x16_P ]     = x264_intra_predict_plane_16x16_msa;
        pf[I_PRED_16x16_DC_LEFT]= x264_intra_predict_dc_left_16x16_msa;
        pf[I_PRED_16x16_DC_TOP ]= x264_intra_predict_dc_top_16x16_msa;
        pf[I_PRED_16x16_DC_128 ]= x264_intra_predict_dc_128_16x16_msa;
    }
#endif
#endif
}

void x264_predict_8x8c_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_CHROMA_V ]     = x264_predict_8x8c_v_c;
    pf[I_PRED_CHROMA_H ]     = x264_predict_8x8c_h_c;
    pf[I_PRED_CHROMA_DC]     = x264_predict_8x8c_dc_c;
    pf[I_PRED_CHROMA_P ]     = x264_predict_8x8c_p_c;
    pf[I_PRED_CHROMA_DC_LEFT]= x264_predict_8x8c_dc_left_c;
    pf[I_PRED_CHROMA_DC_TOP ]= x264_predict_8x8c_dc_top_c;
    pf[I_PRED_CHROMA_DC_128 ]= x264_predict_8x8c_dc_128_c;

#if HAVE_MMX
    x264_predict_8x8c_init_mmx( cpu, pf );
#endif

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_predict_8x8c_init_altivec( pf );
#endif

#if HAVE_ARMV6
    x264_predict_8x8c_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_8x8c_init_aarch64( cpu, pf );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_CHROMA_P ]     = x264_intra_predict_plane_8x8_msa;
    }
#endif
#endif
}

void x264_predict_8x16c_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_CHROMA_V ]     = x264_predict_8x16c_v_c;
    pf[I_PRED_CHROMA_H ]     = x264_predict_8x16c_h_c;
    pf[I_PRED_CHROMA_DC]     = x264_predict_8x16c_dc_c;
    pf[I_PRED_CHROMA_P ]     = x264_predict_8x16c_p_c;
    pf[I_PRED_CHROMA_DC_LEFT]= x264_predict_8x16c_dc_left_c;
    pf[I_PRED_CHROMA_DC_TOP ]= x264_predict_8x16c_dc_top_c;
    pf[I_PRED_CHROMA_DC_128 ]= x264_predict_8x16c_dc_128_c;

#if HAVE_MMX
    x264_predict_8x16c_init_mmx( cpu, pf );
#endif

#if HAVE_ARMV6
    x264_predict_8x16c_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_8x16c_init_aarch64( cpu, pf );
#endif
}

void x264_predict_8x8_init( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_filter )
{
    pf[I_PRED_8x8_V]      = x264_predict_8x8_v_c;
    pf[I_PRED_8x8_H]      = x264_predict_8x8_h_c;
    pf[I_PRED_8x8_DC]     = x264_predict_8x8_dc_c;
    pf[I_PRED_8x8_DDL]    = x264_predict_8x8_ddl_c;
    pf[I_PRED_8x8_DDR]    = x264_predict_8x8_ddr_c;
    pf[I_PRED_8x8_VR]     = x264_predict_8x8_vr_c;
    pf[I_PRED_8x8_HD]     = x264_predict_8x8_hd_c;
    pf[I_PRED_8x8_VL]     = x264_predict_8x8_vl_c;
    pf[I_PRED_8x8_HU]     = x264_predict_8x8_hu_c;
    pf[I_PRED_8x8_DC_LEFT]= x264_predict_8x8_dc_left_c;
    pf[I_PRED_8x8_DC_TOP] = x264_predict_8x8_dc_top_c;
    pf[I_PRED_8x8_DC_128] = x264_predict_8x8_dc_128_c;
    *predict_filter       = x264_predict_8x8_filter_c;

#if HAVE_MMX
    x264_predict_8x8_init_mmx( cpu, pf, predict_filter );
#endif

#if HAVE_ARMV6
    x264_predict_8x8_init_arm( cpu, pf, predict_filter );
#endif

#if ARCH_AARCH64
    x264_predict_8x8_init_aarch64( cpu, pf, predict_filter );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_8x8_DDL]    = x264_intra_predict_ddl_8x8_msa;
    }
#endif
#endif
}

void x264_predict_4x4_init( int cpu, x264_predict_t pf[12] )
{
    pf[I_PRED_4x4_V]      = x264_predict_4x4_v_c;
    pf[I_PRED_4x4_H]      = x264_predict_4x4_h_c;
    pf[I_PRED_4x4_DC]     = x264_predict_4x4_dc_c;
    pf[I_PRED_4x4_DDL]    = x264_predict_4x4_ddl_c;
    pf[I_PRED_4x4_DDR]    = x264_predict_4x4_ddr_c;
    pf[I_PRED_4x4_VR]     = x264_predict_4x4_vr_c;
    pf[I_PRED_4x4_HD]     = x264_predict_4x4_hd_c;
    pf[I_PRED_4x4_VL]     = x264_predict_4x4_vl_c;
    pf[I_PRED_4x4_HU]     = x264_predict_4x4_hu_c;
    pf[I_PRED_4x4_DC_LEFT]= x264_predict_4x4_dc_left_c;
    pf[I_PRED_4x4_DC_TOP] = x264_predict_4x4_dc_top_c;
    pf[I_PRED_4x4_DC_128] = x264_predict_4x4_dc_128_c;

#if HAVE_MMX
    x264_predict_4x4_init_mmx( cpu, pf );
#endif

#if HAVE_ARMV6
    x264_predict_4x4_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_4x4_init_aarch64( cpu, pf );
#endif
}