/*
 * HEVC video decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include "libavutil/pixdesc.h"
25 #include "bit_depth_template.c"
/* Address one pixel of the current block: src is the block origin,
 * stride is in pixels (not bytes). */
#define POS(x, y) src[(x) + stride * (y)]
30 static void FUNC(intra_pred)(HEVCContext *s, int x0, int y0, int log2_size, int c_idx)
33 ((x) >> s->sps->log2_min_pu_size)
35 (s->ref->tab_mvf[(x) + (y) * min_pu_width])
36 #define MVF_PU(x, y) \
37 MVF(PU(x0 + ((x) << hshift)), PU(y0 + ((y) << vshift)))
38 #define IS_INTRA(x, y) \
39 (MVF_PU(x, y).pred_flag == PF_INTRA)
40 #define MIN_TB_ADDR_ZS(x, y) \
41 s->pps->min_tb_addr_zs[(y) * s->sps->min_tb_width + (x)]
42 #define EXTEND(ptr, start, length) \
43 for (i = start; i < (start) + (length); i += 4) \
45 #define EXTEND_RIGHT_CIP(ptr, start, length) \
46 for (i = start; i < (start) + (length); i += 4) \
47 if (!IS_INTRA(i, -1)) \
48 AV_WN4P(&ptr[i], a); \
50 a = PIXEL_SPLAT_X4(ptr[i+3])
51 #define EXTEND_LEFT_CIP(ptr, start, length) \
52 for (i = start; i > (start) - (length); i--) \
53 if (!IS_INTRA(i - 1, -1)) \
55 #define EXTEND_UP_CIP(ptr, start, length) \
56 for (i = (start); i > (start) - (length); i -= 4) \
57 if (!IS_INTRA(-1, i - 3)) \
58 AV_WN4P(&ptr[i - 3], a); \
60 a = PIXEL_SPLAT_X4(ptr[i - 3])
61 #define EXTEND_DOWN_CIP(ptr, start, length) \
62 for (i = start; i < (start) + (length); i += 4) \
63 if (!IS_INTRA(-1, i)) \
64 AV_WN4P(&ptr[i], a); \
66 a = PIXEL_SPLAT_X4(ptr[i + 3])
68 HEVCLocalContext *lc = s->HEVClc;
70 int hshift = s->sps->hshift[c_idx];
71 int vshift = s->sps->vshift[c_idx];
72 int size = (1 << log2_size);
73 int size_in_luma_h = size << hshift;
74 int size_in_tbs_h = size_in_luma_h >> s->sps->log2_min_tb_size;
75 int size_in_luma_v = size << vshift;
76 int size_in_tbs_v = size_in_luma_v >> s->sps->log2_min_tb_size;
79 int x_tb = x0 >> s->sps->log2_min_tb_size;
80 int y_tb = y0 >> s->sps->log2_min_tb_size;
81 int cur_tb_addr = MIN_TB_ADDR_ZS(x_tb, y_tb);
83 ptrdiff_t stride = s->frame->linesize[c_idx] / sizeof(pixel);
84 pixel *src = (pixel*)s->frame->data[c_idx] + x + y * stride;
86 int min_pu_width = s->sps->min_pu_width;
88 enum IntraPredMode mode = c_idx ? lc->pu.intra_pred_mode_c :
89 lc->tu.cur_intra_pred_mode;
91 pixel left_array[2 * MAX_TB_SIZE + 1];
92 pixel filtered_left_array[2 * MAX_TB_SIZE + 1];
93 pixel top_array[2 * MAX_TB_SIZE + 1];
94 pixel filtered_top_array[2 * MAX_TB_SIZE + 1];
96 pixel *left = left_array + 1;
97 pixel *top = top_array + 1;
98 pixel *filtered_left = filtered_left_array + 1;
99 pixel *filtered_top = filtered_top_array + 1;
101 int cand_bottom_left = lc->na.cand_bottom_left && cur_tb_addr > MIN_TB_ADDR_ZS(x_tb - 1, y_tb + size_in_tbs_v);
102 int cand_left = lc->na.cand_left;
103 int cand_up_left = lc->na.cand_up_left;
104 int cand_up = lc->na.cand_up;
105 int cand_up_right = lc->na.cand_up_right && cur_tb_addr > MIN_TB_ADDR_ZS(x_tb + size_in_tbs_h, y_tb - 1);
107 int bottom_left_size = (FFMIN(y0 + 2 * size_in_luma_v, s->sps->height) -
108 (y0 + size_in_luma_v)) >> vshift;
109 int top_right_size = (FFMIN(x0 + 2 * size_in_luma_h, s->sps->width) -
110 (x0 + size_in_luma_h)) >> hshift;
112 if (s->pps->constrained_intra_pred_flag == 1) {
113 int size_in_luma_pu_v = PU(size_in_luma_v);
114 int size_in_luma_pu_h = PU(size_in_luma_h);
115 int on_pu_edge_x = !(x0 & ((1 << s->sps->log2_min_pu_size) - 1));
116 int on_pu_edge_y = !(y0 & ((1 << s->sps->log2_min_pu_size) - 1));
117 if (!size_in_luma_pu_h)
119 if (cand_bottom_left == 1 && on_pu_edge_x) {
120 int x_left_pu = PU(x0 - 1);
121 int y_bottom_pu = PU(y0 + size_in_luma_v);
122 int max = FFMIN(size_in_luma_pu_v, s->sps->min_pu_height - y_bottom_pu);
123 cand_bottom_left = 0;
124 for (i = 0; i < max; i += 2)
125 cand_bottom_left |= (MVF(x_left_pu, y_bottom_pu + i).pred_flag == PF_INTRA);
127 if (cand_left == 1 && on_pu_edge_x) {
128 int x_left_pu = PU(x0 - 1);
129 int y_left_pu = PU(y0);
130 int max = FFMIN(size_in_luma_pu_v, s->sps->min_pu_height - y_left_pu);
132 for (i = 0; i < max; i += 2)
133 cand_left |= (MVF(x_left_pu, y_left_pu + i).pred_flag == PF_INTRA);
135 if (cand_up_left == 1) {
136 int x_left_pu = PU(x0 - 1);
137 int y_top_pu = PU(y0 - 1);
138 cand_up_left = MVF(x_left_pu, y_top_pu).pred_flag == PF_INTRA;
140 if (cand_up == 1 && on_pu_edge_y) {
141 int x_top_pu = PU(x0);
142 int y_top_pu = PU(y0 - 1);
143 int max = FFMIN(size_in_luma_pu_h, s->sps->min_pu_width - x_top_pu);
145 for (i = 0; i < max; i += 2)
146 cand_up |= (MVF(x_top_pu + i, y_top_pu).pred_flag == PF_INTRA);
148 if (cand_up_right == 1 && on_pu_edge_y) {
149 int y_top_pu = PU(y0 - 1);
150 int x_right_pu = PU(x0 + size_in_luma_h);
151 int max = FFMIN(size_in_luma_pu_h, s->sps->min_pu_width - x_right_pu);
153 for (i = 0; i < max; i += 2)
154 cand_up_right |= (MVF(x_right_pu + i, y_top_pu).pred_flag == PF_INTRA);
156 memset(left, 128, 2 * MAX_TB_SIZE*sizeof(pixel));
157 memset(top , 128, 2 * MAX_TB_SIZE*sizeof(pixel));
161 left[-1] = POS(-1, -1);
165 for (i = 0; i <size; i+=4)
166 AV_WN4P(&top[i], AV_RN4P(&POS(i, -1)));
169 a = PIXEL_SPLAT_X4(POS(size + top_right_size - 1, -1));
170 for (i = size + top_right_size; i < (size << 1); i += 4)
172 for (i = size ; i < size+top_right_size; i+=4)
173 AV_WN4P(&top[i], AV_RN4P(&POS(i, -1)));
176 for (i = 0; i < size; i++)
177 left[i] = POS(-1, i);
178 if (cand_bottom_left) {
179 for (i = size ; i < size+bottom_left_size; i++)
180 left[i] = POS(-1, i);
181 a = PIXEL_SPLAT_X4(POS(-1, size + bottom_left_size - 1));
182 for (i = size + bottom_left_size; i < (size << 1); i+=4)
183 AV_WN4P(&left[i], a);
186 if (s->pps->constrained_intra_pred_flag == 1) {
187 if (cand_bottom_left || cand_left || cand_up_left || cand_up || cand_up_right) {
188 int size_max_x = x0 + ((2 * size) << hshift) < s->sps->width ?
189 2 * size : (s->sps->width - x0) >> hshift;
190 int size_max_y = y0 + ((2 * size) << vshift) < s->sps->height ?
191 2 * size : (s->sps->height - y0) >> vshift;
192 int j = size + (cand_bottom_left? bottom_left_size: 0) -1;
193 if (!cand_up_right) {
194 size_max_x = x0 + ((size) << hshift) < s->sps->width ?
195 size : (s->sps->width - x0) >> hshift;
197 if (!cand_bottom_left) {
198 size_max_y = y0 + (( size) << vshift) < s->sps->height ?
199 size : (s->sps->height - y0) >> vshift;
201 if (cand_bottom_left || cand_left || cand_up_left) {
202 while (j > -1 && !IS_INTRA(-1, j))
204 if (!IS_INTRA(-1, j)) {
206 while (j < size_max_x && !IS_INTRA(j, -1))
208 EXTEND_LEFT_CIP(top, j, j + 1);
214 while (j < size_max_x && !IS_INTRA(j, -1))
218 EXTEND_LEFT_CIP(top, j, j + 1);
220 EXTEND_LEFT_CIP(top, j, j);
227 if (cand_bottom_left || cand_left) {
228 a = PIXEL_SPLAT_X4(left[-1]);
229 EXTEND_DOWN_CIP(left, 0, size_max_y);
232 a = PIXEL_SPLAT_X4(left[-1]);
233 EXTEND(left, 0, size);
235 if (!cand_bottom_left) {
236 a = PIXEL_SPLAT_X4(left[size - 1]);
237 EXTEND(left, size, size);
239 if (x0 != 0 && y0 != 0) {
240 a = PIXEL_SPLAT_X4(left[size_max_y - 1]);
241 EXTEND_UP_CIP(left, size_max_y - 1, size_max_y);
242 if (!IS_INTRA(-1, - 1))
244 } else if (x0 == 0) {
245 a = PIXEL_SPLAT_X4(left[size_max_y - 1]);
246 EXTEND(left, 0, size_max_y);
248 a = PIXEL_SPLAT_X4(left[size_max_y - 1]);
249 EXTEND_UP_CIP(left, size_max_y - 1, size_max_y);
253 a = PIXEL_SPLAT_X4(left[-1]);
254 EXTEND_RIGHT_CIP(top, 0, size_max_x);
258 // Infer the unavailable samples
259 if (!cand_bottom_left) {
261 a = PIXEL_SPLAT_X4(left[size-1]);
262 EXTEND(left, size, size);
263 } else if (cand_up_left) {
264 a = PIXEL_SPLAT_X4(left[-1]);
265 EXTEND(left, 0, 2 * size);
267 } else if (cand_up) {
269 a = PIXEL_SPLAT_X4(left[-1]);
270 EXTEND(left, 0, 2 * size);
273 } else if (cand_up_right) {
274 left[-1] = top[size];
275 a = PIXEL_SPLAT_X4(left[-1]);
276 EXTEND(top, 0, size);
277 EXTEND(left, 0, 2 * size);
281 } else { // No samples available
282 left[-1] = (1 << (BIT_DEPTH - 1));
283 a = PIXEL_SPLAT_X4(left[-1]);
284 EXTEND(top, 0, 2 * size);
285 EXTEND(left, 0, 2 * size);
290 a = PIXEL_SPLAT_X4(left[size]);
291 EXTEND(left, 0, size);
297 a = PIXEL_SPLAT_X4(left[-1]);
298 EXTEND(top, 0, size);
300 if (!cand_up_right) {
301 a = PIXEL_SPLAT_X4(top[size-1]);
302 EXTEND(top, size, size);
309 if (mode != INTRA_DC && size != 4){
310 int intra_hor_ver_dist_thresh[] = { 7, 1, 0 };
311 int min_dist_vert_hor = FFMIN(FFABS((int)(mode - 26U)),
312 FFABS((int)(mode - 10U)));
313 if (min_dist_vert_hor > intra_hor_ver_dist_thresh[log2_size - 3]) {
314 int threshold = 1 << (BIT_DEPTH - 5);
315 if (s->sps->sps_strong_intra_smoothing_enable_flag && c_idx == 0 &&
317 FFABS(top[-1] + top[63] - 2 * top[31]) < threshold &&
318 FFABS(left[-1] + left[63] - 2 * left[31]) < threshold) {
319 // We can't just overwrite values in top because it could be
320 // a pointer into src
321 filtered_top[-1] = top[-1];
322 filtered_top[63] = top[63];
323 for (i = 0; i < 63; i++)
324 filtered_top[i] = ((64 - (i + 1)) * top[-1] +
325 (i + 1) * top[63] + 32) >> 6;
326 for (i = 0; i < 63; i++)
327 left[i] = ((64 - (i + 1)) * left[-1] +
328 (i + 1) * left[63] + 32) >> 6;
331 filtered_left[2 * size - 1] = left[2 * size - 1];
332 filtered_top[2 * size - 1] = top[2 * size - 1];
333 for (i = 2 * size - 2; i >= 0; i--)
334 filtered_left[i] = (left[i + 1] + 2 * left[i] +
335 left[i - 1] + 2) >> 2;
337 filtered_left[-1] = (left[0] + 2 * left[-1] + top[0] + 2) >> 2;
338 for (i = 2 * size - 2; i >= 0; i--)
339 filtered_top[i] = (top[i + 1] + 2 * top[i] +
340 top[i - 1] + 2) >> 2;
341 left = filtered_left;
350 s->hpc.pred_planar[log2_size - 2]((uint8_t *)src, (uint8_t *)top,
351 (uint8_t *)left, stride);
354 s->hpc.pred_dc((uint8_t *)src, (uint8_t *)top,
355 (uint8_t *)left, stride, log2_size, c_idx);
358 s->hpc.pred_angular[log2_size - 2]((uint8_t *)src, (uint8_t *)top,
359 (uint8_t *)left, stride, c_idx,
365 static av_always_inline void FUNC(pred_planar)(uint8_t *_src, const uint8_t *_top,
366 const uint8_t *_left, ptrdiff_t stride,
370 pixel *src = (pixel *)_src;
371 const pixel *top = (const pixel *)_top;
372 const pixel *left = (const pixel *)_left;
373 int size = 1 << trafo_size;
374 for (y = 0; y < size; y++)
375 for (x = 0; x < size; x++)
376 POS(x, y) = ((size - 1 - x) * left[y] + (x + 1) * top[size] +
377 (size - 1 - y) * top[x] + (y + 1) * left[size] + size) >> (trafo_size + 1);
380 static void FUNC(pred_planar_0)(uint8_t *_src, const uint8_t *_top,
381 const uint8_t *_left, ptrdiff_t stride)
383 FUNC(pred_planar)(_src, _top, _left, stride, 2);
386 static void FUNC(pred_planar_1)(uint8_t *_src, const uint8_t *_top,
387 const uint8_t *_left, ptrdiff_t stride)
389 FUNC(pred_planar)(_src, _top, _left, stride, 3);
392 static void FUNC(pred_planar_2)(uint8_t *_src, const uint8_t *_top,
393 const uint8_t *_left, ptrdiff_t stride)
395 FUNC(pred_planar)(_src, _top, _left, stride, 4);
398 static void FUNC(pred_planar_3)(uint8_t *_src, const uint8_t *_top,
399 const uint8_t *_left, ptrdiff_t stride)
401 FUNC(pred_planar)(_src, _top, _left, stride, 5);
404 static void FUNC(pred_dc)(uint8_t *_src, const uint8_t *_top,
405 const uint8_t *_left,
406 ptrdiff_t stride, int log2_size, int c_idx)
409 int size = (1 << log2_size);
410 pixel *src = (pixel *)_src;
411 const pixel *top = (const pixel *)_top;
412 const pixel *left = (const pixel *)_left;
415 for (i = 0; i < size; i++)
416 dc += left[i] + top[i];
418 dc >>= log2_size + 1;
420 a = PIXEL_SPLAT_X4(dc);
422 for (i = 0; i < size; i++)
423 for (j = 0; j < size; j+=4)
424 AV_WN4P(&POS(j, i), a);
426 if (c_idx == 0 && size < 32) {
427 POS(0, 0) = (left[0] + 2 * dc + top[0] + 2) >> 2;
428 for (x = 1; x < size; x++)
429 POS(x, 0) = (top[x] + 3 * dc + 2) >> 2;
430 for (y = 1; y < size; y++)
431 POS(0, y) = (left[y] + 3 * dc + 2) >> 2;
435 static av_always_inline void FUNC(pred_angular)(uint8_t *_src,
437 const uint8_t *_left,
438 ptrdiff_t stride, int c_idx,
442 pixel *src = (pixel *)_src;
443 const pixel *top = (const pixel *)_top;
444 const pixel *left = (const pixel *)_left;
446 static const int intra_pred_angle[] = {
447 32, 26, 21, 17, 13, 9, 5, 2, 0, -2, -5, -9, -13, -17, -21, -26, -32,
448 -26, -21, -17, -13, -9, -5, -2, 0, 2, 5, 9, 13, 17, 21, 26, 32
450 static const int inv_angle[] = {
451 -4096, -1638, -910, -630, -482, -390, -315, -256, -315, -390, -482,
452 -630, -910, -1638, -4096
455 int angle = intra_pred_angle[mode - 2];
456 pixel ref_array[3 * MAX_TB_SIZE + 4];
457 pixel *ref_tmp = ref_array + size;
459 int last = (size * angle) >> 5;
463 if (angle < 0 && last < -1) {
464 for (x = 0; x <= size; x += 4)
465 AV_WN4P(&ref_tmp[x], AV_RN4P(&top[x - 1]));
466 for (x = last; x <= -1; x++)
467 ref_tmp[x] = left[-1 + ((x * inv_angle[mode - 11] + 128) >> 8)];
471 for (y = 0; y < size; y++) {
472 int idx = ((y + 1) * angle) >> 5;
473 int fact = ((y + 1) * angle) & 31;
475 for (x = 0; x < size; x += 4) {
476 POS(x , y) = ((32 - fact) * ref[x + idx + 1] +
477 fact * ref[x + idx + 2] + 16) >> 5;
478 POS(x + 1, y) = ((32 - fact) * ref[x + 1 + idx + 1] +
479 fact * ref[x + 1 + idx + 2] + 16) >> 5;
480 POS(x + 2, y) = ((32 - fact) * ref[x + 2 + idx + 1] +
481 fact * ref[x + 2 + idx + 2] + 16) >> 5;
482 POS(x + 3, y) = ((32 - fact) * ref[x + 3 + idx + 1] +
483 fact * ref[x + 3 + idx + 2] + 16) >> 5;
486 for (x = 0; x < size; x += 4)
487 AV_WN4P(&POS(x, y), AV_RN4P(&ref[x + idx + 1]));
490 if (mode == 26 && c_idx == 0 && size < 32) {
491 for (y = 0; y < size; y++)
492 POS(0, y) = av_clip_pixel(top[0] + ((left[y] - left[-1]) >> 1));
496 if (angle < 0 && last < -1) {
497 for (x = 0; x <= size; x += 4)
498 AV_WN4P(&ref_tmp[x], AV_RN4P(&left[x - 1]));
499 for (x = last; x <= -1; x++)
500 ref_tmp[x] = top[-1 + ((x * inv_angle[mode - 11] + 128) >> 8)];
504 for (x = 0; x < size; x++) {
505 int idx = ((x + 1) * angle) >> 5;
506 int fact = ((x + 1) * angle) & 31;
508 for (y = 0; y < size; y++) {
509 POS(x, y) = ((32 - fact) * ref[y + idx + 1] +
510 fact * ref[y + idx + 2] + 16) >> 5;
513 for (y = 0; y < size; y++)
514 POS(x, y) = ref[y + idx + 1];
517 if (mode == 10 && c_idx == 0 && size < 32) {
518 for (x = 0; x < size; x += 4) {
519 POS(x, 0) = av_clip_pixel(left[0] + ((top[x ] - top[-1]) >> 1));
520 POS(x + 1, 0) = av_clip_pixel(left[0] + ((top[x + 1] - top[-1]) >> 1));
521 POS(x + 2, 0) = av_clip_pixel(left[0] + ((top[x + 2] - top[-1]) >> 1));
522 POS(x + 3, 0) = av_clip_pixel(left[0] + ((top[x + 3] - top[-1]) >> 1));
528 static void FUNC(pred_angular_0)(uint8_t *src, const uint8_t *top,
530 ptrdiff_t stride, int c_idx, int mode)
532 FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 2);
535 static void FUNC(pred_angular_1)(uint8_t *src, const uint8_t *top,
537 ptrdiff_t stride, int c_idx, int mode)
539 FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 3);
542 static void FUNC(pred_angular_2)(uint8_t *src, const uint8_t *top,
544 ptrdiff_t stride, int c_idx, int mode)
546 FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 4);
549 static void FUNC(pred_angular_3)(uint8_t *src, const uint8_t *top,
551 ptrdiff_t stride, int c_idx, int mode)
553 FUNC(pred_angular)(src, top, left, stride, c_idx, mode, 1 << 5);
/* Undefine the helper macros so this template can be re-included for
 * another bit depth without redefinition warnings. */
#undef EXTEND_LEFT_CIP
#undef EXTEND_RIGHT_CIP
#undef EXTEND_UP_CIP
#undef EXTEND_DOWN_CIP
#undef IS_INTRA
#undef MVF_PU
#undef MVF
#undef PU
#undef EXTEND
#undef MIN_TB_ADDR_ZS