4 * Copyright (C) 2012 - 2013 Guillaume Martres
5 * Copyright (C) 2012 - 2013 Mickael Raulet
6 * Copyright (C) 2012 - 2013 Gildas Cocherel
7 * Copyright (C) 2012 - 2013 Wassim Hamidouche
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "libavutil/atomic.h"
27 #include "libavutil/attributes.h"
28 #include "libavutil/common.h"
29 #include "libavutil/display.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/mastering_display_metadata.h"
32 #include "libavutil/md5.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/stereo3d.h"
38 #include "bytestream.h"
39 #include "cabac_functions.h"
/* Lookup table mapping a prediction-block width (2..64) to a compact
 * function index (0..9) used to select the matching pel/weighting DSP
 * routine. Only the listed widths are valid inputs; all other entries
 * are zero-initialized by the designated initializers. */
44 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
47 * NOTE: Each function hls_foo correspond to the function foo in the
48 * specification (HLS stands for High Level Syntax).
55 /* free everything allocated by pic_arrays_init() */
/* Free every per-frame array and buffer pool allocated by pic_arrays_init().
 * av_freep() and av_buffer_pool_uninit() both tolerate NULL and reset the
 * pointer, so this is safe to call on a partially-initialized context
 * (it is the cleanup target of pic_arrays_init()'s failure path).
 * NOTE(review): this excerpt is elided; some original lines (braces, a few
 * statements) are not shown here. */
56 static void pic_arrays_free(HEVCContext *s)
59 av_freep(&s->deblock);
61 av_freep(&s->skip_flag);
62 av_freep(&s->tab_ct_depth);
64 av_freep(&s->tab_ipm);
65 av_freep(&s->cbf_luma);
68 av_freep(&s->qp_y_tab);
69 av_freep(&s->tab_slice_address);
70 av_freep(&s->filter_slice_edges);
72 av_freep(&s->horizontal_bs);
73 av_freep(&s->vertical_bs);
/* Slice-header entry-point arrays are owned here too (allocated in
 * hls_slice_header()). */
75 av_freep(&s->sh.entry_point_offset);
76 av_freep(&s->sh.size);
77 av_freep(&s->sh.offset);
/* Buffer pools for per-CTB motion fields and reference-picture-list tabs. */
79 av_buffer_pool_uninit(&s->tab_mvf_pool);
80 av_buffer_pool_uninit(&s->rpl_tab_pool);
83 /* allocate arrays that depend on frame dimensions */
/* Allocate all per-frame arrays whose sizes depend on the coded frame
 * dimensions taken from the SPS (SAO/deblock per-CTB state, CU/TU/PU
 * flag maps, QP map, boundary-strength maps, and the MvField/RPL buffer
 * pools). Returns 0 on success or AVERROR(ENOMEM) on any allocation
 * failure (the shared failure path at the end frees what was allocated —
 * presumably via pic_arrays_free(); the intervening lines are elided in
 * this excerpt).
 * NOTE(review): excerpt is elided; braces and the fail label are not shown. */
84 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
86 int log2_min_cb_size = sps->log2_min_cb_size;
87 int width = sps->width;
88 int height = sps->height;
/* +1 in each dimension so lookups at the right/bottom picture edge stay
 * in bounds. */
89 int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
90 ((height >> log2_min_cb_size) + 1);
91 int ctb_count = sps->ctb_width * sps->ctb_height;
92 int min_pu_size = sps->min_pu_width * sps->min_pu_height;
/* Boundary-strength grid has 4-sample granularity. */
94 s->bs_width = (width >> 2) + 1;
95 s->bs_height = (height >> 2) + 1;
97 s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao));
98 s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock));
99 if (!s->sao || !s->deblock)
102 s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
103 s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
104 if (!s->skip_flag || !s->tab_ct_depth)
107 s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
108 s->tab_ipm = av_mallocz(min_pu_size);
109 s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
110 if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
113 s->filter_slice_edges = av_mallocz(ctb_count);
114 s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
115 sizeof(*s->tab_slice_address));
116 s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
117 sizeof(*s->qp_y_tab));
118 if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
121 s->horizontal_bs = av_mallocz_array(s->bs_width, s->bs_height);
122 s->vertical_bs = av_mallocz_array(s->bs_width, s->bs_height);
123 if (!s->horizontal_bs || !s->vertical_bs)
/* Pools so per-frame MvField/RPL buffers can be recycled across frames. */
126 s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
128 s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
130 if (!s->tab_mvf_pool || !s->rpl_tab_pool)
/* Common failure exit: all checks above jump here (elided) and report OOM. */
137 return AVERROR(ENOMEM);
/* Parse the pred_weight_table() slice-header syntax (weighted prediction
 * parameters) into s->sh: log2 weight denominators, then per-reference
 * luma/chroma weights and offsets for list L0 and, for B slices, list L1.
 * Entries whose per-ref flag is 0 get the default weight
 * (1 << log2_weight_denom) and offset 0.
 * NOTE(review): excerpt is elided; closing braces and some else-lines are
 * not shown. */
140 static void pred_weight_table(HEVCContext *s, GetBitContext *gb)
144 uint8_t luma_weight_l0_flag[16];
145 uint8_t chroma_weight_l0_flag[16];
146 uint8_t luma_weight_l1_flag[16];
147 uint8_t chroma_weight_l1_flag[16];
148 int luma_log2_weight_denom;
150 luma_log2_weight_denom = get_ue_golomb_long(gb);
/* Out-of-range denom is only reported, then clamped to 3 bits below —
 * parsing continues with the clipped value. */
151 if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7)
152 av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
153 s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
/* Chroma denom is coded as a delta against the luma denom (4:0:0 has none). */
154 if (s->ps.sps->chroma_format_idc != 0) {
155 int delta = get_se_golomb(gb);
156 s->sh.chroma_log2_weight_denom = av_clip_uintp2(s->sh.luma_log2_weight_denom + delta, 3);
/* --- list L0 --- */
159 for (i = 0; i < s->sh.nb_refs[L0]; i++) {
160 luma_weight_l0_flag[i] = get_bits1(gb);
161 if (!luma_weight_l0_flag[i]) {
162 s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
163 s->sh.luma_offset_l0[i] = 0;
166 if (s->ps.sps->chroma_format_idc != 0) {
167 for (i = 0; i < s->sh.nb_refs[L0]; i++)
168 chroma_weight_l0_flag[i] = get_bits1(gb);
170 for (i = 0; i < s->sh.nb_refs[L0]; i++)
171 chroma_weight_l0_flag[i] = 0;
173 for (i = 0; i < s->sh.nb_refs[L0]; i++) {
174 if (luma_weight_l0_flag[i]) {
175 int delta_luma_weight_l0 = get_se_golomb(gb);
176 s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
177 s->sh.luma_offset_l0[i] = get_se_golomb(gb);
179 if (chroma_weight_l0_flag[i]) {
180 for (j = 0; j < 2; j++) {
181 int delta_chroma_weight_l0 = get_se_golomb(gb);
182 int delta_chroma_offset_l0 = get_se_golomb(gb);
183 s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
/* Offset is reconstructed from the delta and the weight, then clipped to
 * the 8-bit offset range [-128, 127]. */
184 s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
185 >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
188 s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
189 s->sh.chroma_offset_l0[i][0] = 0;
190 s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
191 s->sh.chroma_offset_l0[i][1] = 0;
/* --- list L1: same syntax, only present for B slices --- */
194 if (s->sh.slice_type == B_SLICE) {
195 for (i = 0; i < s->sh.nb_refs[L1]; i++) {
196 luma_weight_l1_flag[i] = get_bits1(gb);
197 if (!luma_weight_l1_flag[i]) {
198 s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
199 s->sh.luma_offset_l1[i] = 0;
202 if (s->ps.sps->chroma_format_idc != 0) {
203 for (i = 0; i < s->sh.nb_refs[L1]; i++)
204 chroma_weight_l1_flag[i] = get_bits1(gb);
206 for (i = 0; i < s->sh.nb_refs[L1]; i++)
207 chroma_weight_l1_flag[i] = 0;
209 for (i = 0; i < s->sh.nb_refs[L1]; i++) {
210 if (luma_weight_l1_flag[i]) {
211 int delta_luma_weight_l1 = get_se_golomb(gb);
212 s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
213 s->sh.luma_offset_l1[i] = get_se_golomb(gb);
215 if (chroma_weight_l1_flag[i]) {
216 for (j = 0; j < 2; j++) {
217 int delta_chroma_weight_l1 = get_se_golomb(gb);
218 int delta_chroma_offset_l1 = get_se_golomb(gb);
219 s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
220 s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
221 >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
224 s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
225 s->sh.chroma_offset_l1[i][0] = 0;
226 s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
227 s->sh.chroma_offset_l1[i][1] = 0;
/* Decode the long-term reference picture set from the slice header into
 * *rps. Up to nb_sps entries come from SPS-signalled candidates
 * (lt_ref_pic_poc_lsb_sps), the remaining nb_sh entries are coded
 * explicitly. Each entry may carry a delta_poc_msb to disambiguate the
 * POC LSB. Returns AVERROR_INVALIDDATA if the combined count would
 * overflow rps->poc; otherwise presumably 0 (tail of the function is
 * elided in this excerpt).
 * NOTE(review): excerpt is elided; braces and some lines are not shown. */
233 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
235 const HEVCSPS *sps = s->ps.sps;
236 int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
237 int prev_delta_msb = 0;
238 unsigned int nb_sps = 0, nb_sh;
242 if (!sps->long_term_ref_pics_present_flag)
245 if (sps->num_long_term_ref_pics_sps > 0)
246 nb_sps = get_ue_golomb_long(gb);
247 nb_sh = get_ue_golomb_long(gb);
/* uint64_t cast avoids unsigned wraparound when validating the sum. */
249 if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
250 return AVERROR_INVALIDDATA;
252 rps->nb_refs = nb_sh + nb_sps;
254 for (i = 0; i < rps->nb_refs; i++) {
255 uint8_t delta_poc_msb_present;
258 uint8_t lt_idx_sps = 0;
/* First nb_sps entries reference SPS candidates by index. */
260 if (sps->num_long_term_ref_pics_sps > 1)
261 lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
263 rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
264 rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
/* Remaining entries are coded explicitly in the slice header. */
266 rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
267 rps->used[i] = get_bits1(gb);
270 delta_poc_msb_present = get_bits1(gb);
271 if (delta_poc_msb_present) {
272 int delta = get_ue_golomb_long(gb);
/* MSB deltas are cumulative within each of the two entry groups;
 * the accumulator resets at the SPS/slice-header boundary (i == nb_sps). */
274 if (i && i != nb_sps)
275 delta += prev_delta_msb;
277 rps->poc[i] += s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
278 prev_delta_msb = delta;
/* Copy stream-level parameters from the active SPS (and its VPS) into the
 * AVCodecContext: pixel format, coded/output dimensions, reorder depth,
 * profile/level, SAR, color properties and frame rate. VPS timing info
 * takes precedence over VUI timing info.
 * NOTE(review): excerpt is elided; the parameter list continuation, braces
 * and some lines are not shown. */
285 static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps,
288 const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
289 unsigned int num = 0, den = 0;
291 avctx->pix_fmt = sps->pix_fmt;
292 avctx->coded_width = sps->width;
293 avctx->coded_height = sps->height;
294 avctx->width = sps->output_width;
295 avctx->height = sps->output_height;
/* Maximum reorder depth of the highest sub-layer bounds b-frame delay. */
296 avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
297 avctx->profile = sps->ptl.general_ptl.profile_idc;
298 avctx->level = sps->ptl.general_ptl.level_idc;
300 ff_set_sar(avctx, sps->vui.sar);
302 if (sps->vui.video_signal_type_present_flag)
303 avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
/* Default to limited range when the VUI does not say otherwise. */
306 avctx->color_range = AVCOL_RANGE_MPEG;
308 if (sps->vui.colour_description_present_flag) {
309 avctx->color_primaries = sps->vui.colour_primaries;
310 avctx->color_trc = sps->vui.transfer_characteristic;
311 avctx->colorspace = sps->vui.matrix_coeffs;
313 avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
314 avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
315 avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
/* Frame rate: VPS timing wins over VUI timing when both are present. */
318 if (vps->vps_timing_info_present_flag) {
319 num = vps->vps_num_units_in_tick;
320 den = vps->vps_time_scale;
321 } else if (sps->vui.vui_timing_info_present_flag) {
322 num = sps->vui.vui_num_units_in_tick;
323 den = sps->vui.vui_time_scale;
/* num/den are ticks-per-frame, so they land swapped in framerate.den/num. */
326 if (num != 0 && den != 0)
327 av_reduce(&avctx->framerate.den, &avctx->framerate.num,
/* Activate a new SPS: (re)allocate the per-frame arrays, export stream
 * parameters, negotiate the output pixel format (offering the compiled-in
 * hwaccel formats for supported chroma/bit-depth combinations via
 * ff_thread_get_format() when no format was forced), reinitialize the
 * prediction/DSP/videodsp function tables for the SPS bit depth, and
 * (re)allocate the SAO edge-pixel line buffers when SAO is enabled in
 * software decoding. Also latches the matching VPS.
 * NOTE(review): excerpt is elided; error paths, #endif lines and braces
 * are not shown. */
331 static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
333 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + CONFIG_HEVC_D3D11VA_HWACCEL + CONFIG_HEVC_VAAPI_HWACCEL + CONFIG_HEVC_VDPAU_HWACCEL)
/* +2: room for the software format and the AV_PIX_FMT_NONE terminator. */
334 enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
344 ret = pic_arrays_init(s, sps);
348 export_stream_params(s->avctx, &s->ps, sps);
/* Build the candidate format list: hwaccels first, keyed on the SW format. */
350 switch (sps->pix_fmt) {
351 case AV_PIX_FMT_YUV420P:
352 case AV_PIX_FMT_YUVJ420P:
353 #if CONFIG_HEVC_DXVA2_HWACCEL
354 *fmt++ = AV_PIX_FMT_DXVA2_VLD;
356 #if CONFIG_HEVC_D3D11VA_HWACCEL
357 *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
359 #if CONFIG_HEVC_VAAPI_HWACCEL
360 *fmt++ = AV_PIX_FMT_VAAPI;
362 #if CONFIG_HEVC_VDPAU_HWACCEL
363 *fmt++ = AV_PIX_FMT_VDPAU;
366 case AV_PIX_FMT_YUV420P10:
367 #if CONFIG_HEVC_DXVA2_HWACCEL
368 *fmt++ = AV_PIX_FMT_DXVA2_VLD;
370 #if CONFIG_HEVC_D3D11VA_HWACCEL
371 *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
373 #if CONFIG_HEVC_VAAPI_HWACCEL
374 *fmt++ = AV_PIX_FMT_VAAPI;
/* No forced format: append the SW format, terminate, and let the caller's
 * get_format() callback pick one. */
379 if (pix_fmt == AV_PIX_FMT_NONE) {
380 *fmt++ = sps->pix_fmt;
381 *fmt = AV_PIX_FMT_NONE;
383 ret = ff_thread_get_format(s->avctx, pix_fmts);
386 s->avctx->pix_fmt = ret;
389 s->avctx->pix_fmt = pix_fmt;
/* Bit depth changed => function pointer tables must be rebuilt. */
392 ff_hevc_pred_init(&s->hpc, sps->bit_depth);
393 ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
394 ff_videodsp_init (&s->vdsp, sps->bit_depth);
396 for (i = 0; i < 3; i++) {
397 av_freep(&s->sao_pixel_buffer_h[i]);
398 av_freep(&s->sao_pixel_buffer_v[i]);
/* SAO needs saved border lines only for software decoding. */
401 if (sps->sao_enabled && !s->avctx->hwaccel) {
402 int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
405 for(c_idx = 0; c_idx < c_count; c_idx++) {
406 int w = sps->width >> sps->hshift[c_idx];
407 int h = sps->height >> sps->vshift[c_idx];
408 s->sao_pixel_buffer_h[c_idx] =
409 av_malloc((w * 2 * sps->ctb_height) <<
411 s->sao_pixel_buffer_v[c_idx] =
412 av_malloc((h * 2 * sps->ctb_width) <<
418 s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
/* Parse a slice segment header (HEVC spec 7.3.6) from s->HEVClc->gb into
 * s->sh, activating the referenced PPS/SPS on the way. Handles IDR/BLA
 * sequence restarts, dependent slice segments, POC computation, short/long
 * term RPS, SAO flags, reference list sizes and modifications, weighted
 * prediction, QP offsets, deblocking overrides, entry points for
 * tiles/WPP, and the slice-header extension. Returns 0 on success or a
 * negative AVERROR on invalid bitstream data.
 * NOTE(review): excerpt is elided; closing braces, else-lines and some
 * statements are not shown. */
428 static int hls_slice_header(HEVCContext *s)
430 GetBitContext *gb = &s->HEVClc->gb;
431 SliceHeader *sh = &s->sh;
/* --- slice segment header start / IDR handling --- */
435 sh->first_slice_in_pic_flag = get_bits1(gb);
436 if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
/* New coded video sequence: bump the sequence id and drop references. */
437 s->seq_decode = (s->seq_decode + 1) & 0xff;
440 ff_hevc_clear_refs(s);
442 sh->no_output_of_prior_pics_flag = 0;
444 sh->no_output_of_prior_pics_flag = get_bits1(gb);
/* --- PPS / SPS activation --- */
446 sh->pps_id = get_ue_golomb_long(gb);
447 if (sh->pps_id >= MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
448 av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
449 return AVERROR_INVALIDDATA;
/* The PPS must stay constant within one picture. */
451 if (!sh->first_slice_in_pic_flag &&
452 s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
453 av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
454 return AVERROR_INVALIDDATA;
456 s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
457 if (s->nal_unit_type == NAL_CRA_NUT && s->last_eos == 1)
458 sh->no_output_of_prior_pics_flag = 1;
460 if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
461 const HEVCSPS* last_sps = s->ps.sps;
462 s->ps.sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
463 if (last_sps && IS_IRAP(s) && s->nal_unit_type != NAL_CRA_NUT) {
464 if (s->ps.sps->width != last_sps->width || s->ps.sps->height != last_sps->height ||
465 s->ps.sps->temporal_layer[s->ps.sps->max_sub_layers - 1].max_dec_pic_buffering !=
466 last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
467 sh->no_output_of_prior_pics_flag = 0;
/* SPS changed: flush references and reinitialize frame-sized state. */
469 ff_hevc_clear_refs(s);
470 ret = set_sps(s, s->ps.sps, AV_PIX_FMT_NONE);
474 s->seq_decode = (s->seq_decode + 1) & 0xff;
/* --- slice segment address / dependent slice segments --- */
478 sh->dependent_slice_segment_flag = 0;
479 if (!sh->first_slice_in_pic_flag) {
480 int slice_address_length;
482 if (s->ps.pps->dependent_slice_segments_enabled_flag)
483 sh->dependent_slice_segment_flag = get_bits1(gb);
485 slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
486 s->ps.sps->ctb_height);
487 sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
488 if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
489 av_log(s->avctx, AV_LOG_ERROR,
490 "Invalid slice segment address: %u.\n",
491 sh->slice_segment_addr);
492 return AVERROR_INVALIDDATA;
495 if (!sh->dependent_slice_segment_flag) {
496 sh->slice_addr = sh->slice_segment_addr;
500 sh->slice_segment_addr = sh->slice_addr = 0;
502 s->slice_initialized = 0;
/* --- independent slice segment: full header --- */
505 if (!sh->dependent_slice_segment_flag) {
506 s->slice_initialized = 0;
508 for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
509 skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
511 sh->slice_type = get_ue_golomb_long(gb);
512 if (!(sh->slice_type == I_SLICE ||
513 sh->slice_type == P_SLICE ||
514 sh->slice_type == B_SLICE)) {
515 av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
517 return AVERROR_INVALIDDATA;
519 if (IS_IRAP(s) && sh->slice_type != I_SLICE) {
520 av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
521 return AVERROR_INVALIDDATA;
524 // when flag is not present, picture is inferred to be output
525 sh->pic_output_flag = 1;
526 if (s->ps.pps->output_flag_present_flag)
527 sh->pic_output_flag = get_bits1(gb);
529 if (s->ps.sps->separate_colour_plane_flag)
530 sh->colour_plane_id = get_bits(gb, 2);
/* --- POC and reference picture sets --- */
535 sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
536 poc = ff_hevc_compute_poc(s, sh->pic_order_cnt_lsb);
/* POC must not change between slices of one picture; warn and keep the
 * old value unless EXPLODE error recognition is requested. */
537 if (!sh->first_slice_in_pic_flag && poc != s->poc) {
538 av_log(s->avctx, AV_LOG_WARNING,
539 "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
540 if (s->avctx->err_recognition & AV_EF_EXPLODE)
541 return AVERROR_INVALIDDATA;
546 sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
/* pos bookkeeping measures the bit size of the RPS for hwaccels. */
547 pos = get_bits_left(gb);
548 if (!sh->short_term_ref_pic_set_sps_flag) {
549 ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
553 sh->short_term_rps = &sh->slice_rps;
555 int numbits, rps_idx;
557 if (!s->ps.sps->nb_st_rps) {
558 av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
559 return AVERROR_INVALIDDATA;
562 numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
563 rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
564 sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
566 sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
568 pos = get_bits_left(gb);
569 ret = decode_lt_rps(s, &sh->long_term_rps, gb);
571 av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
572 if (s->avctx->err_recognition & AV_EF_EXPLODE)
573 return AVERROR_INVALIDDATA;
575 sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
577 if (s->ps.sps->sps_temporal_mvp_enabled_flag)
578 sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
580 sh->slice_temporal_mvp_enabled_flag = 0;
582 s->sh.short_term_rps = NULL;
/* Sub-layer non-reference NAL types are excluded from this check. */
587 if (s->temporal_id == 0 &&
588 s->nal_unit_type != NAL_TRAIL_N &&
589 s->nal_unit_type != NAL_TSA_N &&
590 s->nal_unit_type != NAL_STSA_N &&
591 s->nal_unit_type != NAL_RADL_N &&
592 s->nal_unit_type != NAL_RADL_R &&
593 s->nal_unit_type != NAL_RASL_N &&
594 s->nal_unit_type != NAL_RASL_R)
/* --- SAO flags --- */
597 if (s->ps.sps->sao_enabled) {
598 sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
599 if (s->ps.sps->chroma_format_idc) {
/* One flag covers both chroma planes. */
600 sh->slice_sample_adaptive_offset_flag[1] =
601 sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
604 sh->slice_sample_adaptive_offset_flag[0] = 0;
605 sh->slice_sample_adaptive_offset_flag[1] = 0;
606 sh->slice_sample_adaptive_offset_flag[2] = 0;
/* --- reference list sizes and modification --- */
609 sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
610 if (sh->slice_type == P_SLICE || sh->slice_type == B_SLICE) {
613 sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
614 if (sh->slice_type == B_SLICE)
615 sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
617 if (get_bits1(gb)) { // num_ref_idx_active_override_flag
618 sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
619 if (sh->slice_type == B_SLICE)
620 sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
622 if (sh->nb_refs[L0] > MAX_REFS || sh->nb_refs[L1] > MAX_REFS) {
623 av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
624 sh->nb_refs[L0], sh->nb_refs[L1]);
625 return AVERROR_INVALIDDATA;
628 sh->rpl_modification_flag[0] = 0;
629 sh->rpl_modification_flag[1] = 0;
630 nb_refs = ff_hevc_frame_nb_refs(s);
632 av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
633 return AVERROR_INVALIDDATA;
636 if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
637 sh->rpl_modification_flag[0] = get_bits1(gb);
638 if (sh->rpl_modification_flag[0]) {
639 for (i = 0; i < sh->nb_refs[L0]; i++)
640 sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
643 if (sh->slice_type == B_SLICE) {
644 sh->rpl_modification_flag[1] = get_bits1(gb);
645 if (sh->rpl_modification_flag[1] == 1)
646 for (i = 0; i < sh->nb_refs[L1]; i++)
647 sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
651 if (sh->slice_type == B_SLICE)
652 sh->mvd_l1_zero_flag = get_bits1(gb);
654 if (s->ps.pps->cabac_init_present_flag)
655 sh->cabac_init_flag = get_bits1(gb);
657 sh->cabac_init_flag = 0;
/* --- temporal MVP collocated picture --- */
659 sh->collocated_ref_idx = 0;
660 if (sh->slice_temporal_mvp_enabled_flag) {
661 sh->collocated_list = L0;
662 if (sh->slice_type == B_SLICE)
/* collocated_from_l0_flag: 1 means list L0, hence the negation. */
663 sh->collocated_list = !get_bits1(gb);
665 if (sh->nb_refs[sh->collocated_list] > 1) {
666 sh->collocated_ref_idx = get_ue_golomb_long(gb);
667 if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
668 av_log(s->avctx, AV_LOG_ERROR,
669 "Invalid collocated_ref_idx: %d.\n",
670 sh->collocated_ref_idx);
671 return AVERROR_INVALIDDATA;
/* --- weighted prediction table --- */
676 if ((s->ps.pps->weighted_pred_flag && sh->slice_type == P_SLICE) ||
677 (s->ps.pps->weighted_bipred_flag && sh->slice_type == B_SLICE)) {
678 pred_weight_table(s, gb);
681 sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
682 if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
683 av_log(s->avctx, AV_LOG_ERROR,
684 "Invalid number of merging MVP candidates: %d.\n",
685 sh->max_num_merge_cand);
686 return AVERROR_INVALIDDATA;
/* --- QP deltas/offsets --- */
690 sh->slice_qp_delta = get_se_golomb(gb);
692 if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
693 sh->slice_cb_qp_offset = get_se_golomb(gb);
694 sh->slice_cr_qp_offset = get_se_golomb(gb);
696 sh->slice_cb_qp_offset = 0;
697 sh->slice_cr_qp_offset = 0;
700 if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
701 sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
703 sh->cu_chroma_qp_offset_enabled_flag = 0;
/* --- deblocking filter overrides --- */
705 if (s->ps.pps->deblocking_filter_control_present_flag) {
706 int deblocking_filter_override_flag = 0;
708 if (s->ps.pps->deblocking_filter_override_enabled_flag)
709 deblocking_filter_override_flag = get_bits1(gb);
711 if (deblocking_filter_override_flag) {
712 sh->disable_deblocking_filter_flag = get_bits1(gb);
713 if (!sh->disable_deblocking_filter_flag) {
/* Offsets are coded halved; restore the even-valued range. */
714 sh->beta_offset = get_se_golomb(gb) * 2;
715 sh->tc_offset = get_se_golomb(gb) * 2;
718 sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
719 sh->beta_offset = s->ps.pps->beta_offset;
720 sh->tc_offset = s->ps.pps->tc_offset;
723 sh->disable_deblocking_filter_flag = 0;
728 if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
729 (sh->slice_sample_adaptive_offset_flag[0] ||
730 sh->slice_sample_adaptive_offset_flag[1] ||
731 !sh->disable_deblocking_filter_flag)) {
732 sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
734 sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
/* Dependent slice segment without a preceding independent one is invalid. */
736 } else if (!s->slice_initialized) {
737 av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
738 return AVERROR_INVALIDDATA;
/* --- entry points (tiles / wavefront) --- */
741 sh->num_entry_point_offsets = 0;
742 if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
743 unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
744 // It would be possible to bound this tighter but this here is simpler
745 if (num_entry_point_offsets > get_bits_left(gb)) {
746 av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
747 return AVERROR_INVALIDDATA;
750 sh->num_entry_point_offsets = num_entry_point_offsets;
751 if (sh->num_entry_point_offsets > 0) {
752 int offset_len = get_ue_golomb_long(gb) + 1;
754 if (offset_len < 1 || offset_len > 32) {
755 sh->num_entry_point_offsets = 0;
756 av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
757 return AVERROR_INVALIDDATA;
/* Replace any arrays left over from a previous slice header. */
760 av_freep(&sh->entry_point_offset);
761 av_freep(&sh->offset);
763 sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
764 sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
765 sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
766 if (!sh->entry_point_offset || !sh->offset || !sh->size) {
767 sh->num_entry_point_offsets = 0;
768 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
769 return AVERROR(ENOMEM);
771 for (i = 0; i < sh->num_entry_point_offsets; i++) {
772 unsigned val = get_bits_long(gb, offset_len);
773 sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
/* Parallel tile decoding is not implemented: force one thread. */
775 if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
776 s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
777 s->threads_number = 1;
779 s->enable_parallel_tiles = 0;
781 s->enable_parallel_tiles = 0;
/* --- slice header extension --- */
784 if (s->ps.pps->slice_header_extension_present_flag) {
785 unsigned int length = get_ue_golomb_long(gb);
/* 8LL avoids overflow when length is huge. */
786 if (length*8LL > get_bits_left(gb)) {
787 av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
788 return AVERROR_INVALIDDATA;
790 for (i = 0; i < length; i++)
791 skip_bits(gb, 8); // slice_header_extension_data_byte
794 // Inferred parameters
/* 26U forces unsigned arithmetic here; the range check below still treats
 * slice_qp as signed against -qp_bd_offset. */
795 sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
796 if (sh->slice_qp > 51 ||
797 sh->slice_qp < -s->ps.sps->qp_bd_offset) {
798 av_log(s->avctx, AV_LOG_ERROR,
799 "The slice_qp %d is outside the valid range "
802 -s->ps.sps->qp_bd_offset);
803 return AVERROR_INVALIDDATA;
806 sh->slice_ctb_addr_rs = sh->slice_segment_addr;
808 if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
809 av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
810 return AVERROR_INVALIDDATA;
813 if (get_bits_left(gb) < 0) {
814 av_log(s->avctx, AV_LOG_ERROR,
815 "Overread slice header by %d bits\n", -get_bits_left(gb));
816 return AVERROR_INVALIDDATA;
819 s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
821 if (!s->ps.pps->cu_qp_delta_enabled_flag)
822 s->HEVClc->qp_y = s->sh.slice_qp;
824 s->slice_initialized = 1;
825 s->HEVClc->tu.cu_qp_offset_cb = 0;
826 s->HEVClc->tu.cu_qp_offset_cr = 0;
/* RASL pictures following this access point must be dropped when true. */
828 s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == NAL_CRA_NUT && s->last_eos);
/* Index helper: access per-CTB table `tab` at CTB coordinates (x, y),
 * using the raster CTB stride from the active SPS. Relies on `s` being
 * in scope at the expansion site. */
833 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
/* Assign a SAO parameter field: decode it fresh when neither merge flag
 * is set (the decode branch line is elided in this excerpt), otherwise
 * copy it from the left or the above CTB. Relies on `sao`, `rx`, `ry`,
 * `sao_merge_left_flag` and `sao_merge_up_flag` at the expansion site. */
835 #define SET_SAO(elem, value) \
837 if (!sao_merge_up_flag && !sao_merge_left_flag) \
839 else if (sao_merge_left_flag) \
840 sao->elem = CTB(s->sao, rx-1, ry).elem; \
841 else if (sao_merge_up_flag) \
842 sao->elem = CTB(s->sao, rx, ry-1).elem; \
/* Decode the SAO parameters for the CTB at CTB coordinates (rx, ry):
 * merge flags first (left, then up), then per-component type index,
 * offsets, sign/band position or edge class, and finally the inferred
 * scaled offset values written into sao->offset_val.
 * NOTE(review): excerpt is elided; braces and some lines are not shown. */
847 static void hls_sao_param(HEVCContext *s, int rx, int ry)
849 HEVCLocalContext *lc = s->HEVClc;
850 int sao_merge_left_flag = 0;
851 int sao_merge_up_flag = 0;
852 SAOParams *sao = &CTB(s->sao, rx, ry);
855 if (s->sh.slice_sample_adaptive_offset_flag[0] ||
856 s->sh.slice_sample_adaptive_offset_flag[1]) {
/* Merge candidates are only available when the neighbour CTB exists
 * and is accessible from this slice/tile. */
858 if (lc->ctb_left_flag)
859 sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
861 if (ry > 0 && !sao_merge_left_flag) {
863 sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
/* Luma only in monochrome, otherwise all three components. */
867 for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
868 int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
869 s->ps.pps->log2_sao_offset_scale_chroma;
871 if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
872 sao->type_idx[c_idx] = SAO_NOT_APPLIED;
/* Cr (c_idx == 2) inherits type and edge class from Cb. */
877 sao->type_idx[2] = sao->type_idx[1];
878 sao->eo_class[2] = sao->eo_class[1];
880 SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
883 if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
886 for (i = 0; i < 4; i++)
887 SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
889 if (sao->type_idx[c_idx] == SAO_BAND) {
890 for (i = 0; i < 4; i++) {
/* Sign bit is only coded for non-zero band offsets. */
891 if (sao->offset_abs[c_idx][i]) {
892 SET_SAO(offset_sign[c_idx][i],
893 ff_hevc_sao_offset_sign_decode(s));
895 sao->offset_sign[c_idx][i] = 0;
898 SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
899 } else if (c_idx != 2) {
900 SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
903 // Inferred parameters
904 sao->offset_val[c_idx][0] = 0;
905 for (i = 0; i < 4; i++) {
906 sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
/* Edge offsets have implicit signs; band offsets use the coded sign. */
907 if (sao->type_idx[c_idx] == SAO_EDGE) {
909 sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
910 } else if (sao->offset_sign[c_idx][i]) {
911 sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
/* Scale offsets for high bit depths (PPS range-extension field). */
913 sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
/* Decode the cross-component prediction scaling factor (range extensions)
 * for chroma component `idx` into lc->tu.res_scale_val: a signed power of
 * two (+/- 1<<(abs-1)) when coded as non-zero, else 0 (prediction off).
 * NOTE(review): excerpt is elided; the else-line, closing braces and the
 * return statement are not shown. */
921 static int hls_cross_component_pred(HEVCContext *s, int idx) {
922 HEVCLocalContext *lc = s->HEVClc;
923 int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
925 if (log2_res_scale_abs_plus1 != 0) {
926 int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
/* (1 - 2*sign) maps flag {0,1} to multiplier {+1,-1}. */
927 lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
928 (1 - 2 * res_scale_sign_flag);
930 lc->tu.res_scale_val = 0;
937 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
938 int xBase, int yBase, int cb_xBase, int cb_yBase,
939 int log2_cb_size, int log2_trafo_size,
940 int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
942 HEVCLocalContext *lc = s->HEVClc;
943 const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
946 if (lc->cu.pred_mode == MODE_INTRA) {
947 int trafo_size = 1 << log2_trafo_size;
948 ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
950 s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
953 if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
954 (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
955 int scan_idx = SCAN_DIAG;
956 int scan_idx_c = SCAN_DIAG;
957 int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
958 (s->ps.sps->chroma_format_idc == 2 &&
959 (cbf_cb[1] || cbf_cr[1]));
961 if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
962 lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
963 if (lc->tu.cu_qp_delta != 0)
964 if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
965 lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
966 lc->tu.is_cu_qp_delta_coded = 1;
968 if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
969 lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
970 av_log(s->avctx, AV_LOG_ERROR,
971 "The cu_qp_delta %d is outside the valid range "
974 -(26 + s->ps.sps->qp_bd_offset / 2),
975 (25 + s->ps.sps->qp_bd_offset / 2));
976 return AVERROR_INVALIDDATA;
979 ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
982 if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
983 !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
984 int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
985 if (cu_chroma_qp_offset_flag) {
986 int cu_chroma_qp_offset_idx = 0;
987 if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
988 cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
989 av_log(s->avctx, AV_LOG_ERROR,
990 "cu_chroma_qp_offset_idx not yet tested.\n");
992 lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
993 lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
995 lc->tu.cu_qp_offset_cb = 0;
996 lc->tu.cu_qp_offset_cr = 0;
998 lc->tu.is_cu_chroma_qp_offset_coded = 1;
1001 if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1002 if (lc->tu.intra_pred_mode >= 6 &&
1003 lc->tu.intra_pred_mode <= 14) {
1004 scan_idx = SCAN_VERT;
1005 } else if (lc->tu.intra_pred_mode >= 22 &&
1006 lc->tu.intra_pred_mode <= 30) {
1007 scan_idx = SCAN_HORIZ;
1010 if (lc->tu.intra_pred_mode_c >= 6 &&
1011 lc->tu.intra_pred_mode_c <= 14) {
1012 scan_idx_c = SCAN_VERT;
1013 } else if (lc->tu.intra_pred_mode_c >= 22 &&
1014 lc->tu.intra_pred_mode_c <= 30) {
1015 scan_idx_c = SCAN_HORIZ;
1019 lc->tu.cross_pf = 0;
1022 ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1023 if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1024 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1025 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1026 lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1027 (lc->cu.pred_mode == MODE_INTER ||
1028 (lc->tu.chroma_mode_c == 4)));
1030 if (lc->tu.cross_pf) {
1031 hls_cross_component_pred(s, 0);
1033 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1034 if (lc->cu.pred_mode == MODE_INTRA) {
1035 ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1036 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1039 ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1040 log2_trafo_size_c, scan_idx_c, 1);
1042 if (lc->tu.cross_pf) {
1043 ptrdiff_t stride = s->frame->linesize[1];
1044 int hshift = s->ps.sps->hshift[1];
1045 int vshift = s->ps.sps->vshift[1];
1046 int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1047 int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1048 int size = 1 << log2_trafo_size_c;
1050 uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1051 ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1052 for (i = 0; i < (size * size); i++) {
1053 coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1055 s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1059 if (lc->tu.cross_pf) {
1060 hls_cross_component_pred(s, 1);
1062 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1063 if (lc->cu.pred_mode == MODE_INTRA) {
1064 ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1065 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1068 ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1069 log2_trafo_size_c, scan_idx_c, 2);
1071 if (lc->tu.cross_pf) {
1072 ptrdiff_t stride = s->frame->linesize[2];
1073 int hshift = s->ps.sps->hshift[2];
1074 int vshift = s->ps.sps->vshift[2];
1075 int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1076 int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1077 int size = 1 << log2_trafo_size_c;
1079 uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1080 ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1081 for (i = 0; i < (size * size); i++) {
1082 coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1084 s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1087 } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1088 int trafo_size_h = 1 << (log2_trafo_size + 1);
1089 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1090 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1091 if (lc->cu.pred_mode == MODE_INTRA) {
1092 ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1093 trafo_size_h, trafo_size_v);
1094 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1097 ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1098 log2_trafo_size, scan_idx_c, 1);
1100 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1101 if (lc->cu.pred_mode == MODE_INTRA) {
1102 ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1103 trafo_size_h, trafo_size_v);
1104 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1107 ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1108 log2_trafo_size, scan_idx_c, 2);
1111 } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1112 if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1113 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1114 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1115 ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1116 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1117 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1118 if (s->ps.sps->chroma_format_idc == 2) {
1119 ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1120 trafo_size_h, trafo_size_v);
1121 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1122 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1124 } else if (blk_idx == 3) {
1125 int trafo_size_h = 1 << (log2_trafo_size + 1);
1126 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1127 ff_hevc_set_neighbour_available(s, xBase, yBase,
1128 trafo_size_h, trafo_size_v);
1129 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1130 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1131 if (s->ps.sps->chroma_format_idc == 2) {
1132 ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1133 trafo_size_h, trafo_size_v);
1134 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1135 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1143 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1145 int cb_size = 1 << log2_cb_size;
1146 int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1148 int min_pu_width = s->ps.sps->min_pu_width;
1149 int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1150 int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1153 for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1154 for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1155 s->is_pcm[i + j * min_pu_width] = 2;
/* Parse one node of the transform tree (HLS: transform_tree(), the quadtree
 * that splits a coding block into transform units).  Decodes the split flag
 * and the chroma cbf flags at this depth, then either recurses into the four
 * child quadrants or decodes the luma cbf and the transform unit itself.
 * Returns 0 on success, a negative error code on failure. */
static int hls_transform_tree(HEVCContext *s, int x0, int y0,
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int trafo_depth, int blk_idx,
                              const int *base_cbf_cb, const int *base_cbf_cr)
    HEVCLocalContext *lc = s->HEVClc;
    uint8_t split_transform_flag;
    /* inherit the parent's chroma cbf flags; index [1] is the second chroma
     * block used for 4:2:2 */
    cbf_cb[0] = base_cbf_cb[0];
    cbf_cb[1] = base_cbf_cb[1];
    cbf_cr[0] = base_cbf_cr[0];
    cbf_cr[1] = base_cbf_cr[1];

    /* for intra NxN partitions the per-block intra modes become the active
     * TU modes one level below the CB */
    if (lc->cu.intra_split_flag) {
        if (trafo_depth == 1) {
            lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
            if (s->ps.sps->chroma_format_idc == 3) {
                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
                lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
                /* non-4:4:4: a single chroma mode covers the whole CB */
                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
                lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
        lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
        lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
        lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];

    /* split_transform_flag is explicitly coded only when splitting is
     * genuinely optional at this size/depth; otherwise it is inferred */
    if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
        log2_trafo_size > s->ps.sps->log2_min_tb_size &&
        trafo_depth < lc->cu.max_trafo_depth &&
        !(lc->cu.intra_split_flag && trafo_depth == 0)) {
        split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
        int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
                          lc->cu.pred_mode == MODE_INTER &&
                          lc->cu.part_mode != PART_2Nx2N &&

        split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
                               (lc->cu.intra_split_flag && trafo_depth == 0) ||

    /* chroma cbf flags are coded at this node unless the parent already
     * signalled them as zero; log2_trafo_size == 2 chroma is handled at the
     * parent (blk_idx == 3) for non-4:4:4 */
    if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
        if (trafo_depth == 0 || cbf_cb[0]) {
            cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
            if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
                cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);

        if (trafo_depth == 0 || cbf_cr[0]) {
            cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
            if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
                cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);

    if (split_transform_flag) {
        const int trafo_size_split = 1 << (log2_trafo_size - 1);
        const int x1 = x0 + trafo_size_split;
        const int y1 = y0 + trafo_size_split;

/* recurse into one quadrant, propagating this node's chroma cbf flags */
#define SUBDIVIDE(x, y, idx)                                                    \
    ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
                             log2_trafo_size - 1, trafo_depth + 1, idx,         \

        SUBDIVIDE(x0, y0, 0);
        SUBDIVIDE(x1, y0, 1);
        SUBDIVIDE(x0, y1, 2);
        SUBDIVIDE(x1, y1, 3);
        /* leaf node: decode the transform unit itself */
        int min_tu_size      = 1 << s->ps.sps->log2_min_tb_size;
        int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
        int min_tu_width     = s->ps.sps->min_tb_width;

        /* cbf_luma is explicitly coded only when something else in the TU
         * could carry the coefficients; otherwise it is implied */
        if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
            cbf_cb[0] || cbf_cr[0] ||
            (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
            cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);

        ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
                                 log2_cb_size, log2_trafo_size,
                                 blk_idx, cbf_luma, cbf_cb, cbf_cr);

        // TODO: store cbf_luma somewhere else
        /* mark every min-TB covered by this TU as carrying luma coefficients
         * (consumed later by the deblocking filter) */
        for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
            for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
                int x_tu = (x0 + j) >> log2_min_tu_size;
                int y_tu = (y0 + i) >> log2_min_tu_size;
                s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;

        if (!s->sh.disable_deblocking_filter_flag) {
            ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
            if (s->ps.pps->transquant_bypass_enable_flag &&
                lc->cu.cu_transquant_bypass_flag)
                set_deblocking_bypass(s, x0, y0, log2_trafo_size);
/* Decode one pcm_sample() coding block: skip the raw PCM payload in the
 * CABAC bytestream, then write the uncompressed luma and chroma samples
 * straight into the current frame via the put_pcm DSP functions.
 * Returns 0 on success, a negative error code on failure. */
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
    HEVCLocalContext *lc = s->HEVClc;
    int cb_size = 1 << log2_cb_size;
    int stride0 = s->frame->linesize[0];
    uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
    int stride1 = s->frame->linesize[1];
    uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
    int stride2 = s->frame->linesize[2];
    uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];

    /* total PCM payload size in bits: full-resolution luma plus the two
     * subsampled chroma planes at their own bit depth */
    int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
                 (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
                  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
                 s->ps.sps->pcm.bit_depth_chroma;
    /* advance the CABAC bytestream past the byte-aligned PCM data */
    const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);

    if (!s->sh.disable_deblocking_filter_flag)
        ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);

    ret = init_get_bits(&gb, pcm, length);

    s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
    if (s->ps.sps->chroma_format_idc) {
        s->hevcdsp.put_pcm(dst1, stride1,
                           cb_size >> s->ps.sps->hshift[1],
                           cb_size >> s->ps.sps->vshift[1],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
        s->hevcdsp.put_pcm(dst2, stride2,
                           cb_size >> s->ps.sps->hshift[2],
                           cb_size >> s->ps.sps->vshift[2],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
1323 * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1325 * @param s HEVC decoding context
1326 * @param dst target buffer for block data at block position
1327 * @param dststride stride of the dst buffer
1328 * @param ref reference picture buffer at origin (0, 0)
1329 * @param mv motion vector (relative to block position) to get pixel data from
1330 * @param x_off horizontal position of block from origin (0, 0)
1331 * @param y_off vertical position of block from origin (0, 0)
1332 * @param block_w width of block
1333 * @param block_h height of block
1334 * @param luma_weight weighting factor applied to the luma prediction
1335 * @param luma_offset additive offset applied to the luma prediction value
static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
                        AVFrame *ref, const Mv *mv, int x_off, int y_off,
                        int block_w, int block_h, int luma_weight, int luma_offset)
    HEVCLocalContext *lc = s->HEVClc;
    uint8_t *src         = ref->data[0];
    ptrdiff_t srcstride  = ref->linesize[0];
    int pic_width        = s->ps.sps->width;
    int pic_height       = s->ps.sps->height;
    /* explicit weighted prediction enabled for this slice type? */
    int weight_flag      = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
    int idx              = ff_hevc_pel_weight[block_w];

    /* integer-pel part of the MV (quarter-pel units); mx/my hold the
     * fractional part for the qpel filter selection */
    x_off += mv->x >> 2;
    y_off += mv->y >> 2;
    src   += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));

    /* if the filter footprint (QPEL_EXTRA taps) reaches outside the picture,
     * build a padded copy in edge_emu_buffer and read from that instead */
    if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
        x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
        y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset     = QPEL_EXTRA_BEFORE * srcstride       + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
                                 edge_emu_stride, srcstride,
                                 block_w + QPEL_EXTRA,
                                 block_h + QPEL_EXTRA,
                                 x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);
        src = lc->edge_emu_buffer + buf_offset;
        srcstride = edge_emu_stride;

        /* unweighted vs. explicitly weighted qpel interpolation */
        s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                                      block_h, mx, my, block_w);
        s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                                        block_h, s->sh.luma_log2_weight_denom,
                                                        luma_weight, luma_offset, mx, my, block_w);
1384 * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1386 * @param s HEVC decoding context
1387 * @param dst target buffer for block data at block position
1388 * @param dststride stride of the dst buffer
1389 * @param ref0 reference picture0 buffer at origin (0, 0)
1390 * @param mv0 motion vector0 (relative to block position) to get pixel data from
1391 * @param x_off horizontal position of block from origin (0, 0)
1392 * @param y_off vertical position of block from origin (0, 0)
1393 * @param block_w width of block
1394 * @param block_h height of block
1395 * @param ref1 reference picture1 buffer at origin (0, 0)
1396 * @param mv1 motion vector1 (relative to block position) to get pixel data from
1397 * @param current_mv current motion vector structure
static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
                       AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
                       int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
    HEVCLocalContext *lc = s->HEVClc;
    ptrdiff_t src0stride = ref0->linesize[0];
    ptrdiff_t src1stride = ref1->linesize[0];
    int pic_width        = s->ps.sps->width;
    int pic_height       = s->ps.sps->height;
    /* fractional-pel parts of both MVs (quarter-pel units) */
    int mx0              = mv0->x & 3;
    int my0              = mv0->y & 3;
    int mx1              = mv1->x & 3;
    int my1              = mv1->y & 3;
    int weight_flag      = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
    /* integer-pel block positions for each reference */
    int x_off0           = x_off + (mv0->x >> 2);
    int y_off0           = y_off + (mv0->y >> 2);
    int x_off1           = x_off + (mv1->x >> 2);
    int y_off1           = y_off + (mv1->y >> 2);
    int idx              = ff_hevc_pel_weight[block_w];

    uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
    uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);

    /* pad reference 0 through edge_emu_buffer when the filter taps cross
     * the picture border */
    if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
        x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
        y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset     = QPEL_EXTRA_BEFORE * src0stride      + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
                                 edge_emu_stride, src0stride,
                                 block_w + QPEL_EXTRA,
                                 block_h + QPEL_EXTRA,
                                 x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);
        src0 = lc->edge_emu_buffer + buf_offset;
        src0stride = edge_emu_stride;

    /* same padding for reference 1, using the second emu buffer */
    if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
        x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
        y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset     = QPEL_EXTRA_BEFORE * src1stride      + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
                                 edge_emu_stride, src1stride,
                                 block_w + QPEL_EXTRA,
                                 block_h + QPEL_EXTRA,
                                 x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);
        src1 = lc->edge_emu_buffer2 + buf_offset;
        src1stride = edge_emu_stride;

    /* first pass: interpolate reference 0 into the intermediate buffer,
     * then combine with reference 1 (averaged or explicitly weighted) */
    s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
                                                block_h, mx0, my0, block_w);
        s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                                       block_h, mx1, my1, block_w);
        s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                                         block_h, s->sh.luma_log2_weight_denom,
                                                         s->sh.luma_weight_l0[current_mv->ref_idx[0]],
                                                         s->sh.luma_weight_l1[current_mv->ref_idx[1]],
                                                         s->sh.luma_offset_l0[current_mv->ref_idx[0]],
                                                         s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1474 * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
 * @param s HEVC decoding context
 * @param dst0 target buffer for block data at block position
 * @param dststride stride of the dst0 buffer
 * @param src0 source (reference chroma plane) buffer at origin (0, 0)
 * @param srcstride stride of the src0 buffer
 * @param reflist reference picture list index (0 = L0, 1 = L1)
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block
 * @param block_h height of block
 * @param current_mv current motion vector structure
 * @param chroma_weight weighting factor applied to the chroma prediction
 * @param chroma_offset additive offset applied to the chroma prediction value
1490 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1491 ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1492 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1494 HEVCLocalContext *lc = s->HEVClc;
1495 int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1496 int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1497 const Mv *mv = ¤t_mv->mv[reflist];
1498 int weight_flag = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
1499 (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
1500 int idx = ff_hevc_pel_weight[block_w];
1501 int hshift = s->ps.sps->hshift[1];
1502 int vshift = s->ps.sps->vshift[1];
1503 intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1504 intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1505 intptr_t _mx = mx << (1 - hshift);
1506 intptr_t _my = my << (1 - vshift);
1508 x_off += mv->x >> (2 + hshift);
1509 y_off += mv->y >> (2 + vshift);
1510 src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1512 if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1513 x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1514 y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1515 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1516 int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1517 int buf_offset0 = EPEL_EXTRA_BEFORE *
1518 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1519 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1520 edge_emu_stride, srcstride,
1521 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1522 x_off - EPEL_EXTRA_BEFORE,
1523 y_off - EPEL_EXTRA_BEFORE,
1524 pic_width, pic_height);
1526 src0 = lc->edge_emu_buffer + buf_offset0;
1527 srcstride = edge_emu_stride;
1530 s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1531 block_h, _mx, _my, block_w);
1533 s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1534 block_h, s->sh.chroma_log2_weight_denom,
1535 chroma_weight, chroma_offset, _mx, _my, block_w);
1539 * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1541 * @param s HEVC decoding context
1542 * @param dst target buffer for block data at block position
1543 * @param dststride stride of the dst buffer
1544 * @param ref0 reference picture0 buffer at origin (0, 0)
1545 * @param mv0 motion vector0 (relative to block position) to get pixel data from
1546 * @param x_off horizontal position of block from origin (0, 0)
1547 * @param y_off vertical position of block from origin (0, 0)
1548 * @param block_w width of block
1549 * @param block_h height of block
1550 * @param ref1 reference picture1 buffer at origin (0, 0)
1551 * @param mv1 motion vector1 (relative to block position) to get pixel data from
1552 * @param current_mv current motion vector structure
1553 * @param cidx chroma component(cb, cr)
1555 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1556 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1558 HEVCLocalContext *lc = s->HEVClc;
1559 uint8_t *src1 = ref0->data[cidx+1];
1560 uint8_t *src2 = ref1->data[cidx+1];
1561 ptrdiff_t src1stride = ref0->linesize[cidx+1];
1562 ptrdiff_t src2stride = ref1->linesize[cidx+1];
1563 int weight_flag = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
1564 (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
1565 int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1566 int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1567 Mv *mv0 = ¤t_mv->mv[0];
1568 Mv *mv1 = ¤t_mv->mv[1];
1569 int hshift = s->ps.sps->hshift[1];
1570 int vshift = s->ps.sps->vshift[1];
1572 intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1573 intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1574 intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1575 intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1576 intptr_t _mx0 = mx0 << (1 - hshift);
1577 intptr_t _my0 = my0 << (1 - vshift);
1578 intptr_t _mx1 = mx1 << (1 - hshift);
1579 intptr_t _my1 = my1 << (1 - vshift);
1581 int x_off0 = x_off + (mv0->x >> (2 + hshift));
1582 int y_off0 = y_off + (mv0->y >> (2 + vshift));
1583 int x_off1 = x_off + (mv1->x >> (2 + hshift));
1584 int y_off1 = y_off + (mv1->y >> (2 + vshift));
1585 int idx = ff_hevc_pel_weight[block_w];
1586 src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1587 src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1589 if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1590 x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1591 y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1592 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1593 int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1594 int buf_offset1 = EPEL_EXTRA_BEFORE *
1595 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1597 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1598 edge_emu_stride, src1stride,
1599 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1600 x_off0 - EPEL_EXTRA_BEFORE,
1601 y_off0 - EPEL_EXTRA_BEFORE,
1602 pic_width, pic_height);
1604 src1 = lc->edge_emu_buffer + buf_offset1;
1605 src1stride = edge_emu_stride;
1608 if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1609 x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1610 y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1611 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1612 int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1613 int buf_offset1 = EPEL_EXTRA_BEFORE *
1614 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1616 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1617 edge_emu_stride, src2stride,
1618 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1619 x_off1 - EPEL_EXTRA_BEFORE,
1620 y_off1 - EPEL_EXTRA_BEFORE,
1621 pic_width, pic_height);
1623 src2 = lc->edge_emu_buffer2 + buf_offset1;
1624 src2stride = edge_emu_stride;
1627 s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1628 block_h, _mx0, _my0, block_w);
1630 s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1631 src2, src2stride, lc->tmp,
1632 block_h, _mx1, _my1, block_w);
1634 s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1635 src2, src2stride, lc->tmp,
1637 s->sh.chroma_log2_weight_denom,
1638 s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1639 s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1640 s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1641 s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1642 _mx1, _my1, block_w);
1645 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1646 const Mv *mv, int y0, int height)
1648 int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1650 if (s->threads_type == FF_THREAD_FRAME )
1651 ff_thread_await_progress(&ref->tf, y, 0);
/* Parse the explicit (non-merge / AMVP) motion data for one prediction unit:
 * inter_pred_idc, per-list reference index, MVD and MVP flag, then build the
 * final motion vectors as predictor + decoded delta. */
static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                  int nPbH, int log2_cb_size, int part_idx,
                                  int merge_idx, MvField *mv)
    HEVCLocalContext *lc = s->HEVClc;
    enum InterPredIdc inter_pred_idc = PRED_L0;

    ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
    /* only B slices may select L1 or bidirectional prediction */
    if (s->sh.slice_type == B_SLICE)
        inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);

    if (inter_pred_idc != PRED_L1) {
        if (s->sh.nb_refs[L0])
            mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);

        mv->pred_flag = PF_L0;
        ff_hevc_hls_mvd_coding(s, x0, y0, 0);
        mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
        ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                 part_idx, merge_idx, mv, mvp_flag, 0);
        /* final L0 MV = predictor + decoded delta */
        mv->mv[0].x += lc->pu.mvd.x;
        mv->mv[0].y += lc->pu.mvd.y;

    if (inter_pred_idc != PRED_L0) {
        if (s->sh.nb_refs[L1])
            mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);

        /* mvd_l1_zero_flag forces a zero L1 delta for bi-predicted PUs */
        if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
            AV_ZERO32(&lc->pu.mvd);
            ff_hevc_hls_mvd_coding(s, x0, y0, 1);

        mv->pred_flag += PF_L1;
        mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
        ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                 part_idx, merge_idx, mv, mvp_flag, 1);
        /* final L1 MV = predictor + decoded delta */
        mv->mv[1].x += lc->pu.mvd.x;
        mv->mv[1].y += lc->pu.mvd.y;
1699 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1701 int log2_cb_size, int partIdx, int idx)
1703 #define POS(c_idx, x, y) \
1704 &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1705 (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1706 HEVCLocalContext *lc = s->HEVClc;
1708 struct MvField current_mv = {{{ 0 }}};
1710 int min_pu_width = s->ps.sps->min_pu_width;
1712 MvField *tab_mvf = s->ref->tab_mvf;
1713 RefPicList *refPicList = s->ref->refPicList;
1714 HEVCFrame *ref0 = NULL, *ref1 = NULL;
1715 uint8_t *dst0 = POS(0, x0, y0);
1716 uint8_t *dst1 = POS(1, x0, y0);
1717 uint8_t *dst2 = POS(2, x0, y0);
1718 int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1719 int min_cb_width = s->ps.sps->min_cb_width;
1720 int x_cb = x0 >> log2_min_cb_size;
1721 int y_cb = y0 >> log2_min_cb_size;
1725 int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1728 lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
1730 if (skip_flag || lc->pu.merge_flag) {
1731 if (s->sh.max_num_merge_cand > 1)
1732 merge_idx = ff_hevc_merge_idx_decode(s);
1736 ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1737 partIdx, merge_idx, ¤t_mv);
1739 hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1740 partIdx, merge_idx, ¤t_mv);
1743 x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1744 y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1746 for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1747 for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1748 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1750 if (current_mv.pred_flag & PF_L0) {
1751 ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1754 hevc_await_progress(s, ref0, ¤t_mv.mv[0], y0, nPbH);
1756 if (current_mv.pred_flag & PF_L1) {
1757 ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1760 hevc_await_progress(s, ref1, ¤t_mv.mv[1], y0, nPbH);
1763 if (current_mv.pred_flag == PF_L0) {
1764 int x0_c = x0 >> s->ps.sps->hshift[1];
1765 int y0_c = y0 >> s->ps.sps->vshift[1];
1766 int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1767 int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1769 luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1770 ¤t_mv.mv[0], x0, y0, nPbW, nPbH,
1771 s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1772 s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1774 if (s->ps.sps->chroma_format_idc) {
1775 chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1776 0, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1777 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1778 chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1779 0, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1780 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1782 } else if (current_mv.pred_flag == PF_L1) {
1783 int x0_c = x0 >> s->ps.sps->hshift[1];
1784 int y0_c = y0 >> s->ps.sps->vshift[1];
1785 int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1786 int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1788 luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1789 ¤t_mv.mv[1], x0, y0, nPbW, nPbH,
1790 s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1791 s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1793 if (s->ps.sps->chroma_format_idc) {
1794 chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1795 1, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1796 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1798 chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1799 1, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1800 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1802 } else if (current_mv.pred_flag == PF_BI) {
1803 int x0_c = x0 >> s->ps.sps->hshift[1];
1804 int y0_c = y0 >> s->ps.sps->vshift[1];
1805 int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1806 int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1808 luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1809 ¤t_mv.mv[0], x0, y0, nPbW, nPbH,
1810 ref1->frame, ¤t_mv.mv[1], ¤t_mv);
1812 if (s->ps.sps->chroma_format_idc) {
1813 chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1814 x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, 0);
1816 chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1817 x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, 1);
/* Derive the luma intra prediction mode of one PU from the three
 * most-probable-mode candidates built from the left and top neighbour modes
 * (H.265 8.4.2), record it in tab_ipm / tab_mvf, and return it. */
static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
                                int prev_intra_luma_pred_flag)
    HEVCLocalContext *lc = s->HEVClc;
    int x_pu             = x0 >> s->ps.sps->log2_min_pu_size;
    int y_pu             = y0 >> s->ps.sps->log2_min_pu_size;
    int min_pu_width     = s->ps.sps->min_pu_width;
    int size_in_pus      = pu_size >> s->ps.sps->log2_min_pu_size;
    int x0b              = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
    int y0b              = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);

    /* neighbour modes default to DC when the neighbour CTB is unavailable */
    int cand_up   = (lc->ctb_up_flag || y0b) ?
                    s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
    int cand_left = (lc->ctb_left_flag || x0b) ?
                    s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;

    int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);

    MvField *tab_mvf = s->ref->tab_mvf;
    int intra_pred_mode;

    // intra_pred_mode prediction does not cross vertical CTB boundaries
    if ((y0 - 1) < y_ctb)

    if (cand_left == cand_up) {
        if (cand_left < 2) {
            /* both neighbours planar/DC: use the default candidate set */
            candidate[0] = INTRA_PLANAR;
            candidate[1] = INTRA_DC;
            candidate[2] = INTRA_ANGULAR_26;
            /* equal angular neighbours: the mode plus its two neighbours on
             * the 32-entry angular wheel */
            candidate[0] = cand_left;
            candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
            candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
        candidate[0] = cand_left;
        candidate[1] = cand_up;
        /* third candidate: first of planar, DC, vertical not already used */
        if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
            candidate[2] = INTRA_PLANAR;
        } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
            candidate[2] = INTRA_DC;
            candidate[2] = INTRA_ANGULAR_26;

    if (prev_intra_luma_pred_flag) {
        /* the coded mode is one of the three MPM candidates */
        intra_pred_mode = candidate[lc->pu.mpm_idx];
        /* rem_intra_luma_pred_mode indexes the 32 non-MPM modes: sort the
         * candidates, then step the mode index past each one it reaches */
        if (candidate[0] > candidate[1])
            FFSWAP(uint8_t, candidate[0], candidate[1]);
        if (candidate[0] > candidate[2])
            FFSWAP(uint8_t, candidate[0], candidate[2]);
        if (candidate[1] > candidate[2])
            FFSWAP(uint8_t, candidate[1], candidate[2]);

        intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
        for (i = 0; i < 3; i++)
            if (intra_pred_mode >= candidate[i])

    /* write the intra prediction units into the mv array */
    for (i = 0; i < size_in_pus; i++) {
        memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
               intra_pred_mode, size_in_pus);
        for (j = 0; j < size_in_pus; j++) {
            tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;

    return intra_pred_mode;
/*
 * Record the coding-tree depth ct_depth for every minimal coding block
 * covered by the CU at (x0, y0) of size 2^log2_cb_size, one memset per
 * row of s->tab_ct_depth.  Used later for CABAC context selection of
 * split flags.  (The memset's trailing value/length arguments are on a
 * continuation line not visible in this listing.)
 */
1905 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
1906 int log2_cb_size, int ct_depth)
1908 int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
1909 int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
1910 int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
1913 for (y = 0; y < length; y++)
1914 memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
/* Mapping of a derived chroma intra mode to the mode actually used for
 * 4:2:2 chroma (chroma_format_idc == 2); indexed by the intermediate
 * mode_idx computed in intra_prediction_unit(). */
1918 static const uint8_t tab_mode_idx[] = {
1919 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
1920 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
/*
 * Decode the intra prediction unit syntax for a CU: per-PU luma MPM
 * flags and modes (1 PU for PART_2Nx2N, 4 PUs for PART_NxN), then the
 * chroma mode(s), with the derivation depending on chroma_format_idc
 * (4:4:4 gets one chroma mode per PU, 4:2:2 goes through tab_mode_idx,
 * 4:2:0 uses the simple table, monochrome skips chroma entirely).
 */
1922 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
1925 HEVCLocalContext *lc = s->HEVClc;
/* Chroma mode table from the spec: index 0..3 -> planar/vertical/
 * horizontal/DC (0, 26, 10, 1); index 4 means "same as luma". */
1926 static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
1927 uint8_t prev_intra_luma_pred_flag[4];
1928 int split = lc->cu.part_mode == PART_NxN;
1929 int pb_size = (1 << log2_cb_size) >> split;
1930 int side = split + 1;
/* First decode all MPM flags (they are coded together), then the
 * per-PU mode syntax in a second pass. */
1934 for (i = 0; i < side; i++)
1935 for (j = 0; j < side; j++)
1936 prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
1938 for (i = 0; i < side; i++) {
1939 for (j = 0; j < side; j++) {
1940 if (prev_intra_luma_pred_flag[2 * i + j])
1941 lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
1943 lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
1945 lc->pu.intra_pred_mode[2 * i + j] =
1946 luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
1947 prev_intra_luma_pred_flag[2 * i + j]);
/* 4:4:4: chroma mode decoded per PU; mode 4 = copy luma, and a
 * collision with the table entry maps to angular 34. */
1951 if (s->ps.sps->chroma_format_idc == 3) {
1952 for (i = 0; i < side; i++) {
1953 for (j = 0; j < side; j++) {
1954 lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
1955 if (chroma_mode != 4) {
1956 if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
1957 lc->pu.intra_pred_mode_c[2 * i + j] = 34;
1959 lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
1961 lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
/* 4:2:2: single chroma mode, remapped through tab_mode_idx. */
1965 } else if (s->ps.sps->chroma_format_idc == 2) {
1967 lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
1968 if (chroma_mode != 4) {
1969 if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
1972 mode_idx = intra_chroma_table[chroma_mode];
1974 mode_idx = lc->pu.intra_pred_mode[0];
1976 lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
/* 4:2:0 (idc 1): single chroma mode via the table; idc 0 (mono)
 * decodes nothing. */
1977 } else if (s->ps.sps->chroma_format_idc != 0) {
1978 chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
1979 if (chroma_mode != 4) {
1980 if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
1981 lc->pu.intra_pred_mode_c[0] = 34;
1983 lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
1985 lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
/*
 * Fill default intra metadata for a CU that carries no explicit intra
 * syntax (skip/inter CUs and PCM): tab_ipm is set to INTRA_DC for every
 * covered minimal PU, and — only when the CU really is intra-coded
 * (PCM case) — the MvField entries are flagged PF_INTRA.
 */
1990 static void intra_prediction_unit_default_value(HEVCContext *s,
1994 HEVCLocalContext *lc = s->HEVClc;
1995 int pb_size = 1 << log2_cb_size;
1996 int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
1997 int min_pu_width = s->ps.sps->min_pu_width;
1998 MvField *tab_mvf = s->ref->tab_mvf;
1999 int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2000 int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
/* CU smaller than a minimal PU: nothing to record. */
2003 if (size_in_pus == 0)
2005 for (j = 0; j < size_in_pus; j++)
2006 memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2007 if (lc->cu.pred_mode == MODE_INTRA)
2008 for (j = 0; j < size_in_pus; j++)
2009 for (k = 0; k < size_in_pus; k++)
2010 tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
/*
 * Decode one coding unit at (x0, y0) of size 2^log2_cb_size: skip flag,
 * prediction mode, partition mode, then either the skip path, PCM
 * samples, intra modes or the per-partition inter prediction units,
 * followed by the residual transform tree, QP bookkeeping and the
 * coding-tree depth map update.  Returns 0 on success or a negative
 * error code (error paths are on lines not visible in this listing).
 */
2013 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2015 int cb_size = 1 << log2_cb_size;
2016 HEVCLocalContext *lc = s->HEVClc;
2017 int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2018 int length = cb_size >> log2_min_cb_size;
2019 int min_cb_width = s->ps.sps->min_cb_width;
2020 int x_cb = x0 >> log2_min_cb_size;
2021 int y_cb = y0 >> log2_min_cb_size;
2022 int idx = log2_cb_size - 2;
/* Mask used to detect crossing of a QP-group boundary (QP prediction
 * granularity = ctb size minus diff_cu_qp_delta_depth). */
2023 int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
/* Reset per-CU state to intra/2Nx2N defaults before parsing. */
2028 lc->cu.pred_mode = MODE_INTRA;
2029 lc->cu.part_mode = PART_2Nx2N;
2030 lc->cu.intra_split_flag = 0;
2032 SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2033 for (x = 0; x < 4; x++)
2034 lc->pu.intra_pred_mode[x] = 1;
2035 if (s->ps.pps->transquant_bypass_enable_flag) {
2036 lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2037 if (lc->cu.cu_transquant_bypass_flag)
2038 set_deblocking_bypass(s, x0, y0, log2_cb_size);
2040 lc->cu.cu_transquant_bypass_flag = 0;
/* Skip flag exists only outside I slices; propagate it (or zero) to
 * every minimal CB covered by this CU. */
2042 if (s->sh.slice_type != I_SLICE) {
2043 uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2045 x = y_cb * min_cb_width + x_cb;
2046 for (y = 0; y < length; y++) {
2047 memset(&s->skip_flag[x], skip_flag, length);
2050 lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2052 x = y_cb * min_cb_width + x_cb;
2053 for (y = 0; y < length; y++) {
2054 memset(&s->skip_flag[x], 0, length);
/* Skip CU: one merge-mode PU covering the whole CU, no residual. */
2059 if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2060 hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2061 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2063 if (!s->sh.disable_deblocking_filter_flag)
2064 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2068 if (s->sh.slice_type != I_SLICE)
2069 lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
/* part_mode is only signalled for inter CUs or minimal-size CUs. */
2070 if (lc->cu.pred_mode != MODE_INTRA ||
2071 log2_cb_size == s->ps.sps->log2_min_cb_size) {
2072 lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2073 lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2074 lc->cu.pred_mode == MODE_INTRA;
2077 if (lc->cu.pred_mode == MODE_INTRA) {
/* PCM: raw samples, only allowed for 2Nx2N within the SPS PCM
 * size bounds. */
2078 if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2079 log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2080 log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2081 pcm_flag = ff_hevc_pcm_flag_decode(s);
2084 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2085 ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2086 if (s->ps.sps->pcm.loop_filter_disable_flag)
2087 set_deblocking_bypass(s, x0, y0, log2_cb_size);
2092 intra_prediction_unit(s, x0, y0, log2_cb_size);
2095 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
/* Inter CU: decode one PU per partition; the sub-partition
 * geometry follows the H.265 partition modes (case labels are on
 * lines not visible in this listing). */
2096 switch (lc->cu.part_mode) {
2098 hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2101 hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2102 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2105 hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2106 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2109 hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2110 hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2113 hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2114 hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2117 hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2118 hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2121 hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2122 hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2125 hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2126 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2127 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2128 hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
/* Residual: rqt_root_cbf may be coded for non-merge inter CUs; if
 * any residual is present, recurse into the transform tree. */
2134 int rqt_root_cbf = 1;
2136 if (lc->cu.pred_mode != MODE_INTRA &&
2137 !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2138 rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
/* NOTE(review): conventional qualifier order is "static const". */
2141 const static int cbf[2] = { 0 };
2142 lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2143 s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2144 s->ps.sps->max_transform_hierarchy_depth_inter;
2145 ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2147 log2_cb_size, 0, 0, cbf, cbf);
2151 if (!s->sh.disable_deblocking_filter_flag)
2152 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
/* If no cu_qp_delta was coded in this QP group, derive qPy now and
 * propagate it to the whole CU area. */
2157 if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2158 ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2160 x = y_cb * min_cb_width + x_cb;
2161 for (y = 0; y < length; y++) {
2162 memset(&s->qp_y_tab[x], lc->qp_y, length);
/* Crossing a QP-group boundary: remember current QP as predictor. */
2166 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2167 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2168 lc->qPy_pred = lc->qp_y;
2171 set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
/*
 * Recursively decode the coding quadtree: decide (or infer, at picture
 * borders) whether the current block splits into four quadrants, reset
 * per-QP-group state, then either recurse into each in-picture quadrant
 * or decode a coding unit and the end_of_slice flag.  Returns >0 while
 * more CTU data follows, 0 at end of slice, negative on error.
 */
2176 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2177 int log2_cb_size, int cb_depth)
2179 HEVCLocalContext *lc = s->HEVClc;
2180 const int cb_size = 1 << log2_cb_size;
2184 lc->ct_depth = cb_depth;
/* split_cu is coded only when the block fits inside the picture and is
 * above minimum size; blocks crossing the border must split. */
2185 if (x0 + cb_size <= s->ps.sps->width &&
2186 y0 + cb_size <= s->ps.sps->height &&
2187 log2_cb_size > s->ps.sps->log2_min_cb_size) {
2188 split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2190 split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
/* Entering a new QP group: reset delta-QP coding state. */
2192 if (s->ps.pps->cu_qp_delta_enabled_flag &&
2193 log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2194 lc->tu.is_cu_qp_delta_coded = 0;
2195 lc->tu.cu_qp_delta = 0;
2198 if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2199 log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2200 lc->tu.is_cu_chroma_qp_offset_coded = 0;
2204 int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2205 const int cb_size_split = cb_size >> 1;
2206 const int x1 = x0 + cb_size_split;
2207 const int y1 = y0 + cb_size_split;
/* Recurse into the four quadrants; quadrants fully outside the
 * picture are skipped, and recursion stops early once a child
 * reports end-of-slice or error. */
2211 more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2215 if (more_data && x1 < s->ps.sps->width) {
2216 more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2220 if (more_data && y1 < s->ps.sps->height) {
2221 more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2225 if (more_data && x1 < s->ps.sps->width &&
2226 y1 < s->ps.sps->height) {
2227 more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2232 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2233 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2234 lc->qPy_pred = lc->qp_y;
/* After a split: more data follows unless this block reached the
 * bottom-right corner of the picture. */
2237 return ((x1 + cb_size_split) < s->ps.sps->width ||
2238 (y1 + cb_size_split) < s->ps.sps->height);
/* Leaf: decode the CU, then the end_of_slice flag at the last CU of
 * a CTB row/column boundary. */
2242 ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2245 if ((!((x0 + cb_size) %
2246 (1 << (s->ps.sps->log2_ctb_size))) ||
2247 (x0 + cb_size >= s->ps.sps->width)) &&
2249 (1 << (s->ps.sps->log2_ctb_size))) ||
2250 (y0 + cb_size >= s->ps.sps->height))) {
2251 int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2252 return !end_of_slice_flag;
/*
 * Set up per-CTB neighbourhood state before decoding CTB (x_ctb, y_ctb):
 * records the slice address for this CTB, computes the tile/picture
 * right and bottom limits (end_of_tiles_x/y), and derives the boundary
 * flags and left/up/up-left/up-right availability used by intra
 * prediction and the deblocking filter.
 */
2261 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2264 HEVCLocalContext *lc = s->HEVClc;
2265 int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2266 int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2267 int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2269 s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
/* WPP: each CTB row starts a fresh QP group. */
2271 if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2272 if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2273 lc->first_qp_group = 1;
2274 lc->end_of_tiles_x = s->ps.sps->width;
/* Tiles: on a tile change, recompute the right edge of the new tile. */
2275 } else if (s->ps.pps->tiles_enabled_flag) {
2276 if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2277 int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2278 lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2279 lc->first_qp_group = 1;
2282 lc->end_of_tiles_x = s->ps.sps->width;
2285 lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
/* Boundary flags: mark whether left/upper neighbours lie in another
 * tile or another slice. */
2287 lc->boundary_flags = 0;
2288 if (s->ps.pps->tiles_enabled_flag) {
2289 if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2290 lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2291 if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2292 lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2293 if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2294 lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2295 if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2296 lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2298 if (ctb_addr_in_slice <= 0)
2299 lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2300 if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2301 lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
/* Availability of neighbouring CTBs for prediction: in the picture,
 * in the same slice, and not across a tile boundary. */
2304 lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2305 lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2306 lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2307 lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
/*
 * Single-threaded slice decoding entry point (run via avctx->execute):
 * validates dependent-slice preconditions, then walks all CTBs of the
 * slice in tile-scan order — per CTB: neighbour setup, CABAC init, SAO
 * parameters, the coding quadtree, CABAC state save and in-loop
 * filtering.  Returns the final ctb_addr_ts (return statements are on
 * lines not visible in this listing) or a negative error code.
 */
2310 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2312 HEVCContext *s = avctxt->priv_data;
2313 int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2317 int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
/* A dependent slice segment cannot be the first segment of a picture. */
2319 if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2320 av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2321 return AVERROR_INVALIDDATA;
/* The preceding slice segment must already have been decoded. */
2324 if (s->sh.dependent_slice_segment_flag) {
2325 int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2326 if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2327 av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2328 return AVERROR_INVALIDDATA;
2332 while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2333 int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2335 x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2336 y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2337 hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2339 ff_hevc_cabac_init(s, ctb_addr_ts);
2341 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
/* Per-CTB deblocking parameters from the slice header. */
2343 s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2344 s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2345 s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2347 more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
/* On error, invalidate this CTB's slice address so later dependent
 * slices detect the gap. */
2348 if (more_data < 0) {
2349 s->tab_slice_address[ctb_addr_rs] = -1;
2355 ff_hevc_save_states(s, ctb_addr_ts);
2356 ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
/* Final filter pass once the bottom-right CTB has been decoded. */
2359 if (x_ctb + ctb_size >= s->ps.sps->width &&
2360 y_ctb + ctb_size >= s->ps.sps->height)
2361 ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
/*
 * Decode the slice data on a single thread by dispatching
 * hls_decode_entry through avctx->execute (local declarations and the
 * return are on lines not visible in this listing).
 */
2366 static int hls_slice_data(HEVCContext *s)
2374 s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
/*
 * Wavefront (WPP) worker: decodes one CTB row of the slice.  Each row
 * runs in its own per-thread HEVCContext (sList[self_id]); progress is
 * synchronized with the row above via ff_thread_await_progress2 /
 * ff_thread_report_progress2 so CABAC contexts from the second CTB of
 * the previous row are available.  On any error the shared wpp_err flag
 * aborts all rows.
 */
2377 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2379 HEVCContext *s1 = avctxt->priv_data, *s;
2380 HEVCLocalContext *lc;
2381 int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2383 int *ctb_row_p = input_ctb_row;
2384 int ctb_row = ctb_row_p[job];
2385 int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2386 int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2387 int thread = ctb_row % s1->threads_number;
2390 s = s1->sList[self_id];
/* Rows after the first start at their own entry point offset. */
2394 ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2398 ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2401 while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2402 int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2403 int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2405 hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
/* Wait until the row above is SHIFT_CTB_WPP CTBs ahead. */
2407 ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
/* Another row hit an error: report progress so nobody deadlocks,
 * then bail out. */
2409 if (avpriv_atomic_int_get(&s1->wpp_err)){
2410 ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2414 ff_hevc_cabac_init(s, ctb_addr_ts);
2415 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2416 more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2418 if (more_data < 0) {
2419 s->tab_slice_address[ctb_addr_rs] = -1;
2420 avpriv_atomic_int_set(&s1->wpp_err, 1);
2421 ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2427 ff_hevc_save_states(s, ctb_addr_ts);
2428 ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2429 ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
/* Premature end-of-slice mid-row (except the final entry point) is
 * a bitstream error: flag it for the other rows. */
2431 if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2432 avpriv_atomic_int_set(&s1->wpp_err, 1);
2433 ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
/* Bottom-right CTB reached: run the final filter pass. */
2437 if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2438 ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2439 ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2442 ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
/* End of the assigned row: report full progress and stop. */
2445 if(x_ctb >= s->ps.sps->width) {
2449 ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
/*
 * Wavefront (WPP) slice decoding driver: validates the entry-point
 * layout, clones per-thread HEVCContexts, computes the byte offset and
 * size of every CTB-row substream (compensating for emulation-
 * prevention bytes recorded in nal->skipped_bytes_pos), then dispatches
 * hls_decode_entry_wpp across all rows via avctx->execute2.
 */
2454 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2456 const uint8_t *data = nal->data;
2457 int length = nal->size;
2458 HEVCLocalContext *lc = s->HEVClc;
2459 int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2460 int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2462 int64_t startheader, cmpt = 0;
2468 return AVERROR(ENOMEM);
/* Entry points beyond the picture's CTB count means a broken header. */
2471 if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2472 av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2473 s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2474 s->ps.sps->ctb_width, s->ps.sps->ctb_height
2476 res = AVERROR_INVALIDDATA;
2480 ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
/* Lazily allocate one HEVCContext clone per worker thread. */
2483 for (i = 1; i < s->threads_number; i++) {
2484 s->sList[i] = av_malloc(sizeof(HEVCContext));
2485 memcpy(s->sList[i], s, sizeof(HEVCContext));
2486 s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2487 s->sList[i]->HEVClc = s->HEVClcList[i];
2491 offset = (lc->gb.index >> 3);
/* Count emulation-prevention bytes skipped before the first entry
 * point so offsets into the raw NAL payload line up. */
2493 for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2494 if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
/* Compute each substream's start offset and size, adjusting every
 * entry point by the emulation bytes inside its span. */
2500 for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2501 offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2502 for (j = 0, cmpt = 0, startheader = offset
2503 + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2504 if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2509 s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2510 s->sh.offset[i - 1] = offset;
/* Last substream runs to the end of the NAL; guard against offsets
 * past the payload. */
2513 if (s->sh.num_entry_point_offsets != 0) {
2514 offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2515 if (length < offset) {
2516 av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2517 res = AVERROR_INVALIDDATA;
2520 s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2521 s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
/* Refresh the clones with the current context state but keep their
 * own local contexts; seed QP from the main thread. */
2526 for (i = 1; i < s->threads_number; i++) {
2527 s->sList[i]->HEVClc->first_qp_group = 1;
2528 s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2529 memcpy(s->sList[i], s, sizeof(HEVCContext));
2530 s->sList[i]->HEVClc = s->HEVClcList[i];
2533 avpriv_atomic_int_set(&s->wpp_err, 0);
2534 ff_reset_entries(s->avctx);
2536 for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2541 if (s->ps.pps->entropy_coding_sync_enabled_flag)
2542 s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2544 for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
/*
 * Attach frame side data derived from SEI messages to the output frame:
 * stereo 3D packing, display orientation (rotation/flip matrix),
 * mastering display colour volume metadata, and A53 closed captions.
 * Returns 0 or AVERROR(ENOMEM) when a side-data allocation fails.
 */
2552 static int set_side_data(HEVCContext *s)
2554 AVFrame *out = s->ref->frame;
/* Frame packing SEI: only arrangement types 3..5 (side-by-side,
 * top-bottom, frame sequence) map onto AVStereo3D. */
2556 if (s->sei_frame_packing_present &&
2557 s->frame_packing_arrangement_type >= 3 &&
2558 s->frame_packing_arrangement_type <= 5 &&
2559 s->content_interpretation_type > 0 &&
2560 s->content_interpretation_type < 3) {
2561 AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2563 return AVERROR(ENOMEM);
2565 switch (s->frame_packing_arrangement_type) {
2567 if (s->quincunx_subsampling)
2568 stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2570 stereo->type = AV_STEREO3D_SIDEBYSIDE;
2573 stereo->type = AV_STEREO3D_TOPBOTTOM;
2576 stereo->type = AV_STEREO3D_FRAMESEQUENCE;
/* content_interpretation_type == 2: right view is first. */
2580 if (s->content_interpretation_type == 2)
2581 stereo->flags = AV_STEREO3D_FLAG_INVERT;
/* Display orientation SEI: rotation is signalled in 1/65536 turns;
 * convert to degrees and bake flips into the display matrix. */
2584 if (s->sei_display_orientation_present &&
2585 (s->sei_anticlockwise_rotation || s->sei_hflip || s->sei_vflip)) {
2586 double angle = s->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
2587 AVFrameSideData *rotation = av_frame_new_side_data(out,
2588 AV_FRAME_DATA_DISPLAYMATRIX,
2589 sizeof(int32_t) * 9);
2591 return AVERROR(ENOMEM);
2593 av_display_rotation_set((int32_t *)rotation->data, angle);
2594 av_display_matrix_flip((int32_t *)rotation->data,
2595 s->sei_hflip, s->sei_vflip);
2598 // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2599 // so the side data persists for the entire coded video sequence.
2600 if (s->sei_mastering_display_info_present > 0 &&
2601 IS_IRAP(s) && s->no_rasl_output_flag) {
2602 s->sei_mastering_display_info_present--;
2604 if (s->sei_mastering_display_info_present) {
2605 // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2606 const int mapping[3] = {2, 0, 1};
/* SEI chromaticity values are in 0.00002 units, luminance in
 * 0.0001 cd/m^2 — expressed as rationals with these denominators. */
2607 const int chroma_den = 50000;
2608 const int luma_den = 10000;
2610 AVMasteringDisplayMetadata *metadata =
2611 av_mastering_display_metadata_create_side_data(out);
2613 return AVERROR(ENOMEM);
2615 for (i = 0; i < 3; i++) {
2616 const int j = mapping[i];
2617 metadata->display_primaries[i][0].num = s->display_primaries[j][0];
2618 metadata->display_primaries[i][0].den = chroma_den;
2619 metadata->display_primaries[i][1].num = s->display_primaries[j][1];
2620 metadata->display_primaries[i][1].den = chroma_den;
2622 metadata->white_point[0].num = s->white_point[0];
2623 metadata->white_point[0].den = chroma_den;
2624 metadata->white_point[1].num = s->white_point[1];
2625 metadata->white_point[1].den = chroma_den;
2627 metadata->max_luminance.num = s->max_mastering_luminance;
2628 metadata->max_luminance.den = luma_den;
2629 metadata->min_luminance.num = s->min_mastering_luminance;
2630 metadata->min_luminance.den = luma_den;
2631 metadata->has_luminance = 1;
2632 metadata->has_primaries = 1;
2634 av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2635 av_log(s->avctx, AV_LOG_DEBUG,
2636 "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2637 av_q2d(metadata->display_primaries[0][0]),
2638 av_q2d(metadata->display_primaries[0][1]),
2639 av_q2d(metadata->display_primaries[1][0]),
2640 av_q2d(metadata->display_primaries[1][1]),
2641 av_q2d(metadata->display_primaries[2][0]),
2642 av_q2d(metadata->display_primaries[2][1]),
2643 av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2644 av_log(s->avctx, AV_LOG_DEBUG,
2645 "min_luminance=%f, max_luminance=%f\n",
2646 av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
/* A53 closed captions: copy the buffered payload into side data and
 * release the decoder-side copy. */
2649 if (s->a53_caption) {
2650 AVFrameSideData* sd = av_frame_new_side_data(out,
2651 AV_FRAME_DATA_A53_CC,
2652 s->a53_caption_size);
2654 memcpy(sd->data, s->a53_caption, s->a53_caption_size);
2655 av_freep(&s->a53_caption);
2656 s->a53_caption_size = 0;
2657 s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
/*
 * Begin decoding a new frame: clear the per-picture filter/metadata
 * tables, acquire a new reference frame, build the reference picture
 * sets, attach SEI side data, and output any frame that becomes ready.
 * On failure the partially set up frame is unreferenced (the fail label
 * line is not visible in this listing).
 */
2663 static int hevc_frame_start(HEVCContext *s)
2665 HEVCLocalContext *lc = s->HEVClc;
2666 int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2667 ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
/* Reset boundary strengths, cbf, PCM and slice-address maps; -1 marks
 * "not yet decoded" for dependent-slice validation. */
2670 memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2671 memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2672 memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2673 memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2674 memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2677 s->first_nal_type = s->nal_unit_type;
2679 if (s->ps.pps->tiles_enabled_flag)
2680 lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2682 ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2686 ret = ff_hevc_frame_rps(s);
2688 av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2692 s->ref->frame->key_frame = IS_IRAP(s);
2694 ret = set_side_data(s);
/* slice_type: 2=I,1=P,0=B → AV_PICTURE_TYPE_{I,P,B} = 1,2,3. */
2698 s->frame->pict_type = 3 - s->sh.slice_type;
2701 ff_hevc_bump_frame(s);
2703 av_frame_unref(s->output_frame);
2704 ret = ff_hevc_output_frame(s, s->output_frame, 0);
/* With a hwaccel, setup finishes later (after hwaccel init). */
2708 if (!s->avctx->hwaccel)
2709 ff_thread_finish_setup(s->avctx);
2715 ff_hevc_unref_frame(s, s->ref, ~0);
/*
 * Decode a single NAL unit: parameter sets (VPS/SPS/PPS) and SEI are
 * parsed into s->ps / SEI state; slice NALs get their header decoded,
 * RASL/broken-link handling, frame start on the first slice, reference
 * list construction and finally slice data decoding (software or
 * hwaccel).  Errors propagate only when AV_EF_EXPLODE is set (switch
 * case labels and some error paths are on lines not visible here).
 */
2720 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2722 HEVCLocalContext *lc = s->HEVClc;
2723 GetBitContext *gb = &lc->gb;
2724 int ctb_addr_ts, ret;
2727 s->nal_unit_type = nal->type;
2728 s->temporal_id = nal->temporal_id;
2730 switch (s->nal_unit_type) {
2732 ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2737 ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2738 s->apply_defdispwin);
2743 ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
2747 case NAL_SEI_PREFIX:
2748 case NAL_SEI_SUFFIX:
2749 ret = ff_hevc_decode_nal_sei(s);
2760 case NAL_BLA_W_RADL:
2762 case NAL_IDR_W_RADL:
2769 ret = hls_slice_header(s);
/* After a seek/stream start: wait for a recovery point (CRA/BLA)
 * before emitting pictures. */
2773 if (s->max_ra == INT_MAX) {
2774 if (s->nal_unit_type == NAL_CRA_NUT || IS_BLA(s)) {
2778 s->max_ra = INT_MIN;
/* RASL pictures preceding the recovery point are skipped. */
2782 if ((s->nal_unit_type == NAL_RASL_R || s->nal_unit_type == NAL_RASL_N) &&
2783 s->poc <= s->max_ra) {
2787 if (s->nal_unit_type == NAL_RASL_R && s->poc > s->max_ra)
2788 s->max_ra = INT_MIN;
2791 if (s->sh.first_slice_in_pic_flag) {
2792 ret = hevc_frame_start(s);
2795 } else if (!s->ref) {
2796 av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
/* All VCL NALs of one picture must share the same NAL type. */
2800 if (s->nal_unit_type != s->first_nal_type) {
2801 av_log(s->avctx, AV_LOG_ERROR,
2802 "Non-matching NAL types of the VCL NALUs: %d %d\n",
2803 s->first_nal_type, s->nal_unit_type);
2804 return AVERROR_INVALIDDATA;
2807 if (!s->sh.dependent_slice_segment_flag &&
2808 s->sh.slice_type != I_SLICE) {
2809 ret = ff_hevc_slice_rpl(s);
2811 av_log(s->avctx, AV_LOG_WARNING,
2812 "Error constructing the reference lists for the current slice.\n");
2817 if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
2818 ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
2823 if (s->avctx->hwaccel) {
2824 ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
/* Software path: WPP when multiple threads and entry points exist. */
2828 if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
2829 ctb_addr_ts = hls_slice_data_wpp(s, nal);
2831 ctb_addr_ts = hls_slice_data(s);
2832 if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
2836 if (ctb_addr_ts < 0) {
/* EOS/EOB: bump the sequence counter and re-arm recovery waiting. */
2844 s->seq_decode = (s->seq_decode + 1) & 0xff;
2845 s->max_ra = INT_MAX;
2851 av_log(s->avctx, AV_LOG_INFO,
2852 "Skipping NAL unit %d\n", s->nal_unit_type);
2857 if (s->avctx->err_recognition & AV_EF_EXPLODE)
/*
 * Split the input buffer into NAL units, scan for end-of-sequence /
 * end-of-bitstream markers, then decode each NAL in order.  On error,
 * decoding continues with the next NAL unless AV_EF_EXPLODE aborts
 * inside decode_nal_unit; finally frame-thread progress is reported.
 */
2862 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
2867 s->last_eos = s->eos;
2870 /* split the input packet into NAL units, so we know the upper bound on the
2871 * number of slices in the frame */
2872 ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
2873 s->nal_length_size, s->avctx->codec_id, 1);
2875 av_log(s->avctx, AV_LOG_ERROR,
2876 "Error splitting the input into NAL units.\n");
/* Pre-scan for EOS/EOB so the eos flag is known before decoding. */
2880 for (i = 0; i < s->pkt.nb_nals; i++) {
2881 if (s->pkt.nals[i].type == NAL_EOB_NUT ||
2882 s->pkt.nals[i].type == NAL_EOS_NUT)
2886 /* decode the NAL units */
2887 for (i = 0; i < s->pkt.nb_nals; i++) {
2888 ret = decode_nal_unit(s, &s->pkt.nals[i]);
2890 av_log(s->avctx, AV_LOG_WARNING,
2891 "Error parsing NAL unit #%d.\n", i);
/* Unblock any frame thread waiting on this picture. */
2897 if (s->ref && s->threads_type == FF_THREAD_FRAME)
2898 ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
/* Log a 16-byte MD5 digest as 32 lowercase hex characters (no newline). */
2903 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
2906 for (i = 0; i < 16; i++)
2907 av_log(log_ctx, level, "%02"PRIx8, md5[i]);
/*
 * Verify the decoded frame against the per-plane MD5 checksums carried
 * in the picture-hash SEI (stored in s->md5).  For >8-bit formats the
 * samples are byte-swapped to little-endian first, as the SEI checksums
 * are defined over LE sample bytes.  Returns 0 on match,
 * AVERROR_INVALIDDATA on the first mismatching plane.
 */
2910 static int verify_md5(HEVCContext *s, AVFrame *frame)
2912 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
2917 return AVERROR(EINVAL);
2919 pixel_shift = desc->comp[0].depth > 8;
2921 av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
2924 /* the checksums are LE, so we have to byteswap for >8bpp formats
2927 if (pixel_shift && !s->checksum_buf) {
2928 av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
2929 FFMAX3(frame->linesize[0], frame->linesize[1],
2930 frame->linesize[2]));
2931 if (!s->checksum_buf)
2932 return AVERROR(ENOMEM);
/* Hash every plane over the coded (not cropped) dimensions; chroma
 * planes are scaled by the format's subsampling factors. */
2936 for (i = 0; frame->data[i]; i++) {
2937 int width = s->avctx->coded_width;
2938 int height = s->avctx->coded_height;
2939 int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
2940 int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
2943 av_md5_init(s->md5_ctx);
2944 for (j = 0; j < h; j++) {
2945 const uint8_t *src = frame->data[i] + j * frame->linesize[i];
2948 s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
2949 (const uint16_t *) src, w);
2950 src = s->checksum_buf;
2953 av_md5_update(s->md5_ctx, src, w << pixel_shift);
2955 av_md5_final(s->md5_ctx, md5);
2957 if (!memcmp(md5, s->md5[i], 16)) {
2958 av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
2959 print_md5(s->avctx, AV_LOG_DEBUG, md5);
2960 av_log (s->avctx, AV_LOG_DEBUG, "; ");
2962 av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
2963 print_md5(s->avctx, AV_LOG_ERROR, md5);
2964 av_log (s->avctx, AV_LOG_ERROR, " != ");
2965 print_md5(s->avctx, AV_LOG_ERROR, s->md5[i]);
2966 av_log (s->avctx, AV_LOG_ERROR, "\n");
2967 return AVERROR_INVALIDDATA;
2971 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/*
 * Parse codec extradata: either hvcC-style (length-prefixed NAL arrays,
 * detected by the first bytes not looking like an Annex-B start code)
 * or raw Annex-B NAL units.  Also exports stream parameters from the
 * first SPS seen.  Returns 0 or a negative error code.
 */
2976 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length)
2978 AVCodecContext *avctx = s->avctx;
2982 bytestream2_init(&gb, buf, length);
2984 if (length > 3 && (buf[0] || buf[1] || buf[2] > 1)) {
2985 /* It seems the extradata is encoded as hvcC format.
2986 * Temporarily, we support configurationVersion==0 until 14496-15 3rd
2987 * is finalized. When finalized, configurationVersion will be 1 and we
2988 * can recognize hvcC by checking if avctx->extradata[0]==1 or not. */
2989 int i, j, num_arrays, nal_len_size;
/* Skip the 21-byte fixed hvcC header up to lengthSizeMinusOne. */
2993 bytestream2_skip(&gb, 21);
2994 nal_len_size = (bytestream2_get_byte(&gb) & 3) + 1;
2995 num_arrays = bytestream2_get_byte(&gb);
2997 /* nal units in the hvcC always have length coded with 2 bytes,
2998 * so put a fake nal_length_size = 2 while parsing them */
2999 s->nal_length_size = 2;
3001 /* Decode nal units from hvcC. */
3002 for (i = 0; i < num_arrays; i++) {
3003 int type = bytestream2_get_byte(&gb) & 0x3f;
3004 int cnt = bytestream2_get_be16(&gb);
3006 for (j = 0; j < cnt; j++) {
3007 // +2 for the nal size field
3008 int nalsize = bytestream2_peek_be16(&gb) + 2;
3009 if (bytestream2_get_bytes_left(&gb) < nalsize) {
3010 av_log(s->avctx, AV_LOG_ERROR,
3011 "Invalid NAL unit size in extradata.\n");
3012 return AVERROR_INVALIDDATA;
3015 ret = decode_nal_units(s, gb.buffer, nalsize);
3017 av_log(avctx, AV_LOG_ERROR,
3018 "Decoding nal unit %d %d from hvcC failed\n",
3022 bytestream2_skip(&gb, nalsize);
3026 /* Now store right nal length size, that will be used to parse
3028 s->nal_length_size = nal_len_size;
/* Annex-B extradata: decode the buffer directly. */
3031 ret = decode_nal_units(s, buf, length);
3036 /* export stream parameters from the first SPS */
3037 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3038 if (s->ps.sps_list[i]) {
3039 const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3040 export_stream_params(s->avctx, &s->ps, sps);
/* AVCodec.decode callback: decode one packet (or flush buffered frames
 * when draining) and possibly return one frame via *data / *got_output.
 * NOTE(review): interleaved source lines are missing from this excerpt
 * (the avpkt parameter, `ret` declaration, early returns and closing
 * braces); the comments below cover only the visible statements. */
3048 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
3052 int new_extradata_size;
3053 uint8_t *new_extradata;
3054 HEVCContext *s = avctx->priv_data;
/* Draining path: pop any frame still buffered in the DPB. */
3057 ret = ff_hevc_output_frame(s, data, 1);
/* Mid-stream parameter-set updates arrive as packet side data. */
3065 new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
3066 &new_extradata_size);
3067 if (new_extradata && new_extradata_size > 0) {
3068 ret = hevc_decode_extradata(s, new_extradata, new_extradata_size);
/* Core decode of the packet's NAL units. */
3074 ret = decode_nal_units(s, avpkt->data, avpkt->size);
/* With a hwaccel, finish the picture; drop the reference on failure. */
3078 if (avctx->hwaccel) {
3079 if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3080 av_log(avctx, AV_LOG_ERROR,
3081 "hardware accelerator failed to decode picture\n");
3082 ff_hevc_unref_frame(s, s->ref, ~0);
3086 /* verify the SEI checksum */
/* Only when CRC checking was requested and the frame fully decoded;
 * AV_EF_EXPLODE escalates a checksum mismatch to a hard error. */
3087 if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3089 ret = verify_md5(s, s->ref->frame);
3090 if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3091 ff_hevc_unref_frame(s, s->ref, ~0);
3098 if (s->is_decoded) {
3099 av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
/* Hand a pending output frame to the caller without copying. */
3103 if (s->output_frame->buf[0]) {
3104 av_frame_move_ref(data, s->output_frame);
3111 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3115 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3119 dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3120 if (!dst->tab_mvf_buf)
3122 dst->tab_mvf = src->tab_mvf;
3124 dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3125 if (!dst->rpl_tab_buf)
3127 dst->rpl_tab = src->rpl_tab;
3129 dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3133 dst->poc = src->poc;
3134 dst->ctb_count = src->ctb_count;
3135 dst->window = src->window;
3136 dst->flags = src->flags;
3137 dst->sequence = src->sequence;
3139 if (src->hwaccel_picture_private) {
3140 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3141 if (!dst->hwaccel_priv_buf)
3143 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3148 ff_hevc_unref_frame(s, dst, ~0);
3149 return AVERROR(ENOMEM);
/* AVCodec.close callback: release everything owned by the context —
 * MD5 and CABAC state, SAO pixel buffers, the output frame, every DPB
 * entry, all parameter-set lists, slice-header arrays, the per-thread
 * local contexts and the packet parser state.
 * NOTE(review): lines are missing from this excerpt (e.g. `int i;`,
 * the pic_arrays_free() call, some closing braces); the comments below
 * cover only what is visible. */
3152 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3154 HEVCContext *s = avctx->priv_data;
3159 av_freep(&s->md5_ctx);
3161 av_freep(&s->cabac_state);
3163 for (i = 0; i < 3; i++) {
3164 av_freep(&s->sao_pixel_buffer_h[i]);
3165 av_freep(&s->sao_pixel_buffer_v[i]);
3167 av_frame_free(&s->output_frame);
/* Unreference each DPB slot (passing ~0 as the flags mask), then free
 * the AVFrame wrapper itself. */
3169 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3170 ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3171 av_frame_free(&s->DPB[i].frame);
/* Parameter sets are refcounted AVBuffers — just drop our refs. */
3174 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++)
3175 av_buffer_unref(&s->ps.vps_list[i]);
3176 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++)
3177 av_buffer_unref(&s->ps.sps_list[i]);
3178 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++)
3179 av_buffer_unref(&s->ps.pps_list[i]);
3184 av_freep(&s->sh.entry_point_offset);
3185 av_freep(&s->sh.offset);
3186 av_freep(&s->sh.size);
/* Slice-thread contexts start at index 1; slot 0 may alias s->HEVClc
 * and is handled separately below. */
3188 for (i = 1; i < s->threads_number; i++) {
3189 HEVCLocalContext *lc = s->HEVClcList[i];
3191 av_freep(&s->HEVClcList[i]);
3192 av_freep(&s->sList[i]);
3195 if (s->HEVClc == s->HEVClcList[0])
3197 av_freep(&s->HEVClcList[0]);
3199 ff_h2645_packet_uninit(&s->pkt);
/* Allocate per-context state at init time: the first local (slice)
 * context, the CABAC state array, the reusable output frame, one
 * AVFrame per DPB slot, and the MD5 context used by verify_md5().
 * On any allocation failure the tail of the function tears everything
 * down via hevc_decode_free() and reports ENOMEM.
 * NOTE(review): missing lines in this excerpt include `int i;`, the
 * null checks after allocations and the `fail:` label — the comments
 * below cover only the visible statements. */
3204 static av_cold int hevc_init_context(AVCodecContext *avctx)
3206 HEVCContext *s = avctx->priv_data;
3211 s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3214 s->HEVClcList[0] = s->HEVClc;
3217 s->cabac_state = av_malloc(HEVC_CONTEXTS);
3218 if (!s->cabac_state)
3221 s->output_frame = av_frame_alloc();
3222 if (!s->output_frame)
3225 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3226 s->DPB[i].frame = av_frame_alloc();
3227 if (!s->DPB[i].frame)
/* Point the ThreadFrame wrapper at the AVFrame it manages. */
3229 s->DPB[i].tf.f = s->DPB[i].frame;
/* NOTE(review): max_ra is set to INT_MAX here and again in
 * hevc_decode_flush() — presumably a "no recovery point seen yet"
 * sentinel; confirm against the RAP handling elsewhere in the file. */
3232 s->max_ra = INT_MAX;
3234 s->md5_ctx = av_md5_alloc();
3238 ff_bswapdsp_init(&s->bdsp);
3240 s->context_initialized = 1;
3243 ff_hevc_reset_sei(s);
/* Failure path: free partial allocations and report OOM. */
3248 hevc_decode_free(avctx);
3249 return AVERROR(ENOMEM);
/* Frame-threading sync: copy decoder state from the source thread's
 * context (`s0`) into this thread's context (`s`) — DPB references,
 * VPS/SPS/PPS buffer references, sequence/POC bookkeeping, and the
 * threading parameters.
 * NOTE(review): missing lines in this excerpt include error checks,
 * closing braces, and the condition guarding the seq_decode bump at
 * the end; comments cover only the visible statements. */
3252 static int hevc_update_thread_context(AVCodecContext *dst,
3253 const AVCodecContext *src)
3255 HEVCContext *s = dst->priv_data;
3256 HEVCContext *s0 = src->priv_data;
/* Lazily initialize the destination context on its first update. */
3259 if (!s->context_initialized) {
3260 ret = hevc_init_context(dst);
/* Re-reference every occupied DPB slot from the source context. */
3265 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3266 ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3267 if (s0->DPB[i].frame->buf[0]) {
3268 ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3274 if (s->ps.sps != s0->ps.sps)
/* Parameter sets are refcounted: drop ours, then ref the source's. */
3276 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3277 av_buffer_unref(&s->ps.vps_list[i]);
3278 if (s0->ps.vps_list[i]) {
3279 s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]);
3280 if (!s->ps.vps_list[i])
3281 return AVERROR(ENOMEM);
3285 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3286 av_buffer_unref(&s->ps.sps_list[i]);
3287 if (s0->ps.sps_list[i]) {
3288 s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]);
3289 if (!s->ps.sps_list[i])
3290 return AVERROR(ENOMEM);
3294 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3295 av_buffer_unref(&s->ps.pps_list[i]);
3296 if (s0->ps.pps_list[i]) {
3297 s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]);
3298 if (!s->ps.pps_list[i])
3299 return AVERROR(ENOMEM);
/* If the active SPS changed, re-activate it with the source pix_fmt. */
3303 if (s->ps.sps != s0->ps.sps)
3304 if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3307 s->seq_decode = s0->seq_decode;
3308 s->seq_output = s0->seq_output;
3309 s->pocTid0 = s0->pocTid0;
3310 s->max_ra = s0->max_ra;
3312 s->no_rasl_output_flag = s0->no_rasl_output_flag;
3314 s->is_nalff = s0->is_nalff;
3315 s->nal_length_size = s0->nal_length_size;
3317 s->threads_number = s0->threads_number;
3318 s->threads_type = s0->threads_type;
/* NOTE(review): the guard for this new-sequence bump is on a missing
 * line — confirm the condition before editing.  seq_decode wraps at
 * 8 bits, and max_ra is reset to its INT_MAX sentinel. */
3321 s->seq_decode = (s->seq_decode + 1) & 0xff;
3322 s->max_ra = INT_MAX;
/* AVCodec.init callback: allocate context state, choose slice vs frame
 * threading, and parse container-level extradata when present.
 * NOTE(review): missing lines include `int ret;`, the error returns
 * after the calls below, and the `else` keywords between the visible
 * branches; comments cover only the visible statements. */
3328 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3330 HEVCContext *s = avctx->priv_data;
/* NOTE(review): presumably required so ff_thread progress tracking is
 * allocated for frame threading — confirm against pthread_frame docs. */
3333 avctx->internal->allocate_progress = 1;
3335 ret = hevc_init_context(avctx);
3339 s->enable_parallel_tiles = 0;
3340 s->picture_struct = 0;
/* Slice threading uses the caller's thread count; otherwise 1. */
3343 if(avctx->active_thread_type & FF_THREAD_SLICE)
3344 s->threads_number = avctx->thread_count;
3346 s->threads_number = 1;
/* Global headers (e.g. from MP4/Matroska) arrive via extradata. */
3348 if (avctx->extradata_size > 0 && avctx->extradata) {
3349 ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size);
/* On failure, tear down what hevc_init_context() allocated. */
3351 hevc_decode_free(avctx);
/* Prefer frame threading when enabled and more than one thread. */
3356 if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3357 s->threads_type = FF_THREAD_FRAME;
3359 s->threads_type = FF_THREAD_SLICE;
3364 static av_cold int hevc_init_thread_copy(AVCodecContext *avctx)
3366 HEVCContext *s = avctx->priv_data;
3369 memset(s, 0, sizeof(*s));
3371 ret = hevc_init_context(avctx);
3378 static void hevc_decode_flush(AVCodecContext *avctx)
3380 HEVCContext *s = avctx->priv_data;
3381 ff_hevc_flush_dpb(s);
3382 s->max_ra = INT_MAX;
/* Private AVOption table and AVClass for the decoder.
 * NOTE(review): closing lines of both aggregates (the { NULL }
 * terminator and `};`) are missing from this excerpt. */
3386 #define OFFSET(x) offsetof(HEVCContext, x)
3387 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3389 static const AVOption options[] = {
/* NOTE(review): both options map to the same field,
 * OFFSET(apply_defdispwin) — confirm "strict-displaywin" is an
 * intentional alias.  The help string's "stricly" typo is a
 * user-visible runtime string, so it is only flagged here rather than
 * changed. */
3390 { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3391 AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3392 { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
3393 AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3397 static const AVClass hevc_decoder_class = {
3398 .class_name = "HEVC decoder",
3399 .item_name = av_default_item_name,
3401 .version = LIBAVUTIL_VERSION_INT,
3404 AVCodec ff_hevc_decoder = {
3406 .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3407 .type = AVMEDIA_TYPE_VIDEO,
3408 .id = AV_CODEC_ID_HEVC,
3409 .priv_data_size = sizeof(HEVCContext),
3410 .priv_class = &hevc_decoder_class,
3411 .init = hevc_decode_init,
3412 .close = hevc_decode_free,
3413 .decode = hevc_decode_frame,
3414 .flush = hevc_decode_flush,
3415 .update_thread_context = hevc_update_thread_context,
3416 .init_thread_copy = hevc_init_thread_copy,
3417 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3418 AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3419 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
3420 .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),