4 * Copyright (C) 2012 - 2013 Guillaume Martres
5 * Copyright (C) 2012 - 2013 Mickael Raulet
6 * Copyright (C) 2012 - 2013 Gildas Cocherel
7 * Copyright (C) 2012 - 2013 Wassim Hamidouche
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "libavutil/atomic.h"
27 #include "libavutil/attributes.h"
28 #include "libavutil/common.h"
29 #include "libavutil/display.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/mastering_display_metadata.h"
32 #include "libavutil/md5.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/stereo3d.h"
38 #include "bytestream.h"
39 #include "cabac_functions.h"
// Lookup table mapping a prediction-block width (2..64) to a compact index
// (0..9). Only the listed widths are populated; all other entries stay 0.
44 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
47 * NOTE: Each function hls_foo correspond to the function foo in the
48 * specification (HLS stands for High Level Syntax).
55 /* free everything allocated by pic_arrays_init() */
56 static void pic_arrays_free(HEVCContext *s)
// Per-CTB deblock filter state
59 av_freep(&s->deblock);
// Per-CB skip flags and coding-tree depth map
61 av_freep(&s->skip_flag);
62 av_freep(&s->tab_ct_depth);
// Intra prediction modes and luma coded-block flags
64 av_freep(&s->tab_ipm);
65 av_freep(&s->cbf_luma);
// Per-CTB luma QP map and slice bookkeeping tables
68 av_freep(&s->qp_y_tab);
69 av_freep(&s->tab_slice_address);
70 av_freep(&s->filter_slice_edges);
// Deblocking boundary-strength maps (horizontal/vertical edges)
72 av_freep(&s->horizontal_bs);
73 av_freep(&s->vertical_bs);
// Slice-header entry-point arrays (allocated by hls_slice_header())
75 av_freep(&s->sh.entry_point_offset);
76 av_freep(&s->sh.size);
77 av_freep(&s->sh.offset);
// Buffer pools for per-frame motion fields and reference picture lists
79 av_buffer_pool_uninit(&s->tab_mvf_pool);
80 av_buffer_pool_uninit(&s->rpl_tab_pool);
83 /* allocate arrays that depend on frame dimensions */
84 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
86     int log2_min_cb_size = sps->log2_min_cb_size;
87     int width             = sps->width;
88     int height            = sps->height;
// Number of minimum-size coding blocks covering the picture (+1 per
// dimension for the partially-covered right/bottom edge)
89 int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
90 ((height >> log2_min_cb_size) + 1);
91 int ctb_count = sps->ctb_width * sps->ctb_height;
92 int min_pu_size = sps->min_pu_width * sps->min_pu_height;
// Boundary-strength grid is sampled on a 4x4 luma grid
94 s->bs_width = (width >> 2) + 1;
95 s->bs_height = (height >> 2) + 1;
// Per-CTB SAO and deblock parameter arrays (zero-initialized)
97 s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao));
98 s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock));
99 if (!s->sao || !s->deblock)
// Per-minimum-CB maps (1 byte per entry)
102 s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
103 s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
104 if (!s->skip_flag || !s->tab_ct_depth)
// Per-TB / per-PU maps: luma CBF, intra modes, PCM flags
107 s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
108 s->tab_ipm = av_mallocz(min_pu_size);
109 s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
110 if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
113 s->filter_slice_edges = av_mallocz(ctb_count);
114 s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
115 sizeof(*s->tab_slice_address));
116 s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
117 sizeof(*s->qp_y_tab));
118 if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
// Deblocking boundary-strength maps (zeroed)
121 s->horizontal_bs = av_mallocz_array(s->bs_width, s->bs_height);
122 s->vertical_bs = av_mallocz_array(s->bs_width, s->bs_height);
123 if (!s->horizontal_bs || !s->vertical_bs)
// Pools for per-frame buffers that are refcounted alongside the frame
126 s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
128 s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
130 if (!s->tab_mvf_pool || !s->rpl_tab_pool)
// Failure path: everything allocated so far is released by the caller /
// cleanup label (not visible here) before reporting out-of-memory.
137 return AVERROR(ENOMEM);
// Parse the weighted-prediction table (pred_weight_table(), H.265 7.3.6.3)
// from the slice header into s->sh. Fills luma/chroma weights and offsets
// for list L0 and, for B slices, list L1. When a per-entry flag is absent
// the spec-default weight (1 << denom) and offset 0 are used.
// NOTE(review): the local flag arrays hold 16 entries; assumes
// sh.nb_refs[] <= 16 — enforced by the slice-header parser (TODO confirm
// MAX_REFS == 16).
140 static void pred_weight_table(HEVCContext *s, GetBitContext *gb)
144     uint8_t luma_weight_l0_flag[16];
145     uint8_t chroma_weight_l0_flag[16];
146     uint8_t luma_weight_l1_flag[16];
147     uint8_t chroma_weight_l1_flag[16];
148     int luma_log2_weight_denom;
// luma_log2_weight_denom must be 0..7; out-of-range values are reported
// then clamped (av_clip_uintp2) rather than failing hard.
150 luma_log2_weight_denom = get_ue_golomb_long(gb);
151 if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7)
152 av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
153 s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
154 if (s->ps.sps->chroma_format_idc != 0) {
// Chroma denom is coded as a delta relative to the luma denom.
155 int delta = get_se_golomb(gb);
156 s->sh.chroma_log2_weight_denom = av_clip_uintp2(s->sh.luma_log2_weight_denom + delta, 3);
// ---- list L0 ----
159 for (i = 0; i < s->sh.nb_refs[L0]; i++) {
160 luma_weight_l0_flag[i] = get_bits1(gb);
161 if (!luma_weight_l0_flag[i]) {
// No explicit weight: default weight, zero offset.
162 s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
163 s->sh.luma_offset_l0[i] = 0;
166 if (s->ps.sps->chroma_format_idc != 0) {
167 for (i = 0; i < s->sh.nb_refs[L0]; i++)
168 chroma_weight_l0_flag[i] = get_bits1(gb);
// Monochrome: no chroma weights signaled.
170 for (i = 0; i < s->sh.nb_refs[L0]; i++)
171 chroma_weight_l0_flag[i] = 0;
173 for (i = 0; i < s->sh.nb_refs[L0]; i++) {
174 if (luma_weight_l0_flag[i]) {
175 int delta_luma_weight_l0 = get_se_golomb(gb);
176 s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
177 s->sh.luma_offset_l0[i] = get_se_golomb(gb);
179 if (chroma_weight_l0_flag[i]) {
// j = 0 is Cb, j = 1 is Cr; offset derivation per H.265 eq. for
// delta_chroma_offset, clipped to the 8-bit offset range.
180 for (j = 0; j < 2; j++) {
181 int delta_chroma_weight_l0 = get_se_golomb(gb);
182 int delta_chroma_offset_l0 = get_se_golomb(gb);
183 s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
184 s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
185 >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
// Defaults when chroma weighting is not signaled for this ref.
188 s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
189 s->sh.chroma_offset_l0[i][0] = 0;
190 s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
191 s->sh.chroma_offset_l0[i][1] = 0;
// ---- list L1 (B slices only); mirrors the L0 parsing above ----
194 if (s->sh.slice_type == B_SLICE) {
195 for (i = 0; i < s->sh.nb_refs[L1]; i++) {
196 luma_weight_l1_flag[i] = get_bits1(gb);
197 if (!luma_weight_l1_flag[i]) {
198 s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
199 s->sh.luma_offset_l1[i] = 0;
202 if (s->ps.sps->chroma_format_idc != 0) {
203 for (i = 0; i < s->sh.nb_refs[L1]; i++)
204 chroma_weight_l1_flag[i] = get_bits1(gb);
206 for (i = 0; i < s->sh.nb_refs[L1]; i++)
207 chroma_weight_l1_flag[i] = 0;
209 for (i = 0; i < s->sh.nb_refs[L1]; i++) {
210 if (luma_weight_l1_flag[i]) {
211 int delta_luma_weight_l1 = get_se_golomb(gb);
212 s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
213 s->sh.luma_offset_l1[i] = get_se_golomb(gb);
215 if (chroma_weight_l1_flag[i]) {
216 for (j = 0; j < 2; j++) {
217 int delta_chroma_weight_l1 = get_se_golomb(gb);
218 int delta_chroma_offset_l1 = get_se_golomb(gb);
219 s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
220 s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
221 >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
224 s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
225 s->sh.chroma_offset_l1[i][0] = 0;
226 s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
227 s->sh.chroma_offset_l1[i][1] = 0;
// Decode the long-term reference picture set from the slice header
// (H.265 7.3.6.1). Entries come from two sources: up to nb_sps entries
// selected by index from the SPS-predefined LT set, followed by nb_sh
// entries coded explicitly in the slice header. Fills rps->poc[] and
// rps->used[]; returns 0 on success or AVERROR_INVALIDDATA if the
// combined count exceeds the rps->poc array.
233 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
235     const HEVCSPS *sps = s->ps.sps;
236     int max_poc_lsb    = 1 << sps->log2_max_poc_lsb;
237     int prev_delta_msb = 0;
238     unsigned int nb_sps = 0, nb_sh;
// Nothing to do when the SPS disables long-term references.
242 if (!sps->long_term_ref_pics_present_flag)
245 if (sps->num_long_term_ref_pics_sps > 0)
246 nb_sps = get_ue_golomb_long(gb);
247 nb_sh = get_ue_golomb_long(gb);
// 64-bit addition guards against overflow of the combined count.
249 if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
250 return AVERROR_INVALIDDATA;
252 rps->nb_refs = nb_sh + nb_sps;
254 for (i = 0; i < rps->nb_refs; i++) {
255 uint8_t delta_poc_msb_present;
258 uint8_t lt_idx_sps = 0;
// First nb_sps entries reference the SPS LT-RPS table by index...
260 if (sps->num_long_term_ref_pics_sps > 1)
261 lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
263 rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
264 rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
// ...remaining entries carry an explicit POC LSB and used flag.
266 rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
267 rps->used[i] = get_bits1(gb);
// Optional MSB cycle: deltas are differential within each of the two
// groups (reset at the SPS/slice-header boundary, i == nb_sps).
270 delta_poc_msb_present = get_bits1(gb);
271 if (delta_poc_msb_present) {
272 int delta = get_ue_golomb_long(gb);
274 if (i && i != nb_sps)
275 delta += prev_delta_msb;
277 rps->poc[i] += s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
278 prev_delta_msb = delta;
// Propagate stream-level parameters from the active SPS/VPS (and its VUI)
// to the AVCodecContext: dimensions, profile/level, reorder depth, sample
// aspect ratio, color properties and frame rate.
285 static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps,
288     const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
289     unsigned int num = 0, den = 0;
291 avctx->pix_fmt = sps->pix_fmt;
292 avctx->coded_width = sps->width;
293 avctx->coded_height = sps->height;
294 avctx->width = sps->output_width;
295 avctx->height = sps->output_height;
// Maximum reorder depth of the highest temporal sub-layer.
296 avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
297 avctx->profile = sps->ptl.general_ptl.profile_idc;
298 avctx->level = sps->ptl.general_ptl.level_idc;
300 ff_set_sar(avctx, sps->vui.sar);
302 if (sps->vui.video_signal_type_present_flag)
303 avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
// Default to limited (MPEG) range when VUI gives no signal type.
306 avctx->color_range = AVCOL_RANGE_MPEG;
308 if (sps->vui.colour_description_present_flag) {
309 avctx->color_primaries = sps->vui.colour_primaries;
310 avctx->color_trc = sps->vui.transfer_characteristic;
311 avctx->colorspace = sps->vui.matrix_coeffs;
313 avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
314 avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
315 avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
// Timing info: VPS takes precedence over the SPS VUI.
318 if (vps->vps_timing_info_present_flag) {
319 num = vps->vps_num_units_in_tick;
320 den = vps->vps_time_scale;
321 } else if (sps->vui.vui_timing_info_present_flag) {
322 num = sps->vui.vui_num_units_in_tick;
323 den = sps->vui.vui_time_scale;
// num/den are tick duration, so they land swapped in framerate (den/num).
326 if (num != 0 && den != 0)
327 av_reduce(&avctx->framerate.den, &avctx->framerate.num,
// Activate a new SPS: (re)allocate per-frame arrays, export stream
// parameters, negotiate the output pixel format (offering hwaccel formats
// when the build supports them and pix_fmt == AV_PIX_FMT_NONE), init the
// DSP/prediction contexts for the SPS bit depth, and (re)allocate the SAO
// line buffers. Returns 0 on success, a negative AVERROR otherwise.
331 static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
333 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + CONFIG_HEVC_D3D11VA_HWACCEL + CONFIG_HEVC_VAAPI_HWACCEL + CONFIG_HEVC_VDPAU_HWACCEL)
334     enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
344 ret = pic_arrays_init(s, sps);
348 export_stream_params(s->avctx, &s->ps, sps);
// Build the candidate format list: hwaccel formats depend on both the
// configured accelerators and the stream's software pixel format.
350 switch (sps->pix_fmt) {
351 case AV_PIX_FMT_YUV420P:
352 case AV_PIX_FMT_YUVJ420P:
353 #if CONFIG_HEVC_DXVA2_HWACCEL
354 *fmt++ = AV_PIX_FMT_DXVA2_VLD;
356 #if CONFIG_HEVC_D3D11VA_HWACCEL
357 *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
359 #if CONFIG_HEVC_VAAPI_HWACCEL
360 *fmt++ = AV_PIX_FMT_VAAPI;
362 #if CONFIG_HEVC_VDPAU_HWACCEL
363 *fmt++ = AV_PIX_FMT_VDPAU;
// 10-bit 4:2:0: only DXVA2/D3D11VA are offered here.
366 case AV_PIX_FMT_YUV420P10:
367 #if CONFIG_HEVC_DXVA2_HWACCEL
368 *fmt++ = AV_PIX_FMT_DXVA2_VLD;
370 #if CONFIG_HEVC_D3D11VA_HWACCEL
371 *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
// No caller-forced format: append the software format, terminate the
// list and let ff_thread_get_format() pick.
376 if (pix_fmt == AV_PIX_FMT_NONE) {
377 *fmt++ = sps->pix_fmt;
378 *fmt = AV_PIX_FMT_NONE;
380 ret = ff_thread_get_format(s->avctx, pix_fmts);
383 s->avctx->pix_fmt = ret;
// Caller forced a specific format (e.g. on flush/reinit).
386 s->avctx->pix_fmt = pix_fmt;
// Bit-depth-dependent function pointer tables.
389 ff_hevc_pred_init(&s->hpc, sps->bit_depth);
390 ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
391 ff_videodsp_init (&s->vdsp, sps->bit_depth);
// Drop any SAO line buffers from a previous SPS before reallocating.
393 for (i = 0; i < 3; i++) {
394 av_freep(&s->sao_pixel_buffer_h[i]);
395 av_freep(&s->sao_pixel_buffer_v[i]);
// SAO needs saved pixel rows/columns per plane; skipped for hwaccel
// since filtering then happens in hardware.
398 if (sps->sao_enabled && !s->avctx->hwaccel) {
399 int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
402 for(c_idx = 0; c_idx < c_count; c_idx++) {
403 int w = sps->width >> sps->hshift[c_idx];
404 int h = sps->height >> sps->vshift[c_idx];
405 s->sao_pixel_buffer_h[c_idx] =
406 av_malloc((w * 2 * sps->ctb_height) <<
408 s->sao_pixel_buffer_v[c_idx] =
409 av_malloc((h * 2 * sps->ctb_width) <<
// Keep the VPS pointer in sync with the newly activated SPS.
415 s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
// Parse a slice segment header (H.265 7.3.6) into s->sh and update the
// decoder state that depends on it (active PPS/SPS, POC, reference
// picture sets, per-slice tool flags, entry points). Returns 0 on
// success or a negative AVERROR on invalid bitstream data / OOM.
425 static int hls_slice_header(HEVCContext *s)
427     GetBitContext *gb = &s->HEVClc->gb;
428     SliceHeader *sh   = &s->sh;
// A first slice of an IDR/BLA picture starts a new coded sequence.
432 sh->first_slice_in_pic_flag = get_bits1(gb);
433 if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
434 s->seq_decode = (s->seq_decode + 1) & 0xff;
437 ff_hevc_clear_refs(s);
439 sh->no_output_of_prior_pics_flag = 0;
441 sh->no_output_of_prior_pics_flag = get_bits1(gb);
// ---- parameter set activation ----
443 sh->pps_id = get_ue_golomb_long(gb);
444 if (sh->pps_id >= MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
445 av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
446 return AVERROR_INVALIDDATA;
// The PPS must stay constant within one picture.
448 if (!sh->first_slice_in_pic_flag &&
449 s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
450 av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
451 return AVERROR_INVALIDDATA;
453 s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
454 if (s->nal_unit_type == NAL_CRA_NUT && s->last_eos == 1)
455 sh->no_output_of_prior_pics_flag = 1;
// SPS change: re-run set_sps() and bump the sequence counter.
457 if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
458 const HEVCSPS* last_sps = s->ps.sps;
459 s->ps.sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
460 if (last_sps && IS_IRAP(s) && s->nal_unit_type != NAL_CRA_NUT) {
461 if (s->ps.sps->width != last_sps->width || s->ps.sps->height != last_sps->height ||
462 s->ps.sps->temporal_layer[s->ps.sps->max_sub_layers - 1].max_dec_pic_buffering !=
463 last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
464 sh->no_output_of_prior_pics_flag = 0;
466 ff_hevc_clear_refs(s);
467 ret = set_sps(s, s->ps.sps, AV_PIX_FMT_NONE);
471 s->seq_decode = (s->seq_decode + 1) & 0xff;
// ---- slice segment address ----
475 sh->dependent_slice_segment_flag = 0;
476 if (!sh->first_slice_in_pic_flag) {
477 int slice_address_length;
479 if (s->ps.pps->dependent_slice_segments_enabled_flag)
480 sh->dependent_slice_segment_flag = get_bits1(gb);
482 slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
483 s->ps.sps->ctb_height);
484 sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
485 if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
486 av_log(s->avctx, AV_LOG_ERROR,
487 "Invalid slice segment address: %u.\n",
488 sh->slice_segment_addr);
489 return AVERROR_INVALIDDATA;
492 if (!sh->dependent_slice_segment_flag) {
493 sh->slice_addr = sh->slice_segment_addr;
497 sh->slice_segment_addr = sh->slice_addr = 0;
499 s->slice_initialized = 0;
// ---- independent slice segment: full header follows ----
502 if (!sh->dependent_slice_segment_flag) {
503 s->slice_initialized = 0;
505 for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
506 skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
508 sh->slice_type = get_ue_golomb_long(gb);
509 if (!(sh->slice_type == I_SLICE ||
510 sh->slice_type == P_SLICE ||
511 sh->slice_type == B_SLICE)) {
512 av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
514 return AVERROR_INVALIDDATA;
516 if (IS_IRAP(s) && sh->slice_type != I_SLICE) {
517 av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
518 return AVERROR_INVALIDDATA;
521 // when flag is not present, picture is inferred to be output
522 sh->pic_output_flag = 1;
523 if (s->ps.pps->output_flag_present_flag)
524 sh->pic_output_flag = get_bits1(gb);
526 if (s->ps.sps->separate_colour_plane_flag)
527 sh->colour_plane_id = get_bits(gb, 2);
// ---- picture order count & reference picture sets ----
532 sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
533 poc = ff_hevc_compute_poc(s, sh->pic_order_cnt_lsb);
534 if (!sh->first_slice_in_pic_flag && poc != s->poc) {
535 av_log(s->avctx, AV_LOG_WARNING,
536 "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
537 if (s->avctx->err_recognition & AV_EF_EXPLODE)
538 return AVERROR_INVALIDDATA;
// Short-term RPS: coded in the slice header or selected from the SPS.
543 sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
544 pos = get_bits_left(gb);
545 if (!sh->short_term_ref_pic_set_sps_flag) {
546 ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
550 sh->short_term_rps = &sh->slice_rps;
552 int numbits, rps_idx;
554 if (!s->ps.sps->nb_st_rps) {
555 av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
556 return AVERROR_INVALIDDATA;
559 numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
560 rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
561 sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
// Sizes are tracked in bits for hwaccel submission of the raw header.
563 sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
565 pos = get_bits_left(gb);
566 ret = decode_lt_rps(s, &sh->long_term_rps, gb);
568 av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
569 if (s->avctx->err_recognition & AV_EF_EXPLODE)
570 return AVERROR_INVALIDDATA;
572 sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
574 if (s->ps.sps->sps_temporal_mvp_enabled_flag)
575 sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
577 sh->slice_temporal_mvp_enabled_flag = 0;
579 s->sh.short_term_rps = NULL;
// Sub-layer non-reference NAL types are excluded from this check.
584 if (s->temporal_id == 0 &&
585 s->nal_unit_type != NAL_TRAIL_N &&
586 s->nal_unit_type != NAL_TSA_N &&
587 s->nal_unit_type != NAL_STSA_N &&
588 s->nal_unit_type != NAL_RADL_N &&
589 s->nal_unit_type != NAL_RADL_R &&
590 s->nal_unit_type != NAL_RASL_N &&
591 s->nal_unit_type != NAL_RASL_R)
// ---- SAO enables: luma flag, then a single shared chroma flag ----
594 if (s->ps.sps->sao_enabled) {
595 sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
596 if (s->ps.sps->chroma_format_idc) {
597 sh->slice_sample_adaptive_offset_flag[1] =
598 sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
601 sh->slice_sample_adaptive_offset_flag[0] = 0;
602 sh->slice_sample_adaptive_offset_flag[1] = 0;
603 sh->slice_sample_adaptive_offset_flag[2] = 0;
// ---- reference list sizes and modification (P/B slices) ----
606 sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
607 if (sh->slice_type == P_SLICE || sh->slice_type == B_SLICE) {
610 sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
611 if (sh->slice_type == B_SLICE)
612 sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
614 if (get_bits1(gb)) { // num_ref_idx_active_override_flag
615 sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
616 if (sh->slice_type == B_SLICE)
617 sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
619 if (sh->nb_refs[L0] > MAX_REFS || sh->nb_refs[L1] > MAX_REFS) {
620 av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
621 sh->nb_refs[L0], sh->nb_refs[L1]);
622 return AVERROR_INVALIDDATA;
625 sh->rpl_modification_flag[0] = 0;
626 sh->rpl_modification_flag[1] = 0;
627 nb_refs = ff_hevc_frame_nb_refs(s);
629 av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
630 return AVERROR_INVALIDDATA;
633 if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
634 sh->rpl_modification_flag[0] = get_bits1(gb);
635 if (sh->rpl_modification_flag[0]) {
636 for (i = 0; i < sh->nb_refs[L0]; i++)
637 sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
640 if (sh->slice_type == B_SLICE) {
641 sh->rpl_modification_flag[1] = get_bits1(gb);
642 if (sh->rpl_modification_flag[1] == 1)
643 for (i = 0; i < sh->nb_refs[L1]; i++)
644 sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
648 if (sh->slice_type == B_SLICE)
649 sh->mvd_l1_zero_flag = get_bits1(gb);
651 if (s->ps.pps->cabac_init_present_flag)
652 sh->cabac_init_flag = get_bits1(gb);
654 sh->cabac_init_flag = 0;
// ---- temporal MVP collocated picture selection ----
656 sh->collocated_ref_idx = 0;
657 if (sh->slice_temporal_mvp_enabled_flag) {
658 sh->collocated_list = L0;
659 if (sh->slice_type == B_SLICE)
660 sh->collocated_list = !get_bits1(gb);
662 if (sh->nb_refs[sh->collocated_list] > 1) {
663 sh->collocated_ref_idx = get_ue_golomb_long(gb);
664 if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
665 av_log(s->avctx, AV_LOG_ERROR,
666 "Invalid collocated_ref_idx: %d.\n",
667 sh->collocated_ref_idx);
668 return AVERROR_INVALIDDATA;
// ---- weighted prediction & merge candidates ----
673 if ((s->ps.pps->weighted_pred_flag && sh->slice_type == P_SLICE) ||
674 (s->ps.pps->weighted_bipred_flag && sh->slice_type == B_SLICE)) {
675 pred_weight_table(s, gb);
678 sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
679 if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
680 av_log(s->avctx, AV_LOG_ERROR,
681 "Invalid number of merging MVP candidates: %d.\n",
682 sh->max_num_merge_cand);
683 return AVERROR_INVALIDDATA;
// ---- QP offsets ----
687 sh->slice_qp_delta = get_se_golomb(gb);
689 if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
690 sh->slice_cb_qp_offset = get_se_golomb(gb);
691 sh->slice_cr_qp_offset = get_se_golomb(gb);
693 sh->slice_cb_qp_offset = 0;
694 sh->slice_cr_qp_offset = 0;
697 if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
698 sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
700 sh->cu_chroma_qp_offset_enabled_flag = 0;
// ---- deblocking filter overrides ----
702 if (s->ps.pps->deblocking_filter_control_present_flag) {
703 int deblocking_filter_override_flag = 0;
705 if (s->ps.pps->deblocking_filter_override_enabled_flag)
706 deblocking_filter_override_flag = get_bits1(gb);
708 if (deblocking_filter_override_flag) {
709 sh->disable_deblocking_filter_flag = get_bits1(gb);
710 if (!sh->disable_deblocking_filter_flag) {
// Offsets are coded divided by two (H.265 semantics).
711 sh->beta_offset = get_se_golomb(gb) * 2;
712 sh->tc_offset = get_se_golomb(gb) * 2;
715 sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
716 sh->beta_offset = s->ps.pps->beta_offset;
717 sh->tc_offset = s->ps.pps->tc_offset;
720 sh->disable_deblocking_filter_flag = 0;
725 if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
726 (sh->slice_sample_adaptive_offset_flag[0] ||
727 sh->slice_sample_adaptive_offset_flag[1] ||
728 !sh->disable_deblocking_filter_flag)) {
729 sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
731 sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
// Dependent segment without a preceding independent one is invalid.
733 } else if (!s->slice_initialized) {
734 av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
735 return AVERROR_INVALIDDATA;
// ---- entry points for tiles / WPP ----
738 sh->num_entry_point_offsets = 0;
739 if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
740 unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
741 // It would be possible to bound this tighter but this here is simpler
742 if (num_entry_point_offsets > get_bits_left(gb)) {
743 av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
744 return AVERROR_INVALIDDATA;
747 sh->num_entry_point_offsets = num_entry_point_offsets;
748 if (sh->num_entry_point_offsets > 0) {
749 int offset_len = get_ue_golomb_long(gb) + 1;
751 if (offset_len < 1 || offset_len > 32) {
752 sh->num_entry_point_offsets = 0;
753 av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
754 return AVERROR_INVALIDDATA;
757 av_freep(&sh->entry_point_offset);
758 av_freep(&sh->offset);
760 sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
761 sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
762 sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
763 if (!sh->entry_point_offset || !sh->offset || !sh->size) {
764 sh->num_entry_point_offsets = 0;
765 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
766 return AVERROR(ENOMEM);
768 for (i = 0; i < sh->num_entry_point_offsets; i++) {
769 unsigned val = get_bits_long(gb, offset_len);
770 sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
// Parallel tile decoding is not implemented: force single-threaded.
772 if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
773 s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
774 s->threads_number = 1;
776 s->enable_parallel_tiles = 0;
778 s->enable_parallel_tiles = 0;
// ---- slice header extension (skipped) ----
781 if (s->ps.pps->slice_header_extension_present_flag) {
782 unsigned int length = get_ue_golomb_long(gb);
783 if (length*8LL > get_bits_left(gb)) {
784 av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
785 return AVERROR_INVALIDDATA;
787 for (i = 0; i < length; i++)
788 skip_bits(gb, 8); // slice_header_extension_data_byte
791 // Inferred parameters
792 sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
793 if (sh->slice_qp > 51 ||
794 sh->slice_qp < -s->ps.sps->qp_bd_offset) {
795 av_log(s->avctx, AV_LOG_ERROR,
796 "The slice_qp %d is outside the valid range "
799 -s->ps.sps->qp_bd_offset);
800 return AVERROR_INVALIDDATA;
803 sh->slice_ctb_addr_rs = sh->slice_segment_addr;
805 if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
806 av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
807 return AVERROR_INVALIDDATA;
810 if (get_bits_left(gb) < 0) {
811 av_log(s->avctx, AV_LOG_ERROR,
812 "Overread slice header by %d bits\n", -get_bits_left(gb));
813 return AVERROR_INVALIDDATA;
816 s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
818 if (!s->ps.pps->cu_qp_delta_enabled_flag)
819 s->HEVClc->qp_y = s->sh.slice_qp;
821 s->slice_initialized = 1;
822 s->HEVClc->tu.cu_qp_offset_cb = 0;
823 s->HEVClc->tu.cu_qp_offset_cr = 0;
// RASL pictures that follow this IRAP must be dropped (no output).
825 s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == NAL_CRA_NUT && s->last_eos);
// Access the per-CTB entry of a raster-ordered table at CTB coords (x, y);
// relies on a variable `s` (HEVCContext *) being in scope at the use site.
830 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
// Assign a SAO parameter field: decode it fresh when neither merge flag is
// set, otherwise copy it from the left or above CTB. Relies on
// sao_merge_left_flag / sao_merge_up_flag / sao / rx / ry in the caller's
// scope (used only by hls_sao_param()).
832 #define SET_SAO(elem, value) \
834 if (!sao_merge_up_flag && !sao_merge_left_flag) \
836 else if (sao_merge_left_flag) \
837 sao->elem = CTB(s->sao, rx-1, ry).elem; \
838 else if (sao_merge_up_flag) \
839 sao->elem = CTB(s->sao, rx, ry-1).elem; \
// Parse (or merge) the SAO parameters for the CTB at raster position
// (rx, ry) into s->sao, following H.265 7.3.8.3: merge-left/merge-up
// flags first, then per-component type, offsets and band/edge info, and
// finally the derived signed offset values.
844 static void hls_sao_param(HEVCContext *s, int rx, int ry)
846     HEVCLocalContext *lc = s->HEVClc;
847     int sao_merge_left_flag = 0;
848     int sao_merge_up_flag   = 0;
849     SAOParams *sao          = &CTB(s->sao, rx, ry);
// Merge flags are only coded when SAO is on for luma or chroma, and only
// when a left / above neighbour exists.
852 if (s->sh.slice_sample_adaptive_offset_flag[0] ||
853 s->sh.slice_sample_adaptive_offset_flag[1]) {
855 if (lc->ctb_left_flag)
856 sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
858 if (ry > 0 && !sao_merge_left_flag) {
860 sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
// One pass per component (luma only for monochrome streams).
864 for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
865 int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
866 s->ps.pps->log2_sao_offset_scale_chroma;
868 if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
869 sao->type_idx[c_idx] = SAO_NOT_APPLIED;
// Cr (c_idx == 2) reuses the type and edge class decoded for Cb.
874 sao->type_idx[2] = sao->type_idx[1];
875 sao->eo_class[2] = sao->eo_class[1];
877 SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
880 if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
// Four absolute offsets per component.
883 for (i = 0; i < 4; i++)
884 SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
// Band offset: explicit signs (only for nonzero offsets) + band position.
886 if (sao->type_idx[c_idx] == SAO_BAND) {
887 for (i = 0; i < 4; i++) {
888 if (sao->offset_abs[c_idx][i]) {
889 SET_SAO(offset_sign[c_idx][i],
890 ff_hevc_sao_offset_sign_decode(s));
892 sao->offset_sign[c_idx][i] = 0;
895 SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
896 } else if (c_idx != 2) {
// Edge offset: class is decoded for luma and Cb (Cr copied above).
897 SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
900 // Inferred parameters
901 sao->offset_val[c_idx][0] = 0;
902 for (i = 0; i < 4; i++) {
903 sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
904 if (sao->type_idx[c_idx] == SAO_EDGE) {
906 sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
907 } else if (sao->offset_sign[c_idx][i]) {
908 sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
// Offsets are scaled up for high bit depths (PPS range extension).
910 sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
// Decode the cross-component prediction residual scale for chroma
// component idx (0 = Cb, 1 = Cr; H.265 range extensions, 7.3.8.12).
// Stores a signed scale of +/-2^(log2_res_scale_abs_plus1 - 1) in
// lc->tu.res_scale_val, or 0 when no scaling is signaled.
918 static int hls_cross_component_pred(HEVCContext *s, int idx) {
919     HEVCLocalContext *lc    = s->HEVClc;
920     int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
922 if (log2_res_scale_abs_plus1 != 0) {
// Sign flag maps to a factor of +1 (flag 0) or -1 (flag 1).
923 int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
924 lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
925 (1 - 2 * res_scale_sign_flag);
927 lc->tu.res_scale_val = 0;
934 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
935 int xBase, int yBase, int cb_xBase, int cb_yBase,
936 int log2_cb_size, int log2_trafo_size,
937 int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
939 HEVCLocalContext *lc = s->HEVClc;
940 const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
943 if (lc->cu.pred_mode == MODE_INTRA) {
944 int trafo_size = 1 << log2_trafo_size;
945 ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
947 s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
950 if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
951 (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
952 int scan_idx = SCAN_DIAG;
953 int scan_idx_c = SCAN_DIAG;
954 int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
955 (s->ps.sps->chroma_format_idc == 2 &&
956 (cbf_cb[1] || cbf_cr[1]));
958 if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
959 lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
960 if (lc->tu.cu_qp_delta != 0)
961 if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
962 lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
963 lc->tu.is_cu_qp_delta_coded = 1;
965 if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
966 lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
967 av_log(s->avctx, AV_LOG_ERROR,
968 "The cu_qp_delta %d is outside the valid range "
971 -(26 + s->ps.sps->qp_bd_offset / 2),
972 (25 + s->ps.sps->qp_bd_offset / 2));
973 return AVERROR_INVALIDDATA;
976 ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
979 if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
980 !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
981 int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
982 if (cu_chroma_qp_offset_flag) {
983 int cu_chroma_qp_offset_idx = 0;
984 if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
985 cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
986 av_log(s->avctx, AV_LOG_ERROR,
987 "cu_chroma_qp_offset_idx not yet tested.\n");
989 lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
990 lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
992 lc->tu.cu_qp_offset_cb = 0;
993 lc->tu.cu_qp_offset_cr = 0;
995 lc->tu.is_cu_chroma_qp_offset_coded = 1;
998 if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
999 if (lc->tu.intra_pred_mode >= 6 &&
1000 lc->tu.intra_pred_mode <= 14) {
1001 scan_idx = SCAN_VERT;
1002 } else if (lc->tu.intra_pred_mode >= 22 &&
1003 lc->tu.intra_pred_mode <= 30) {
1004 scan_idx = SCAN_HORIZ;
1007 if (lc->tu.intra_pred_mode_c >= 6 &&
1008 lc->tu.intra_pred_mode_c <= 14) {
1009 scan_idx_c = SCAN_VERT;
1010 } else if (lc->tu.intra_pred_mode_c >= 22 &&
1011 lc->tu.intra_pred_mode_c <= 30) {
1012 scan_idx_c = SCAN_HORIZ;
1016 lc->tu.cross_pf = 0;
1019 ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1020 if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1021 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1022 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1023 lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1024 (lc->cu.pred_mode == MODE_INTER ||
1025 (lc->tu.chroma_mode_c == 4)));
1027 if (lc->tu.cross_pf) {
1028 hls_cross_component_pred(s, 0);
1030 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1031 if (lc->cu.pred_mode == MODE_INTRA) {
1032 ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1033 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1036 ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1037 log2_trafo_size_c, scan_idx_c, 1);
1039 if (lc->tu.cross_pf) {
1040 ptrdiff_t stride = s->frame->linesize[1];
1041 int hshift = s->ps.sps->hshift[1];
1042 int vshift = s->ps.sps->vshift[1];
1043 int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1044 int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1045 int size = 1 << log2_trafo_size_c;
1047 uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1048 ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1049 for (i = 0; i < (size * size); i++) {
1050 coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1052 s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride);
1056 if (lc->tu.cross_pf) {
1057 hls_cross_component_pred(s, 1);
1059 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1060 if (lc->cu.pred_mode == MODE_INTRA) {
1061 ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1062 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1065 ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1066 log2_trafo_size_c, scan_idx_c, 2);
1068 if (lc->tu.cross_pf) {
1069 ptrdiff_t stride = s->frame->linesize[2];
1070 int hshift = s->ps.sps->hshift[2];
1071 int vshift = s->ps.sps->vshift[2];
1072 int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1073 int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1074 int size = 1 << log2_trafo_size_c;
1076 uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1077 ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1078 for (i = 0; i < (size * size); i++) {
1079 coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1081 s->hevcdsp.transform_add[log2_trafo_size_c-2](dst, coeffs, stride);
1084 } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1085 int trafo_size_h = 1 << (log2_trafo_size + 1);
1086 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1087 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1088 if (lc->cu.pred_mode == MODE_INTRA) {
1089 ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1090 trafo_size_h, trafo_size_v);
1091 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1094 ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1095 log2_trafo_size, scan_idx_c, 1);
1097 for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1098 if (lc->cu.pred_mode == MODE_INTRA) {
1099 ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1100 trafo_size_h, trafo_size_v);
1101 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1104 ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1105 log2_trafo_size, scan_idx_c, 2);
1108 } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1109 if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1110 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1111 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1112 ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1113 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1114 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1115 if (s->ps.sps->chroma_format_idc == 2) {
1116 ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1117 trafo_size_h, trafo_size_v);
1118 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1119 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1121 } else if (blk_idx == 3) {
1122 int trafo_size_h = 1 << (log2_trafo_size + 1);
1123 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1124 ff_hevc_set_neighbour_available(s, xBase, yBase,
1125 trafo_size_h, trafo_size_v);
1126 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1127 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1128 if (s->ps.sps->chroma_format_idc == 2) {
1129 ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1130 trafo_size_h, trafo_size_v);
1131 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1132 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
/* Flag every min-PU position covered by the coding block at (x0, y0) as
 * transquant-bypass (value 2 in s->is_pcm) so the in-loop deblocking filter
 * skips those samples.
 * NOTE(review): this listing is a lossy extraction — the opening brace and
 * the declarations of the loop counters i/j are not visible here. */
1140 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1142 int cb_size = 1 << log2_cb_size;
1143 int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1145 int min_pu_width = s->ps.sps->min_pu_width;
/* Clamp to the picture edge so CBs that overhang the border do not write
 * past the end of the is_pcm table. */
1146 int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1147 int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
/* Walk the CB at min-PU granularity. */
1150 for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1151 for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1152 s->is_pcm[i + j * min_pu_width] = 2;
/* Recursively parse a transform tree (HEVC spec 7.3.8.8, transform_tree()).
 * Decodes the split flag and the chroma CBFs at the current depth, then
 * either recurses into the four half-size quadrants or parses the leaf
 * transform unit and updates the deblocking metadata.
 *
 * NOTE(review): lossy extraction — the declarations of cbf_cb[]/cbf_cr[],
 * cbf_luma, ret, i, j and numerous closing braces / else lines are not
 * visible in this listing; comments below describe the visible logic only. */
1155 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1156 int xBase, int yBase, int cb_xBase, int cb_yBase,
1157 int log2_cb_size, int log2_trafo_size,
1158 int trafo_depth, int blk_idx,
1159 const int *base_cbf_cb, const int *base_cbf_cr)
1161 HEVCLocalContext *lc = s->HEVClc;
1162 uint8_t split_transform_flag;
/* Inherit the chroma CBFs decoded at the parent depth (two entries each to
 * cover the second chroma TU of 4:2:2 content). */
1167 cbf_cb[0] = base_cbf_cb[0];
1168 cbf_cb[1] = base_cbf_cb[1];
1169 cbf_cr[0] = base_cbf_cr[0];
1170 cbf_cr[1] = base_cbf_cr[1];
/* For an intra NxN CU the per-TU intra modes come from the corresponding
 * PU at depth 1; otherwise all TUs share the modes of PU 0. */
1172 if (lc->cu.intra_split_flag) {
1173 if (trafo_depth == 1) {
1174 lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1175 if (s->ps.sps->chroma_format_idc == 3) {
1176 lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1177 lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1179 lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1180 lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1184 lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1185 lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1186 lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
/* split_transform_flag is explicitly coded only when the TU size and depth
 * leave an actual choice; otherwise it is inferred below. */
1189 if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1190 log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1191 trafo_depth < lc->cu.max_trafo_depth &&
1192 !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1193 split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
/* Inferred case: a non-2Nx2N inter CU with zero inter hierarchy depth
 * forces a split at depth 0 (inter_split). */
1195 int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1196 lc->cu.pred_mode == MODE_INTER &&
1197 lc->cu.part_mode != PART_2Nx2N &&
1200 split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1201 (lc->cu.intra_split_flag && trafo_depth == 0) ||
/* Chroma CBFs are present unless chroma is absent entirely, or the TU is
 * 4x4 in a subsampled format (then chroma residual lives at the parent).
 * A 4:2:2 TU carries a second CBF for its lower chroma block. */
1205 if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1206 if (trafo_depth == 0 || cbf_cb[0]) {
1207 cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1208 if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1209 cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1213 if (trafo_depth == 0 || cbf_cr[0]) {
1214 cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1215 if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1216 cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
/* Quad-split: recurse into the four half-size sub-trees. */
1221 if (split_transform_flag) {
1222 const int trafo_size_split = 1 << (log2_trafo_size - 1);
1223 const int x1 = x0 + trafo_size_split;
1224 const int y1 = y0 + trafo_size_split;
1226 #define SUBDIVIDE(x, y, idx) \
1228 ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1229 log2_trafo_size - 1, trafo_depth + 1, idx, \
1235 SUBDIVIDE(x0, y0, 0);
1236 SUBDIVIDE(x1, y0, 1);
1237 SUBDIVIDE(x0, y1, 2);
1238 SUBDIVIDE(x1, y1, 3);
/* Leaf transform unit. */
1242 int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1243 int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1244 int min_tu_width = s->ps.sps->min_tb_width;
/* cbf_luma is explicitly coded when intra, when below the root, or when
 * some chroma CBF is set; otherwise it is inferred (the TU must carry
 * luma residual when nothing else does). */
1247 if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1248 cbf_cb[0] || cbf_cr[0] ||
1249 (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1250 cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1253 ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1254 log2_cb_size, log2_trafo_size,
1255 blk_idx, cbf_luma, cbf_cb, cbf_cr);
1258 // TODO: store cbf_luma somewhere else
/* Record luma CBF per min-TU cell; the deblocking filter reads this. */
1261 for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1262 for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1263 int x_tu = (x0 + j) >> log2_min_tu_size;
1264 int y_tu = (y0 + i) >> log2_min_tu_size;
1265 s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
/* TU fully parsed: compute boundary strengths, and mark lossless CUs so
 * deblocking bypasses them. */
1268 if (!s->sh.disable_deblocking_filter_flag) {
1269 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1270 if (s->ps.pps->transquant_bypass_enable_flag &&
1271 lc->cu.cu_transquant_bypass_flag)
1272 set_deblocking_bypass(s, x0, y0, log2_trafo_size);
/* Parse one PCM coding block (HEVC spec 7.3.8.7, pcm_sample()): raw,
 * uncompressed luma and chroma samples are read straight from the bitstream
 * and copied into the current frame via the put_pcm DSP functions.
 * NOTE(review): lossy extraction — the declarations of `gb` (GetBitContext)
 * and `ret`, the error check after init_get_bits(), and the final return
 * statement are not visible in this listing. */
1278 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1280 HEVCLocalContext *lc = s->HEVClc;
1282 int cb_size = 1 << log2_cb_size;
/* Destination pointers for the three planes, chroma adjusted by the
 * format's horizontal/vertical subsampling shifts. */
1283 int stride0 = s->frame->linesize[0];
1284 uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1285 int stride1 = s->frame->linesize[1];
1286 uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1287 int stride2 = s->frame->linesize[2];
1288 uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
/* Total PCM payload size in bits: full-depth luma plus subsampled chroma. */
1290 int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1291 (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1292 ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1293 s->ps.sps->pcm.bit_depth_chroma;
/* Advance the CABAC engine past the byte-aligned PCM payload and get a
 * pointer to its first byte. */
1294 const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1297 if (!s->sh.disable_deblocking_filter_flag)
1298 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1300 ret = init_get_bits(&gb, pcm, length);
1304 s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1305 if (s->ps.sps->chroma_format_idc) {
1306 s->hevcdsp.put_pcm(dst1, stride1,
1307 cb_size >> s->ps.sps->hshift[1],
1308 cb_size >> s->ps.sps->vshift[1],
1309 &gb, s->ps.sps->pcm.bit_depth_chroma);
1310 s->hevcdsp.put_pcm(dst2, stride2,
1311 cb_size >> s->ps.sps->hshift[2],
1312 cb_size >> s->ps.sps->vshift[2],
1313 &gb, s->ps.sps->pcm.bit_depth_chroma);
1320 * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1322 * @param s HEVC decoding context
1323 * @param dst target buffer for block data at block position
1324 * @param dststride stride of the dst buffer
1325 * @param ref reference picture buffer at origin (0, 0)
1326 * @param mv motion vector (relative to block position) to get pixel data from
1327 * @param x_off horizontal position of block from origin (0, 0)
1328 * @param y_off vertical position of block from origin (0, 0)
1329 * @param block_w width of block
1330 * @param block_h height of block
1331 * @param luma_weight weighting factor applied to the luma prediction
1332 * @param luma_offset additive offset applied to the luma prediction value
/* Luma uni-directional motion compensation (see the doxygen block above and
 * HEVC spec 8.5.3.2.2.1). Fetches the reference block at quarter-pel
 * precision, using edge emulation when the qpel filter's support region
 * crosses a picture boundary, then applies the plain or weighted
 * uni-prediction qpel filter.
 * NOTE(review): lossy extraction — the declarations of the fractional MV
 * components mx/my (used in the dsp calls below) and the if/else selecting
 * between the weighted and unweighted calls are not visible here. */
1335 static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1336 AVFrame *ref, const Mv *mv, int x_off, int y_off,
1337 int block_w, int block_h, int luma_weight, int luma_offset)
1339 HEVCLocalContext *lc = s->HEVClc;
1340 uint8_t *src = ref->data[0];
1341 ptrdiff_t srcstride = ref->linesize[0];
1342 int pic_width = s->ps.sps->width;
1343 int pic_height = s->ps.sps->height;
/* Explicit weighted prediction applies to P slices (weighted_pred_flag)
 * and B slices (weighted_bipred_flag). */
1346 int weight_flag = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
1347 (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
1348 int idx = ff_hevc_pel_weight[block_w];
/* Integer part of the MV (quarter-pel units). */
1350 x_off += mv->x >> 2;
1351 y_off += mv->y >> 2;
1352 src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
/* If the filter support region (QPEL_EXTRA samples around the block) would
 * read outside the picture, build a padded copy in edge_emu_buffer. */
1354 if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1355 x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1356 y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1357 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1358 int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1359 int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1361 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1362 edge_emu_stride, srcstride,
1363 block_w + QPEL_EXTRA,
1364 block_h + QPEL_EXTRA,
1365 x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1366 pic_width, pic_height);
1367 src = lc->edge_emu_buffer + buf_offset;
1368 srcstride = edge_emu_stride;
/* Unweighted vs. explicitly weighted qpel interpolation (selection by
 * weight_flag; the branch itself is among the lines missing above). */
1372 s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1373 block_h, mx, my, block_w);
1375 s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1376 block_h, s->sh.luma_log2_weight_denom,
1377 luma_weight, luma_offset, mx, my, block_w);
1381 * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1383 * @param s HEVC decoding context
1384 * @param dst target buffer for block data at block position
1385 * @param dststride stride of the dst buffer
1386 * @param ref0 reference picture0 buffer at origin (0, 0)
1387 * @param mv0 motion vector0 (relative to block position) to get pixel data from
1388 * @param x_off horizontal position of block from origin (0, 0)
1389 * @param y_off vertical position of block from origin (0, 0)
1390 * @param block_w width of block
1391 * @param block_h height of block
1392 * @param ref1 reference picture1 buffer at origin (0, 0)
1393 * @param mv1 motion vector1 (relative to block position) to get pixel data from
1394 * @param current_mv current motion vector structure
/* Luma bi-directional motion compensation (see the doxygen block above and
 * HEVC spec 8.5.3.2.2.1). The L0 prediction is filtered into lc->tmp, then
 * combined with the L1 prediction by the (weighted) bi-prediction filter.
 * NOTE(review): lossy extraction — the if/else selecting the weighted vs.
 * unweighted bi-prediction call and several braces are not visible here. */
1396 static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1397 AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1398 int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1400 HEVCLocalContext *lc = s->HEVClc;
1401 ptrdiff_t src0stride = ref0->linesize[0];
1402 ptrdiff_t src1stride = ref1->linesize[0];
1403 int pic_width = s->ps.sps->width;
1404 int pic_height = s->ps.sps->height;
/* Fractional (quarter-pel) parts of both motion vectors. */
1405 int mx0 = mv0->x & 3;
1406 int my0 = mv0->y & 3;
1407 int mx1 = mv1->x & 3;
1408 int my1 = mv1->y & 3;
1409 int weight_flag = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
1410 (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
/* Integer parts, applied to the block position per reference list. */
1411 int x_off0 = x_off + (mv0->x >> 2);
1412 int y_off0 = y_off + (mv0->y >> 2);
1413 int x_off1 = x_off + (mv1->x >> 2);
1414 int y_off1 = y_off + (mv1->y >> 2);
1415 int idx = ff_hevc_pel_weight[block_w];
1417 uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1418 uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
/* Edge emulation for the L0 reference when its qpel support region crosses
 * the picture boundary. */
1420 if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1421 x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1422 y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1423 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1424 int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1425 int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1427 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1428 edge_emu_stride, src0stride,
1429 block_w + QPEL_EXTRA,
1430 block_h + QPEL_EXTRA,
1431 x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1432 pic_width, pic_height);
1433 src0 = lc->edge_emu_buffer + buf_offset;
1434 src0stride = edge_emu_stride;
/* Same for the L1 reference, using the second emu buffer so both padded
 * copies can coexist. */
1437 if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1438 x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1439 y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1440 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1441 int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1442 int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1444 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1445 edge_emu_stride, src1stride,
1446 block_w + QPEL_EXTRA,
1447 block_h + QPEL_EXTRA,
1448 x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1449 pic_width, pic_height);
1450 src1 = lc->edge_emu_buffer2 + buf_offset;
1451 src1stride = edge_emu_stride;
/* L0 prediction into the intermediate buffer, then combine with L1 using
 * either the default-average or the explicitly weighted bi-filter (branch
 * on weight_flag is among the missing lines). */
1454 s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1455 block_h, mx0, my0, block_w);
1457 s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1458 block_h, mx1, my1, block_w);
1460 s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1461 block_h, s->sh.luma_log2_weight_denom,
1462 s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1463 s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1464 s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1465 s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1471 * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1473 * @param s HEVC decoding context
1474 * @param dst1 target buffer for block data at block position (U plane)
1475 * @param dst2 target buffer for block data at block position (V plane)
1476 * @param dststride stride of the dst1 and dst2 buffers
1477 * @param ref reference picture buffer at origin (0, 0)
1478 * @param mv motion vector (relative to block position) to get pixel data from
1479 * @param x_off horizontal position of block from origin (0, 0)
1480 * @param y_off vertical position of block from origin (0, 0)
1481 * @param block_w width of block
1482 * @param block_h height of block
1483 * @param chroma_weight weighting factor applied to the chroma prediction
1484 * @param chroma_offset additive offset applied to the chroma prediction value
/* Chroma uni-directional motion compensation (see the doxygen block above
 * and HEVC spec 8.5.3.2.2.2) for one chroma plane, with edge emulation and
 * optional explicit weighting.
 * NOTE(review): mis-encoded text below — "¤t_mv" is "&current_mv"
 * mangled through the HTML entity "&curren;" (¤); restore "&current_mv".
 * Also lossy extraction: the opening brace and the if/else selecting the
 * weighted vs. unweighted epel call are not visible here. */
1487 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1488 ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1489 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1491 HEVCLocalContext *lc = s->HEVClc;
/* Picture dimensions in chroma samples. */
1492 int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1493 int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1494 const Mv *mv = ¤t_mv->mv[reflist];
1495 int weight_flag = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
1496 (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
1497 int idx = ff_hevc_pel_weight[block_w];
1498 int hshift = s->ps.sps->hshift[1];
1499 int vshift = s->ps.sps->vshift[1];
/* Fractional MV part in chroma units (2+shift bits), scaled to the 1/8-pel
 * index expected by the epel filters. */
1500 intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1501 intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1502 intptr_t _mx = mx << (1 - hshift);
1503 intptr_t _my = my << (1 - vshift);
/* Integer MV part in chroma samples. */
1505 x_off += mv->x >> (2 + hshift);
1506 y_off += mv->y >> (2 + vshift);
1507 src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
/* Pad via edge emulation when the EPEL_EXTRA support region crosses the
 * picture boundary. */
1509 if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1510 x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1511 y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1512 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1513 int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1514 int buf_offset0 = EPEL_EXTRA_BEFORE *
1515 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1516 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1517 edge_emu_stride, srcstride,
1518 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1519 x_off - EPEL_EXTRA_BEFORE,
1520 y_off - EPEL_EXTRA_BEFORE,
1521 pic_width, pic_height);
1523 src0 = lc->edge_emu_buffer + buf_offset0;
1524 srcstride = edge_emu_stride;
/* Unweighted vs. explicitly weighted epel interpolation. */
1527 s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1528 block_h, _mx, _my, block_w);
1530 s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1531 block_h, s->sh.chroma_log2_weight_denom,
1532 chroma_weight, chroma_offset, _mx, _my, block_w);
1536 * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1538 * @param s HEVC decoding context
1539 * @param dst target buffer for block data at block position
1540 * @param dststride stride of the dst buffer
1541 * @param ref0 reference picture0 buffer at origin (0, 0)
1542 * @param mv0 motion vector0 (relative to block position) to get pixel data from
1543 * @param x_off horizontal position of block from origin (0, 0)
1544 * @param y_off vertical position of block from origin (0, 0)
1545 * @param block_w width of block
1546 * @param block_h height of block
1547 * @param ref1 reference picture1 buffer at origin (0, 0)
1548 * @param mv1 motion vector1 (relative to block position) to get pixel data from
1549 * @param current_mv current motion vector structure
1550 * @param cidx chroma component(cb, cr)
/* Chroma bi-directional motion compensation (see the doxygen block above
 * and HEVC spec 8.5.3.2.2.2) for chroma component cidx (0 = Cb, 1 = Cr).
 * The L0 prediction is filtered into lc->tmp, then combined with L1 by the
 * (weighted) bi-prediction epel filter.
 * NOTE(review): mis-encoded text below — "¤t_mv" is "&current_mv"
 * mangled through the HTML entity "&curren;" (¤); restore "&current_mv".
 * Also lossy extraction: the weight_flag if/else and several braces are not
 * visible in this listing. */
1552 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1553 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1555 HEVCLocalContext *lc = s->HEVClc;
1556 uint8_t *src1 = ref0->data[cidx+1];
1557 uint8_t *src2 = ref1->data[cidx+1];
1558 ptrdiff_t src1stride = ref0->linesize[cidx+1];
1559 ptrdiff_t src2stride = ref1->linesize[cidx+1];
1560 int weight_flag = (s->sh.slice_type == P_SLICE && s->ps.pps->weighted_pred_flag) ||
1561 (s->sh.slice_type == B_SLICE && s->ps.pps->weighted_bipred_flag);
1562 int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1563 int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1564 Mv *mv0 = ¤t_mv->mv[0];
1565 Mv *mv1 = ¤t_mv->mv[1];
1566 int hshift = s->ps.sps->hshift[1];
1567 int vshift = s->ps.sps->vshift[1];
/* Fractional MV parts in chroma units, scaled to 1/8-pel filter indices. */
1569 intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1570 intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1571 intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1572 intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1573 intptr_t _mx0 = mx0 << (1 - hshift);
1574 intptr_t _my0 = my0 << (1 - vshift);
1575 intptr_t _mx1 = mx1 << (1 - hshift);
1576 intptr_t _my1 = my1 << (1 - vshift);
/* Integer MV parts applied to the chroma block position per list. */
1578 int x_off0 = x_off + (mv0->x >> (2 + hshift));
1579 int y_off0 = y_off + (mv0->y >> (2 + vshift));
1580 int x_off1 = x_off + (mv1->x >> (2 + hshift));
1581 int y_off1 = y_off + (mv1->y >> (2 + vshift));
1582 int idx = ff_hevc_pel_weight[block_w];
1583 src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1584 src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
/* Edge emulation for the L0 chroma reference. */
1586 if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1587 x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1588 y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1589 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1590 int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1591 int buf_offset1 = EPEL_EXTRA_BEFORE *
1592 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1594 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1595 edge_emu_stride, src1stride,
1596 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1597 x_off0 - EPEL_EXTRA_BEFORE,
1598 y_off0 - EPEL_EXTRA_BEFORE,
1599 pic_width, pic_height);
1601 src1 = lc->edge_emu_buffer + buf_offset1;
1602 src1stride = edge_emu_stride;
/* Edge emulation for the L1 chroma reference, in the second emu buffer. */
1605 if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1606 x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1607 y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1608 const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1609 int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1610 int buf_offset1 = EPEL_EXTRA_BEFORE *
1611 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1613 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1614 edge_emu_stride, src2stride,
1615 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1616 x_off1 - EPEL_EXTRA_BEFORE,
1617 y_off1 - EPEL_EXTRA_BEFORE,
1618 pic_width, pic_height);
1620 src2 = lc->edge_emu_buffer2 + buf_offset1;
1621 src2stride = edge_emu_stride;
/* L0 into lc->tmp, then combine with L1 via default-average or explicitly
 * weighted bi-filter (branch on weight_flag is among the missing lines). */
1624 s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1625 block_h, _mx0, _my0, block_w);
1627 s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1628 src2, src2stride, lc->tmp,
1629 block_h, _mx1, _my1, block_w);
1631 s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1632 src2, src2stride, lc->tmp,
1634 s->sh.chroma_log2_weight_denom,
1635 s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1636 s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1637 s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1638 s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1639 _mx1, _my1, block_w);
/* Frame-threading synchronization: block until the reference frame has been
 * decoded at least down to the bottom-most row this motion compensation can
 * touch (y0 + height + integer MV + a filter/rounding margin of 9, clamped
 * to >= 0). No-op unless frame threading is active. */
1642 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1643 const Mv *mv, int y0, int height)
1645 int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1647 if (s->threads_type == FF_THREAD_FRAME )
1648 ff_thread_await_progress(&ref->tf, y, 0);
/* AMVP motion decoding for one PU: read inter_pred_idc (B slices only),
 * then for each active list decode ref_idx, MVD, and the MVP flag, derive
 * the predictor via ff_hevc_luma_mv_mvp_mode(), and add the MVD to it.
 * NOTE(review): lossy extraction — the declaration of mvp_flag, the
 * `} else {` between the mvd_l1_zero branch and the L1 mvd coding call, and
 * several closing braces are not visible in this listing. */
1651 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
1652 int nPbH, int log2_cb_size, int part_idx,
1653 int merge_idx, MvField *mv)
1655 HEVCLocalContext *lc = s->HEVClc;
1656 enum InterPredIdc inter_pred_idc = PRED_L0;
1659 ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
/* P slices are always PRED_L0; only B slices code inter_pred_idc. */
1661 if (s->sh.slice_type == B_SLICE)
1662 inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
/* List 0 (PRED_L0 or PRED_BI). */
1664 if (inter_pred_idc != PRED_L1) {
1665 if (s->sh.nb_refs[L0])
1666 mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1668 mv->pred_flag = PF_L0;
1669 ff_hevc_hls_mvd_coding(s, x0, y0, 0);
1670 mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1671 ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1672 part_idx, merge_idx, mv, mvp_flag, 0);
/* Final MV = predictor + decoded MVD. */
1673 mv->mv[0].x += lc->pu.mvd.x;
1674 mv->mv[0].y += lc->pu.mvd.y;
/* List 1 (PRED_L1 or PRED_BI). */
1677 if (inter_pred_idc != PRED_L0) {
1678 if (s->sh.nb_refs[L1])
1679 mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
/* mvd_l1_zero_flag forces a zero L1 MVD for bi-predicted PUs. */
1681 if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1682 AV_ZERO32(&lc->pu.mvd);
1684 ff_hevc_hls_mvd_coding(s, x0, y0, 1);
1687 mv->pred_flag += PF_L1;
1688 mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1689 ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1690 part_idx, merge_idx, mv, mvp_flag, 1);
1691 mv->mv[1].x += lc->pu.mvd.x;
1692 mv->mv[1].y += lc->pu.mvd.y;
/* Decode one prediction unit: choose merge vs. AMVP motion decoding, write
 * the resulting MvField into the per-min-PU motion table, wait on the
 * reference frames (frame threading), then run luma and chroma motion
 * compensation for the L0-only / L1-only / BI cases.
 * NOTE(review): lossy extraction — the parameter line declaring
 * `int nPbW, int nPbH` (both used throughout the body), the declarations of
 * merge_idx, x_pu, y_pu, i, j, and many braces are not visible here.
 * Also "¤t_mv" is "&current_mv" mangled through the HTML entity
 * "&curren;" (¤); restore "&current_mv". */
1696 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1698 int log2_cb_size, int partIdx, int idx)
/* Pointer to the sample at (x, y) of plane c_idx in the current frame. */
1700 #define POS(c_idx, x, y) \
1701 &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1702 (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1703 HEVCLocalContext *lc = s->HEVClc;
1705 struct MvField current_mv = {{{ 0 }}};
1707 int min_pu_width = s->ps.sps->min_pu_width;
1709 MvField *tab_mvf = s->ref->tab_mvf;
1710 RefPicList *refPicList = s->ref->refPicList;
1711 HEVCFrame *ref0 = NULL, *ref1 = NULL;
1712 uint8_t *dst0 = POS(0, x0, y0);
1713 uint8_t *dst1 = POS(1, x0, y0);
1714 uint8_t *dst2 = POS(2, x0, y0);
1715 int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1716 int min_cb_width = s->ps.sps->min_cb_width;
1717 int x_cb = x0 >> log2_min_cb_size;
1718 int y_cb = y0 >> log2_min_cb_size;
1722 int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1725 lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
/* Merge mode (also used by skip): motion is copied from a merge candidate
 * selected by merge_idx. */
1727 if (skip_flag || lc->pu.merge_flag) {
1728 if (s->sh.max_num_merge_cand > 1)
1729 merge_idx = ff_hevc_merge_idx_decode(s);
1733 ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1734 partIdx, merge_idx, ¤t_mv);
1736 hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1737 partIdx, merge_idx, ¤t_mv);
/* Store the decoded motion for every min-PU covered by this PU; later PUs,
 * the deblocking filter and TMVP read this table. */
1740 x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1741 y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1743 for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1744 for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1745 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
/* Resolve reference frames and wait until the rows we will read from them
 * have been decoded (frame threading). */
1747 if (current_mv.pred_flag & PF_L0) {
1748 ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1751 hevc_await_progress(s, ref0, ¤t_mv.mv[0], y0, nPbH);
1753 if (current_mv.pred_flag & PF_L1) {
1754 ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1757 hevc_await_progress(s, ref1, ¤t_mv.mv[1], y0, nPbH);
/* Uni-prediction from list 0. */
1760 if (current_mv.pred_flag == PF_L0) {
1761 int x0_c = x0 >> s->ps.sps->hshift[1];
1762 int y0_c = y0 >> s->ps.sps->vshift[1];
1763 int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1764 int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1766 luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1767 ¤t_mv.mv[0], x0, y0, nPbW, nPbH,
1768 s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1769 s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1771 if (s->ps.sps->chroma_format_idc) {
1772 chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1773 0, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1774 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1775 chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1776 0, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1777 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
/* Uni-prediction from list 1. */
1779 } else if (current_mv.pred_flag == PF_L1) {
1780 int x0_c = x0 >> s->ps.sps->hshift[1];
1781 int y0_c = y0 >> s->ps.sps->vshift[1];
1782 int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1783 int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1785 luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1786 ¤t_mv.mv[1], x0, y0, nPbW, nPbH,
1787 s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1788 s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1790 if (s->ps.sps->chroma_format_idc) {
1791 chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1792 1, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1793 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1795 chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1796 1, x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv,
1797 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
/* Bi-prediction from both lists. */
1799 } else if (current_mv.pred_flag == PF_BI) {
1800 int x0_c = x0 >> s->ps.sps->hshift[1];
1801 int y0_c = y0 >> s->ps.sps->vshift[1];
1802 int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1803 int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1805 luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1806 ¤t_mv.mv[0], x0, y0, nPbW, nPbH,
1807 ref1->frame, ¤t_mv.mv[1], ¤t_mv);
1809 if (s->ps.sps->chroma_format_idc) {
1810 chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1811 x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, 0);
1813 chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1814 x0_c, y0_c, nPbW_c, nPbH_c, ¤t_mv, 1);
/* Derive the luma intra prediction mode for one PU (HEVC spec 8.4.2):
 * build the three MPM candidates from the left and up neighbour modes,
 * select by mpm_idx or reconstruct from rem_intra_luma_pred_mode, then
 * record the mode and PF_INTRA flags in the per-min-PU tables.
 * Returns the derived intra prediction mode.
 * NOTE(review): lossy extraction — the declarations of candidate[3] and
 * loop counters, the `cand_up = INTRA_DC;` body of the CTB-boundary check,
 * and the `intra_pred_mode++;` body of the rem-mode loop are not visible
 * in this listing. */
1822 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1823 int prev_intra_luma_pred_flag)
1825 HEVCLocalContext *lc = s->HEVClc;
1826 int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1827 int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1828 int min_pu_width = s->ps.sps->min_pu_width;
1829 int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
/* Position inside the CTB; zero means we sit on a CTB edge. */
1830 int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1831 int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
/* Neighbour modes default to INTRA_DC when the neighbour is unavailable. */
1833 int cand_up = (lc->ctb_up_flag || y0b) ?
1834 s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1835 int cand_left = (lc->ctb_left_flag || x0b) ?
1836 s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1838 int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1840 MvField *tab_mvf = s->ref->tab_mvf;
1841 int intra_pred_mode;
1845 // intra_pred_mode prediction does not cross vertical CTB boundaries
1846 if ((y0 - 1) < y_ctb)
/* MPM candidate construction: equal neighbours yield either the default
 * {PLANAR, DC, ANGULAR_26} set (non-angular) or the angular mode plus its
 * two neighbouring directions (modulo the 32 angular modes). */
1849 if (cand_left == cand_up) {
1850 if (cand_left < 2) {
1851 candidate[0] = INTRA_PLANAR;
1852 candidate[1] = INTRA_DC;
1853 candidate[2] = INTRA_ANGULAR_26;
1855 candidate[0] = cand_left;
1856 candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
1857 candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
/* Distinct neighbours: both are candidates; the third is the first of
 * PLANAR / DC / ANGULAR_26 not already present. */
1860 candidate[0] = cand_left;
1861 candidate[1] = cand_up;
1862 if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
1863 candidate[2] = INTRA_PLANAR;
1864 } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
1865 candidate[2] = INTRA_DC;
1867 candidate[2] = INTRA_ANGULAR_26;
1871 if (prev_intra_luma_pred_flag) {
1872 intra_pred_mode = candidate[lc->pu.mpm_idx];
/* Non-MPM path: sort the candidates, then shift rem_intra_luma_pred_mode
 * past each candidate it meets or exceeds. */
1874 if (candidate[0] > candidate[1])
1875 FFSWAP(uint8_t, candidate[0], candidate[1]);
1876 if (candidate[0] > candidate[2])
1877 FFSWAP(uint8_t, candidate[0], candidate[2]);
1878 if (candidate[1] > candidate[2])
1879 FFSWAP(uint8_t, candidate[1], candidate[2]);
1881 intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
1882 for (i = 0; i < 3; i++)
1883 if (intra_pred_mode >= candidate[i])
1887 /* write the intra prediction units into the mv array */
1890 for (i = 0; i < size_in_pus; i++) {
1891 memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
1892 intra_pred_mode, size_in_pus);
1894 for (j = 0; j < size_in_pus; j++) {
1895 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
1899 return intra_pred_mode;
/*
 * Record the coding-tree depth ct_depth in s->tab_ct_depth for every
 * minimum coding block covered by the CB at (x0, y0) of the given size.
 */
1902 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
1903 int log2_cb_size, int ct_depth)
1905 int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
1906 int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
1907 int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
/* One memset row per min-CB line covered by this coding block. */
1910 for (y = 0; y < length; y++)
1911 memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
/* Chroma intra-mode remapping table used only in the 4:2:2 path of
 * intra_prediction_unit() (chroma_format_idc == 2) — presumably the
 * mode mapping for 4:2:2 chroma; confirm against the H.265 spec table. */
1915 static const uint8_t tab_mode_idx[] = {
1916 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
1917 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
/*
 * Parse the intra prediction info for one coding unit.
 * For PART_NxN the CU is split into 2x2 prediction blocks (side == 2),
 * otherwise a single block. For each PU the luma mode is decoded via the
 * MPM flag/index or remainder; the chroma mode is then derived according
 * to chroma_format_idc: per-PU for 4:4:4, remapped via tab_mode_idx for
 * 4:2:2, and a single mode for other non-monochrome formats.
 */
1919 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
1922 HEVCLocalContext *lc = s->HEVClc;
1923 static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
1924 uint8_t prev_intra_luma_pred_flag[4];
1925 int split = lc->cu.part_mode == PART_NxN;
1926 int pb_size = (1 << log2_cb_size) >> split;
1927 int side = split + 1;
/* First pass: read all prev_intra_luma_pred_flag bits. */
1931 for (i = 0; i < side; i++)
1932 for (j = 0; j < side; j++)
1933 prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
/* Second pass: decode MPM index or remainder, then derive the luma mode. */
1935 for (i = 0; i < side; i++) {
1936 for (j = 0; j < side; j++) {
1937 if (prev_intra_luma_pred_flag[2 * i + j])
1938 lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
1940 lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
1942 lc->pu.intra_pred_mode[2 * i + j] =
1943 luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
1944 prev_intra_luma_pred_flag[2 * i + j]);
/* 4:4:4: an independent chroma mode per PU; chroma_mode == 4 means
 * "derived from luma" (DM mode — confirm), and a collision with the
 * table entry selects mode 34 instead. */
1948 if (s->ps.sps->chroma_format_idc == 3) {
1949 for (i = 0; i < side; i++) {
1950 for (j = 0; j < side; j++) {
1951 lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
1952 if (chroma_mode != 4) {
1953 if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
1954 lc->pu.intra_pred_mode_c[2 * i + j] = 34;
1956 lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
1958 lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
/* 4:2:2: a single chroma mode, remapped through tab_mode_idx. */
1962 } else if (s->ps.sps->chroma_format_idc == 2) {
1964 lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
1965 if (chroma_mode != 4) {
1966 if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
1969 mode_idx = intra_chroma_table[chroma_mode];
1971 mode_idx = lc->pu.intra_pred_mode[0];
1973 lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
/* Other non-monochrome formats (4:2:0): a single chroma mode, no remap. */
1974 } else if (s->ps.sps->chroma_format_idc != 0) {
1975 chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
1976 if (chroma_mode != 4) {
1977 if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
1978 lc->pu.intra_pred_mode_c[0] = 34;
1980 lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
1982 lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
/*
 * Fill default intra bookkeeping for a CU that carries no parsed intra
 * data (skip/inter/PCM paths): set tab_ipm to INTRA_DC over the covered
 * min-PUs and, for an intra CU, mark the MvField entries as PF_INTRA.
 */
1987 static void intra_prediction_unit_default_value(HEVCContext *s,
1991 HEVCLocalContext *lc = s->HEVClc;
1992 int pb_size = 1 << log2_cb_size;
1993 int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
1994 int min_pu_width = s->ps.sps->min_pu_width;
1995 MvField *tab_mvf = s->ref->tab_mvf;
1996 int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1997 int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
/* A CU smaller than one min-PU covers no tab_ipm entries. */
2000 if (size_in_pus == 0)
2002 for (j = 0; j < size_in_pus; j++)
2003 memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2004 if (lc->cu.pred_mode == MODE_INTRA)
2005 for (j = 0; j < size_in_pus; j++)
2006 for (k = 0; k < size_in_pus; k++)
2007 tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
/*
 * Decode one coding unit at (x0, y0): skip flag, prediction mode,
 * partition mode, optional PCM sample data, the prediction units per
 * partition shape, and the residual transform tree; finally propagate
 * the QP over the covered min-CBs and record the coding-tree depth.
 * Returns 0 on success or a negative AVERROR code (error paths are
 * partially outside this excerpt).
 */
2010 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2012 int cb_size = 1 << log2_cb_size;
2013 HEVCLocalContext *lc = s->HEVClc;
2014 int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2015 int length = cb_size >> log2_min_cb_size;
2016 int min_cb_width = s->ps.sps->min_cb_width;
2017 int x_cb = x0 >> log2_min_cb_size;
2018 int y_cb = y0 >> log2_min_cb_size;
2019 int idx = log2_cb_size - 2;
2020 int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
/* Defaults before any syntax is parsed. */
2025 lc->cu.pred_mode = MODE_INTRA;
2026 lc->cu.part_mode = PART_2Nx2N;
2027 lc->cu.intra_split_flag = 0;
2029 SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2030 for (x = 0; x < 4; x++)
2031 lc->pu.intra_pred_mode[x] = 1;
2032 if (s->ps.pps->transquant_bypass_enable_flag) {
2033 lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2034 if (lc->cu.cu_transquant_bypass_flag)
2035 set_deblocking_bypass(s, x0, y0, log2_cb_size);
2037 lc->cu.cu_transquant_bypass_flag = 0;
/* Skip flag only exists in non-intra slices; replicate it over the
 * covered min-CBs. */
2039 if (s->sh.slice_type != I_SLICE) {
2040 uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2042 x = y_cb * min_cb_width + x_cb;
2043 for (y = 0; y < length; y++) {
2044 memset(&s->skip_flag[x], skip_flag, length);
2047 lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2049 x = y_cb * min_cb_width + x_cb;
2050 for (y = 0; y < length; y++) {
2051 memset(&s->skip_flag[x], 0, length);
/* Skipped CU: a single merge PU, default intra bookkeeping, no residual. */
2056 if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2057 hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2058 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2060 if (!s->sh.disable_deblocking_filter_flag)
2061 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2065 if (s->sh.slice_type != I_SLICE)
2066 lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
/* part_mode is only signalled for inter CUs or minimum-size CUs. */
2067 if (lc->cu.pred_mode != MODE_INTRA ||
2068 log2_cb_size == s->ps.sps->log2_min_cb_size) {
2069 lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2070 lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2071 lc->cu.pred_mode == MODE_INTRA;
2074 if (lc->cu.pred_mode == MODE_INTRA) {
/* PCM is only possible for 2Nx2N intra CUs within the SPS size range. */
2075 if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2076 log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2077 log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2078 pcm_flag = ff_hevc_pcm_flag_decode(s);
2081 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2082 ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2083 if (s->ps.sps->pcm.loop_filter_disable_flag)
2084 set_deblocking_bypass(s, x0, y0, log2_cb_size);
2089 intra_prediction_unit(s, x0, y0, log2_cb_size);
2092 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
/* Inter CU: dispatch the PUs according to the partition shape. */
2093 switch (lc->cu.part_mode) {
2095 hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2098 hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2099 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2102 hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2103 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2106 hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2107 hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2110 hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2111 hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2114 hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2115 hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2118 hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2119 hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2122 hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2123 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2124 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2125 hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
/* rqt_root_cbf: whether any residual data follows; only signalled for
 * non-merge inter CUs. */
2131 int rqt_root_cbf = 1;
2133 if (lc->cu.pred_mode != MODE_INTRA &&
2134 !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2135 rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2138 const static int cbf[2] = { 0 };
2139 lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2140 s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2141 s->ps.sps->max_transform_hierarchy_depth_inter;
2142 ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2144 log2_cb_size, 0, 0, cbf, cbf);
2148 if (!s->sh.disable_deblocking_filter_flag)
2149 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
/* If no cu_qp_delta was coded for this quantization group, derive qPy now. */
2154 if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2155 ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2157 x = y_cb * min_cb_width + x_cb;
2158 for (y = 0; y < length; y++) {
2159 memset(&s->qp_y_tab[x], lc->qp_y, length);
/* Update the QP predictor when this CU closes a quantization group. */
2163 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2164 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2165 lc->qPy_pred = lc->qp_y;
2168 set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
/*
 * Recursively parse the coding quadtree rooted at (x0, y0).
 * A split flag is decoded when the CB fits inside the picture and is
 * larger than the minimum CB size; CBs crossing the picture border are
 * forced to split. Leaves are decoded by hls_coding_unit().
 * Returns a "more data" flag (0 when end_of_slice_flag terminates the
 * slice) or a negative value on error.
 */
2173 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2174 int log2_cb_size, int cb_depth)
2176 HEVCLocalContext *lc = s->HEVClc;
2177 const int cb_size = 1 << log2_cb_size;
2181 lc->ct_depth = cb_depth;
2182 if (x0 + cb_size <= s->ps.sps->width &&
2183 y0 + cb_size <= s->ps.sps->height &&
2184 log2_cb_size > s->ps.sps->log2_min_cb_size) {
2185 split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
/* Implicit split when the CB does not fit in the picture. */
2187 split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
/* Reset the per-quantization-group delta-QP state at QG boundaries. */
2189 if (s->ps.pps->cu_qp_delta_enabled_flag &&
2190 log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2191 lc->tu.is_cu_qp_delta_coded = 0;
2192 lc->tu.cu_qp_delta = 0;
2195 if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2196 log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2197 lc->tu.is_cu_chroma_qp_offset_coded = 0;
/* Split: recurse into the four sub-CBs, skipping children outside the
 * picture; stop early when a child reports end-of-slice or error. */
2201 int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2202 const int cb_size_split = cb_size >> 1;
2203 const int x1 = x0 + cb_size_split;
2204 const int y1 = y0 + cb_size_split;
2208 more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2212 if (more_data && x1 < s->ps.sps->width) {
2213 more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2217 if (more_data && y1 < s->ps.sps->height) {
2218 more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2222 if (more_data && x1 < s->ps.sps->width &&
2223 y1 < s->ps.sps->height) {
2224 more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2229 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2230 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2231 lc->qPy_pred = lc->qp_y;
/* More data remains if any recursed sub-CB lies inside the picture. */
2234 return ((x1 + cb_size_split) < s->ps.sps->width ||
2235 (y1 + cb_size_split) < s->ps.sps->height);
/* Leaf: decode the coding unit, then check for end_of_slice_flag at the
 * last CB of a CTB row/column boundary. */
2239 ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2242 if ((!((x0 + cb_size) %
2243 (1 << (s->ps.sps->log2_ctb_size))) ||
2244 (x0 + cb_size >= s->ps.sps->width)) &&
2246 (1 << (s->ps.sps->log2_ctb_size))) ||
2247 (y0 + cb_size >= s->ps.sps->height))) {
2248 int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2249 return !end_of_slice_flag;
/*
 * Set up per-CTB neighbour availability and boundary flags for the CTB
 * at (x_ctb, y_ctb): record the slice address, compute the tile/row end
 * coordinates, and derive the left/up/up-right/up-left availability used
 * by the CABAC context and intra prediction.
 */
2258 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2261 HEVCLocalContext *lc = s->HEVClc;
2262 int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2263 int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2264 int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2266 s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
/* WPP: each CTB row restarts QP-group prediction at its first CTB. */
2268 if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2269 if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2270 lc->first_qp_group = 1;
2271 lc->end_of_tiles_x = s->ps.sps->width;
2272 } else if (s->ps.pps->tiles_enabled_flag) {
/* Entering a new tile: recompute its right edge from the column widths. */
2273 if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2274 int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2275 lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2276 lc->first_qp_group = 1;
2279 lc->end_of_tiles_x = s->ps.sps->width;
2282 lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
/* Boundary flags: compare tile ids / slice addresses of the left and
 * upper CTB neighbours. */
2284 lc->boundary_flags = 0;
2285 if (s->ps.pps->tiles_enabled_flag) {
2286 if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2287 lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2288 if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2289 lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2290 if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2291 lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2292 if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2293 lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2295 if (ctb_addr_in_slice <= 0)
2296 lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2297 if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2298 lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
/* A neighbour is available only inside the picture, inside the current
 * slice, and not across a tile boundary. */
2301 lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2302 lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2303 lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2304 lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
/*
 * Single-threaded slice decode entry point (run through avctx->execute):
 * iterate over CTBs in tile-scan order starting at the slice's first CTB,
 * decode each CTB's SAO parameters and coding quadtree, then apply the
 * in-loop filters. Returns the next CTB address in tile-scan order on
 * success (return statement outside this excerpt) or a negative AVERROR.
 */
2307 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2309 HEVCContext *s = avctxt->priv_data;
2310 int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2314 int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
/* A dependent slice segment cannot be the first segment of the picture. */
2316 if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2317 av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2318 return AVERROR_INVALIDDATA;
2321 if (s->sh.dependent_slice_segment_flag) {
2322 int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2323 if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2324 av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2325 return AVERROR_INVALIDDATA;
2329 while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2330 int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2332 x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2333 y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2334 hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2336 ff_hevc_cabac_init(s, ctb_addr_ts);
2338 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2340 s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2341 s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2342 s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2344 more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
/* Mark the CTB unused so later slices detect the missing data. */
2345 if (more_data < 0) {
2346 s->tab_slice_address[ctb_addr_rs] = -1;
2352 ff_hevc_save_states(s, ctb_addr_ts);
2353 ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
/* Filter the final CTB once the bottom-right of the picture is reached. */
2356 if (x_ctb + ctb_size >= s->ps.sps->width &&
2357 y_ctb + ctb_size >= s->ps.sps->height)
2358 ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
/*
 * Run the non-WPP slice decoder (hls_decode_entry) through the codec's
 * execute() mechanism with a single job.
 */
2363 static int hls_slice_data(HEVCContext *s)
2371 s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
/*
 * Wavefront (WPP) worker: decode one CTB row using the per-row entry
 * point offsets. Each worker waits on the progress of the row above
 * (ff_thread_await_progress2) before decoding a CTB, and reports its own
 * progress so the next row can proceed; s1->wpp_err aborts all rows on a
 * decode error.
 */
2374 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2376 HEVCContext *s1 = avctxt->priv_data, *s;
2377 HEVCLocalContext *lc;
2378 int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2380 int *ctb_row_p = input_ctb_row;
2381 int ctb_row = ctb_row_p[job];
2382 int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2383 int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2384 int thread = ctb_row % s1->threads_number;
/* Each row uses its own cloned HEVCContext from sList. */
2387 s = s1->sList[self_id];
/* Rows after the first start at their slice-header entry point offset. */
2391 ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2395 ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2398 while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2399 int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2400 int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2402 hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
/* Wait until the row above is SHIFT_CTB_WPP CTBs ahead. */
2404 ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2406 if (avpriv_atomic_int_get(&s1->wpp_err)){
2407 ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2411 ff_hevc_cabac_init(s, ctb_addr_ts);
2412 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2413 more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
/* On error, flag all workers and release any waiters before bailing. */
2415 if (more_data < 0) {
2416 s->tab_slice_address[ctb_addr_rs] = -1;
2417 avpriv_atomic_int_set(&s1->wpp_err, 1);
2418 ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2424 ff_hevc_save_states(s, ctb_addr_ts);
2425 ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2426 ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
/* Premature end-of-slice inside a row is an error unless it is the last
 * entry point. */
2428 if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2429 avpriv_atomic_int_set(&s1->wpp_err, 1);
2430 ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2434 if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2435 ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2436 ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2439 ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
/* Stop at the end of this row. */
2442 if(x_ctb >= s->ps.sps->width) {
2446 ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2451 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2453 const uint8_t *data = nal->data;
2454 int length = nal->size;
2455 HEVCLocalContext *lc = s->HEVClc;
2456 int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2457 int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2459 int64_t startheader, cmpt = 0;
2465 return AVERROR(ENOMEM);
2468 if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2469 av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2470 s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2471 s->ps.sps->ctb_width, s->ps.sps->ctb_height
2473 res = AVERROR_INVALIDDATA;
2477 ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2480 for (i = 1; i < s->threads_number; i++) {
2481 s->sList[i] = av_malloc(sizeof(HEVCContext));
2482 memcpy(s->sList[i], s, sizeof(HEVCContext));
2483 s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2484 s->sList[i]->HEVClc = s->HEVClcList[i];
2488 offset = (lc->gb.index >> 3);
2490 for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2491 if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2497 for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2498 offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2499 for (j = 0, cmpt = 0, startheader = offset
2500 + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2501 if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2506 s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2507 s->sh.offset[i - 1] = offset;
2510 if (s->sh.num_entry_point_offsets != 0) {
2511 offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2512 if (length < offset) {
2513 av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2514 res = AVERROR_INVALIDDATA;
2517 s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2518 s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2523 for (i = 1; i < s->threads_number; i++) {
2524 s->sList[i]->HEVClc->first_qp_group = 1;
2525 s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2526 memcpy(s->sList[i], s, sizeof(HEVCContext));
2527 s->sList[i]->HEVClc = s->HEVClcList[i];
2530 avpriv_atomic_int_set(&s->wpp_err, 0);
2531 ff_reset_entries(s->avctx);
2533 for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2538 if (s->ps.pps->entropy_coding_sync_enabled_flag)
2539 s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2541 for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
/*
 * Attach side data derived from SEI messages to the output frame:
 * stereo 3D packing, display orientation (rotation matrix), mastering
 * display colour metadata, and A53 closed captions.
 * Returns 0 on success or AVERROR(ENOMEM) on allocation failure.
 */
2549 static int set_side_data(HEVCContext *s)
2551 AVFrame *out = s->ref->frame;
/* Frame packing SEI: only arrangement types 3..5 with a meaningful
 * content interpretation are exported. */
2553 if (s->sei_frame_packing_present &&
2554 s->frame_packing_arrangement_type >= 3 &&
2555 s->frame_packing_arrangement_type <= 5 &&
2556 s->content_interpretation_type > 0 &&
2557 s->content_interpretation_type < 3) {
2558 AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2560 return AVERROR(ENOMEM);
2562 switch (s->frame_packing_arrangement_type) {
2564 if (s->quincunx_subsampling)
2565 stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2567 stereo->type = AV_STEREO3D_SIDEBYSIDE;
2570 stereo->type = AV_STEREO3D_TOPBOTTOM;
2573 stereo->type = AV_STEREO3D_FRAMESEQUENCE;
/* content_interpretation_type 2 means right view first — confirm SEI
 * semantics. */
2577 if (s->content_interpretation_type == 2)
2578 stereo->flags = AV_STEREO3D_FLAG_INVERT;
/* Display orientation SEI: the angle is signalled in 1/65536 turns. */
2581 if (s->sei_display_orientation_present &&
2582 (s->sei_anticlockwise_rotation || s->sei_hflip || s->sei_vflip)) {
2583 double angle = s->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
2584 AVFrameSideData *rotation = av_frame_new_side_data(out,
2585 AV_FRAME_DATA_DISPLAYMATRIX,
2586 sizeof(int32_t) * 9);
2588 return AVERROR(ENOMEM);
2590 av_display_rotation_set((int32_t *)rotation->data, angle);
2591 av_display_matrix_flip((int32_t *)rotation->data,
2592 s->sei_hflip, s->sei_vflip);
2595 // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2596 // so the side data persists for the entire coded video sequence.
2597 if (s->sei_mastering_display_info_present > 0 &&
2598 IS_IRAP(s) && s->no_rasl_output_flag) {
2599 s->sei_mastering_display_info_present--;
2601 if (s->sei_mastering_display_info_present) {
2602 // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2603 const int mapping[3] = {2, 0, 1};
2604 const int chroma_den = 50000;
2605 const int luma_den = 10000;
2607 AVMasteringDisplayMetadata *metadata =
2608 av_mastering_display_metadata_create_side_data(out);
2610 return AVERROR(ENOMEM);
2612 for (i = 0; i < 3; i++) {
2613 const int j = mapping[i];
2614 metadata->display_primaries[i][0].num = s->display_primaries[j][0];
2615 metadata->display_primaries[i][0].den = chroma_den;
2616 metadata->display_primaries[i][1].num = s->display_primaries[j][1];
2617 metadata->display_primaries[i][1].den = chroma_den;
2619 metadata->white_point[0].num = s->white_point[0];
2620 metadata->white_point[0].den = chroma_den;
2621 metadata->white_point[1].num = s->white_point[1];
2622 metadata->white_point[1].den = chroma_den;
2624 metadata->max_luminance.num = s->max_mastering_luminance;
2625 metadata->max_luminance.den = luma_den;
2626 metadata->min_luminance.num = s->min_mastering_luminance;
2627 metadata->min_luminance.den = luma_den;
2628 metadata->has_luminance = 1;
2629 metadata->has_primaries = 1;
2631 av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2632 av_log(s->avctx, AV_LOG_DEBUG,
2633 "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2634 av_q2d(metadata->display_primaries[0][0]),
2635 av_q2d(metadata->display_primaries[0][1]),
2636 av_q2d(metadata->display_primaries[1][0]),
2637 av_q2d(metadata->display_primaries[1][1]),
2638 av_q2d(metadata->display_primaries[2][0]),
2639 av_q2d(metadata->display_primaries[2][1]),
2640 av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2641 av_log(s->avctx, AV_LOG_DEBUG,
2642 "min_luminance=%f, max_luminance=%f\n",
2643 av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
/* A53 captions: ownership of the buffer contents moves to the side data;
 * the cached copy is freed afterwards. */
2646 if (s->a53_caption) {
2647 AVFrameSideData* sd = av_frame_new_side_data(out,
2648 AV_FRAME_DATA_A53_CC,
2649 s->a53_caption_size);
2651 memcpy(sd->data, s->a53_caption, s->a53_caption_size);
2652 av_freep(&s->a53_caption);
2653 s->a53_caption_size = 0;
2654 s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
/*
 * Per-frame initialisation before decoding the first slice of a picture:
 * clear the deblocking/SAO bookkeeping tables, acquire a new reference
 * frame, build the frame's reference picture set, attach side data and
 * run output bumping. On failure the new reference is released.
 */
2660 static int hevc_frame_start(HEVCContext *s)
2662 HEVCLocalContext *lc = s->HEVClc;
2663 int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2664 ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
/* Reset per-picture filter state; tab_slice_address = -1 marks CTBs as
 * not yet decoded. */
2667 memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2668 memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2669 memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2670 memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2671 memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2674 s->first_nal_type = s->nal_unit_type;
2676 if (s->ps.pps->tiles_enabled_flag)
2677 lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2679 ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2683 ret = ff_hevc_frame_rps(s);
2685 av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2689 s->ref->frame->key_frame = IS_IRAP(s);
2691 ret = set_side_data(s);
/* Map slice_type (B=0,P=1,I=2) to AVPictureType — confirm enum values. */
2695 s->frame->pict_type = 3 - s->sh.slice_type;
2698 ff_hevc_bump_frame(s);
2700 av_frame_unref(s->output_frame);
2701 ret = ff_hevc_output_frame(s, s->output_frame, 0);
/* Frame-threading: setup is finished once the new ref exists, except
 * when a hwaccel needs the full frame first. */
2705 if (!s->avctx->hwaccel)
2706 ff_thread_finish_setup(s->avctx);
/* Error path: drop the newly acquired reference. */
2712 ff_hevc_unref_frame(s, s->ref, ~0);
/*
 * Decode a single NAL unit: dispatch on nal->type to the parameter-set
 * parsers (VPS/SPS/PPS), the SEI parser, or — for VCL NAL types — the
 * slice header and slice data decoders (serial or WPP). Errors are
 * fatal only when AV_EF_EXPLODE is set.
 */
2717 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2719 HEVCLocalContext *lc = s->HEVClc;
2720 GetBitContext *gb = &lc->gb;
2721 int ctb_addr_ts, ret;
2724 s->nal_unit_type = nal->type;
2725 s->temporal_id = nal->temporal_id;
2727 switch (s->nal_unit_type) {
2729 ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2734 ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2735 s->apply_defdispwin);
2740 ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
2744 case NAL_SEI_PREFIX:
2745 case NAL_SEI_SUFFIX:
2746 ret = ff_hevc_decode_nal_sei(s);
/* VCL NAL types (slice segments) fall through to common handling. */
2757 case NAL_BLA_W_RADL:
2759 case NAL_IDR_W_RADL:
2766 ret = hls_slice_header(s);
/* Random-access handling: a CRA/BLA picture resets max_ra so that its
 * RASL pictures can be skipped. */
2770 if (s->max_ra == INT_MAX) {
2771 if (s->nal_unit_type == NAL_CRA_NUT || IS_BLA(s)) {
2775 s->max_ra = INT_MIN;
2779 if ((s->nal_unit_type == NAL_RASL_R || s->nal_unit_type == NAL_RASL_N) &&
2780 s->poc <= s->max_ra) {
2784 if (s->nal_unit_type == NAL_RASL_R && s->poc > s->max_ra)
2785 s->max_ra = INT_MIN;
2788 if (s->sh.first_slice_in_pic_flag) {
2789 ret = hevc_frame_start(s);
2792 } else if (!s->ref) {
2793 av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
/* All VCL NALUs of one picture must share the same NAL type. */
2797 if (s->nal_unit_type != s->first_nal_type) {
2798 av_log(s->avctx, AV_LOG_ERROR,
2799 "Non-matching NAL types of the VCL NALUs: %d %d\n",
2800 s->first_nal_type, s->nal_unit_type);
2801 return AVERROR_INVALIDDATA;
2804 if (!s->sh.dependent_slice_segment_flag &&
2805 s->sh.slice_type != I_SLICE) {
2806 ret = ff_hevc_slice_rpl(s);
2808 av_log(s->avctx, AV_LOG_WARNING,
2809 "Error constructing the reference lists for the current slice.\n");
2814 if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
2815 ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
2820 if (s->avctx->hwaccel) {
2821 ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
/* Software path: WPP when multiple threads and entry points exist. */
2825 if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
2826 ctb_addr_ts = hls_slice_data_wpp(s, nal);
2828 ctb_addr_ts = hls_slice_data(s);
2829 if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
2833 if (ctb_addr_ts < 0) {
/* EOS/EOB: start a new coded video sequence. */
2841 s->seq_decode = (s->seq_decode + 1) & 0xff;
2842 s->max_ra = INT_MAX;
2848 av_log(s->avctx, AV_LOG_INFO,
2849 "Skipping NAL unit %d\n", s->nal_unit_type);
2854 if (s->avctx->err_recognition & AV_EF_EXPLODE)
/*
 * Split an input packet into NAL units and decode each in turn.
 * Per-NAL errors are warnings unless AV_EF_EXPLODE is set; on success
 * with frame threading, final progress is reported for the new frame.
 */
2859 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
2864 s->last_eos = s->eos;
2867 /* split the input packet into NAL units, so we know the upper bound on the
2868 * number of slices in the frame */
2869 ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
2870 s->nal_length_size, s->avctx->codec_id);
2872 av_log(s->avctx, AV_LOG_ERROR,
2873 "Error splitting the input into NAL units.\n");
/* Pre-scan for EOB/EOS so the end-of-sequence state is known up front. */
2877 for (i = 0; i < s->pkt.nb_nals; i++) {
2878 if (s->pkt.nals[i].type == NAL_EOB_NUT ||
2879 s->pkt.nals[i].type == NAL_EOS_NUT)
2883 /* decode the NAL units */
2884 for (i = 0; i < s->pkt.nb_nals; i++) {
2885 ret = decode_nal_unit(s, &s->pkt.nals[i]);
2887 av_log(s->avctx, AV_LOG_WARNING,
2888 "Error parsing NAL unit #%d.\n", i);
2894 if (s->ref && s->threads_type == FF_THREAD_FRAME)
2895 ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
/* Log a 16-byte MD5 digest as 32 lowercase hex characters (no newline). */
2900 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
2903 for (i = 0; i < 16; i++)
2904 av_log(log_ctx, level, "%02"PRIx8, md5[i]);
/*
 * Verify the decoded frame against the per-plane MD5 checksums carried
 * in the picture-hash SEI (s->md5). For >8-bit formats the plane rows
 * are byteswapped into a scratch buffer first, since the SEI checksums
 * are little-endian. Returns 0 on match, AVERROR_INVALIDDATA on
 * mismatch, or a negative error on setup failure.
 */
2907 static int verify_md5(HEVCContext *s, AVFrame *frame)
2909 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
2914 return AVERROR(EINVAL);
2916 pixel_shift = desc->comp[0].depth > 8;
2918 av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
2921 /* the checksums are LE, so we have to byteswap for >8bpp formats
2924 if (pixel_shift && !s->checksum_buf) {
2925 av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
2926 FFMAX3(frame->linesize[0], frame->linesize[1],
2927 frame->linesize[2]));
2928 if (!s->checksum_buf)
2929 return AVERROR(ENOMEM);
/* One MD5 per plane; chroma planes use the subsampled dimensions. */
2933 for (i = 0; frame->data[i]; i++) {
2934 int width = s->avctx->coded_width;
2935 int height = s->avctx->coded_height;
2936 int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
2937 int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
2940 av_md5_init(s->md5_ctx);
2941 for (j = 0; j < h; j++) {
2942 const uint8_t *src = frame->data[i] + j * frame->linesize[i];
2945 s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
2946 (const uint16_t *) src, w);
2947 src = s->checksum_buf;
2950 av_md5_update(s->md5_ctx, src, w << pixel_shift);
2952 av_md5_final(s->md5_ctx, md5);
2954 if (!memcmp(md5, s->md5[i], 16)) {
2955 av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
2956 print_md5(s->avctx, AV_LOG_DEBUG, md5);
2957 av_log (s->avctx, AV_LOG_DEBUG, "; ");
2959 av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
2960 print_md5(s->avctx, AV_LOG_ERROR, md5);
2961 av_log (s->avctx, AV_LOG_ERROR, " != ");
2962 print_md5(s->avctx, AV_LOG_ERROR, s->md5[i]);
2963 av_log (s->avctx, AV_LOG_ERROR, "\n");
2964 return AVERROR_INVALIDDATA;
2968 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/*
 * Top-level AVCodec decode callback. An empty packet flushes pending
 * output frames; otherwise the packet's NAL units are decoded, the
 * hwaccel frame is finalised if used, the SEI MD5 checksum is verified
 * when error recognition requests it, and a pending output frame is
 * moved into *data with *got_output set.
 */
2973 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
2977 HEVCContext *s = avctx->priv_data;
/* Flush path: emit buffered frames when the packet is empty. */
2980 ret = ff_hevc_output_frame(s, data, 1);
2989 ret = decode_nal_units(s, avpkt->data, avpkt->size);
2993 if (avctx->hwaccel) {
2994 if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
2995 av_log(avctx, AV_LOG_ERROR,
2996 "hardware accelerator failed to decode picture\n");
2997 ff_hevc_unref_frame(s, s->ref, ~0);
3001 /* verify the SEI checksum */
3002 if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3004 ret = verify_md5(s, s->ref->frame);
3005 if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3006 ff_hevc_unref_frame(s, s->ref, ~0);
3013 if (s->is_decoded) {
3014 av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3018 if (s->output_frame->buf[0]) {
3019 av_frame_move_ref(data, s->output_frame);
/**
 * Make dst an additional reference to src: take new AVBufferRefs on all of
 * src's refcounted side buffers and copy its scalar metadata by value.
 * On any allocation failure control reaches the cleanup at the bottom
 * (the intermediate gotos are elided from this extract), dst is fully
 * unreferenced and AVERROR(ENOMEM) is returned.
 * NOTE(review): leading integers are the original file's line numbers.
 */
3026 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
/* thread-aware reference to the underlying frame buffers */
3030 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
/* motion-vector field buffer */
3034 dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3035 if (!dst->tab_mvf_buf)
3037 dst->tab_mvf = src->tab_mvf;
/* reference-picture-list table buffer */
3039 dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3040 if (!dst->rpl_tab_buf)
3042 dst->rpl_tab = src->rpl_tab;
3044 dst->rpl_buf = av_buffer_ref(src->rpl_buf);
/* plain scalar state is copied by value */
3048 dst->poc = src->poc;
3049 dst->ctb_count = src->ctb_count;
3050 dst->window = src->window;
3051 dst->flags = src->flags;
3052 dst->sequence = src->sequence;
/* hwaccel private data travels with the picture as a refcounted buffer */
3054 if (src->hwaccel_picture_private) {
3055 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3056 if (!dst->hwaccel_priv_buf)
3058 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* error path: undo every reference taken above */
3063 ff_hevc_unref_frame(s, dst, ~0);
3064 return AVERROR(ENOMEM);
/**
 * AVCodec.close callback: release everything owned by the HEVCContext —
 * MD5/CABAC state, SAO line buffers, the output frame, all DPB entries,
 * cached VPS/SPS/PPS parameter sets, slice-header arrays and per-thread
 * local contexts.
 * NOTE(review): elided extract — leading integers are original line numbers.
 */
3067 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3069 HEVCContext *s = avctx->priv_data;
3074 av_freep(&s->md5_ctx);
3076 av_freep(&s->cabac_state);
/* SAO pixel line buffers for the three planes */
3078 for (i = 0; i < 3; i++) {
3079 av_freep(&s->sao_pixel_buffer_h[i]);
3080 av_freep(&s->sao_pixel_buffer_v[i]);
3082 av_frame_free(&s->output_frame);
/* unreference, then free, every decoded-picture-buffer slot */
3084 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3085 ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3086 av_frame_free(&s->DPB[i].frame);
/* drop all cached parameter sets */
3089 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++)
3090 av_buffer_unref(&s->ps.vps_list[i]);
3091 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++)
3092 av_buffer_unref(&s->ps.sps_list[i]);
3093 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++)
3094 av_buffer_unref(&s->ps.pps_list[i]);
/* slice-header side arrays */
3099 av_freep(&s->sh.entry_point_offset);
3100 av_freep(&s->sh.offset);
3101 av_freep(&s->sh.size);
/* secondary (slice-thread) local contexts; index 0 is handled below */
3103 for (i = 1; i < s->threads_number; i++) {
3104 HEVCLocalContext *lc = s->HEVClcList[i];
3106 av_freep(&s->HEVClcList[i]);
3107 av_freep(&s->sList[i]);
/* free slot 0 only when it owns the active local context */
3110 if (s->HEVClc == s->HEVClcList[0])
3112 av_freep(&s->HEVClcList[0]);
3114 ff_h2645_packet_uninit(&s->pkt);
/**
 * Allocate the context-lifetime resources of the decoder: local context,
 * CABAC state buffer, output frame, one AVFrame per DPB slot and the MD5
 * context; initializes the bswap DSP and SEI state.  On allocation failure
 * control reaches the cleanup at the bottom (intermediate gotos are elided
 * from this extract) and partial allocations are released via
 * hevc_decode_free().
 * NOTE(review): leading integers are the original file's line numbers.
 */
3119 static av_cold int hevc_init_context(AVCodecContext *avctx)
3121 HEVCContext *s = avctx->priv_data;
3126 s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3129 s->HEVClcList[0] = s->HEVClc;
/* one buffer holding all CABAC context models */
3132 s->cabac_state = av_malloc(HEVC_CONTEXTS);
3133 if (!s->cabac_state)
3136 s->output_frame = av_frame_alloc();
3137 if (!s->output_frame)
/* pre-allocate an AVFrame for every DPB slot; tf.f aliases it for the
 * frame-threading helpers */
3140 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3141 s->DPB[i].frame = av_frame_alloc();
3142 if (!s->DPB[i].frame)
3144 s->DPB[i].tf.f = s->DPB[i].frame;
/* sentinel: no random-access point seen yet (also reset on flush) */
3147 s->max_ra = INT_MAX;
/* MD5 context used by the SEI checksum verification */
3149 s->md5_ctx = av_md5_alloc();
3153 ff_bswapdsp_init(&s->bdsp);
3155 s->context_initialized = 1;
3158 ff_hevc_reset_sei(s);
/* error path: release whatever was already allocated */
3163 hevc_decode_free(avctx);
3164 return AVERROR(ENOMEM);
/**
 * Frame-threading sync callback: copy the decoding state a worker thread
 * needs from the source context (s0) into the destination (s) — DPB
 * picture references, VPS/SPS/PPS lists, SPS-derived setup and assorted
 * scalar state.
 * NOTE(review): elided extract — leading integers are original line
 * numbers; several guard/return statements between visible lines are
 * not shown.
 */
3167 static int hevc_update_thread_context(AVCodecContext *dst,
3168 const AVCodecContext *src)
3170 HEVCContext *s = dst->priv_data;
3171 HEVCContext *s0 = src->priv_data;
/* lazily initialize the destination context on the first sync */
3174 if (!s->context_initialized) {
3175 ret = hevc_init_context(dst);
/* re-reference the source thread's DPB pictures into our DPB */
3180 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3181 ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3182 if (s0->DPB[i].frame->buf[0]) {
3183 ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3189 if (s->ps.sps != s0->ps.sps)
/* mirror the parameter-set lists by taking fresh buffer references */
3191 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3192 av_buffer_unref(&s->ps.vps_list[i]);
3193 if (s0->ps.vps_list[i]) {
3194 s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]);
3195 if (!s->ps.vps_list[i])
3196 return AVERROR(ENOMEM);
3200 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3201 av_buffer_unref(&s->ps.sps_list[i]);
3202 if (s0->ps.sps_list[i]) {
3203 s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]);
3204 if (!s->ps.sps_list[i])
3205 return AVERROR(ENOMEM);
3209 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3210 av_buffer_unref(&s->ps.pps_list[i]);
3211 if (s0->ps.pps_list[i]) {
3212 s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]);
3213 if (!s->ps.pps_list[i])
3214 return AVERROR(ENOMEM);
/* active SPS changed: rebuild all SPS-derived decoder state */
3218 if (s->ps.sps != s0->ps.sps)
3219 if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
/* scalar decoding state copied by value */
3222 s->seq_decode = s0->seq_decode;
3223 s->seq_output = s0->seq_output;
3224 s->pocTid0 = s0->pocTid0;
3225 s->max_ra = s0->max_ra;
3227 s->no_rasl_output_flag = s0->no_rasl_output_flag;
3229 s->is_nalff = s0->is_nalff;
3230 s->nal_length_size = s0->nal_length_size;
3232 s->threads_number = s0->threads_number;
3233 s->threads_type = s0->threads_type;
/* guarded by a condition elided from this extract — presumably an
 * end-of-sequence flag; bumps the sequence counter (mod 256) and
 * re-arms the max_ra sentinel.  TODO(review): confirm against full file. */
3236 s->seq_decode = (s->seq_decode + 1) & 0xff;
3237 s->max_ra = INT_MAX;
/**
 * Parse avctx->extradata before the first packet.  Handles both the MP4
 * "hvcC" configuration record and plain Annex B NAL units, then exports
 * stream-level parameters from the first SPS found.
 * NOTE(review): elided extract — leading integers are original line
 * numbers; some error-check/return lines are not shown.
 */
3243 static int hevc_decode_extradata(HEVCContext *s)
3245 AVCodecContext *avctx = s->avctx;
3249 bytestream2_init(&gb, avctx->extradata, avctx->extradata_size);
/* Annex B data starts with a 00 00 01 / 00 00 00 01 start code, so any
 * other leading bytes indicate an hvcC configuration record */
3251 if (avctx->extradata_size > 3 &&
3252 (avctx->extradata[0] || avctx->extradata[1] ||
3253 avctx->extradata[2] > 1)) {
3254 /* It seems the extradata is encoded as hvcC format.
3255 * Temporarily, we support configurationVersion==0 until 14496-15 3rd
3256 * is finalized. When finalized, configurationVersion will be 1 and we
3257 * can recognize hvcC by checking if avctx->extradata[0]==1 or not. */
3258 int i, j, num_arrays, nal_len_size;
/* skip the fixed 21-byte head of the HEVCDecoderConfigurationRecord */
3262 bytestream2_skip(&gb, 21);
/* lengthSizeMinusOne lives in the low 2 bits of the next byte */
3263 nal_len_size = (bytestream2_get_byte(&gb) & 3) + 1;
3264 num_arrays = bytestream2_get_byte(&gb);
3266 /* nal units in the hvcC always have length coded with 2 bytes,
3267 * so put a fake nal_length_size = 2 while parsing them */
3268 s->nal_length_size = 2;
3270 /* Decode nal units from hvcC. */
3271 for (i = 0; i < num_arrays; i++) {
3272 int type = bytestream2_get_byte(&gb) & 0x3f;
3273 int cnt = bytestream2_get_be16(&gb);
3275 for (j = 0; j < cnt; j++) {
3276 // +2 for the nal size field
3277 int nalsize = bytestream2_peek_be16(&gb) + 2;
/* bounds check before touching the unit's payload */
3278 if (bytestream2_get_bytes_left(&gb) < nalsize) {
3279 av_log(s->avctx, AV_LOG_ERROR,
3280 "Invalid NAL unit size in extradata.\n");
3281 return AVERROR_INVALIDDATA;
3284 ret = decode_nal_units(s, gb.buffer, nalsize);
3286 av_log(avctx, AV_LOG_ERROR,
3287 "Decoding nal unit %d %d from hvcC failed\n",
3291 bytestream2_skip(&gb, nalsize);
3295 /* Now store right nal length size, that will be used to parse
3297 s->nal_length_size = nal_len_size;
/* raw Annex B extradata: decode it in a single pass */
3300 ret = decode_nal_units(s, avctx->extradata, avctx->extradata_size);
3305 /* export stream parameters from the first SPS */
3306 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3307 if (s->ps.sps_list[i]) {
3308 const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3309 export_stream_params(s->avctx, &s->ps, sps);
/**
 * AVCodec.init callback: allocate the context, pick slice/frame threading
 * and parse container-provided extradata, if any.
 * NOTE(review): elided extract — leading integers are original line numbers.
 */
3317 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3319 HEVCContext *s = avctx->priv_data;
/* frame threads need per-frame progress reporting on DPB frames */
3322 avctx->internal->allocate_progress = 1;
3324 ret = hevc_init_context(avctx);
3328 s->enable_parallel_tiles = 0;
3329 s->picture_struct = 0;
/* slice threading uses avctx->thread_count workers, otherwise just 1 */
3332 if(avctx->active_thread_type & FF_THREAD_SLICE)
3333 s->threads_number = avctx->thread_count;
3335 s->threads_number = 1;
3337 if (avctx->extradata_size > 0 && avctx->extradata) {
3338 ret = hevc_decode_extradata(s);
/* extradata failure is fatal: tear the context down again */
3340 hevc_decode_free(avctx);
/* prefer frame threading when more than one thread is available */
3345 if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3346 s->threads_type = FF_THREAD_FRAME;
3348 s->threads_type = FF_THREAD_SLICE;
/**
 * Frame-threading worker setup: wipe the context copied from the main
 * thread (pointers in it are not owned by this worker) and allocate fresh
 * per-context resources; shared state arrives later through
 * hevc_update_thread_context().
 */
3353 static av_cold int hevc_init_thread_copy(AVCodecContext *avctx)
3355 HEVCContext *s = avctx->priv_data;
/* zero the whole context so no stale pointers survive into this thread */
3358 memset(s, 0, sizeof(*s));
3360 ret = hevc_init_context(avctx);
/**
 * AVCodec.flush callback (e.g. on seek): empty the decoded picture buffer
 * and reset max_ra to its initial INT_MAX sentinel (same value set in
 * hevc_init_context) so decoding restarts cleanly at the new position.
 */
3367 static void hevc_decode_flush(AVCodecContext *avctx)
3369 HEVCContext *s = avctx->priv_data;
3370 ff_hevc_flush_dpb(s);
3371 s->max_ra = INT_MAX;
/* Runtime AVOptions exposed on the decoder's private context.
 * Fix: "stricly" -> "strictly" in the user-visible help text of
 * strict-displaywin (code otherwise byte-identical to the original). */
3375 #define OFFSET(x) offsetof(HEVCContext, x)
3376 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3378 static const AVOption options[] = {
3379     { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3380         AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
/* note: same OFFSET as apply_defdispwin — both names set the same field */
3381     { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3382         AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
/* AVClass for the decoder's private context: names it for logging and
 * hooks up option handling (the .option line is elided in this extract). */
3386 static const AVClass hevc_decoder_class = {
3387     .class_name = "HEVC decoder",
3388     .item_name  = av_default_item_name,
3390     .version    = LIBAVUTIL_VERSION_INT,
3393 AVCodec ff_hevc_decoder = {
3395 .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3396 .type = AVMEDIA_TYPE_VIDEO,
3397 .id = AV_CODEC_ID_HEVC,
3398 .priv_data_size = sizeof(HEVCContext),
3399 .priv_class = &hevc_decoder_class,
3400 .init = hevc_decode_init,
3401 .close = hevc_decode_free,
3402 .decode = hevc_decode_frame,
3403 .flush = hevc_decode_flush,
3404 .update_thread_context = hevc_update_thread_context,
3405 .init_thread_copy = hevc_init_thread_copy,
3406 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3407 AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3408 .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),