4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "libavutil/pixdesc.h"
24 #include "bytestream.h"
/* Inverse of the AV1 recentering transform (spec 5.9.27 inverse_recenter):
 * maps the coded value v back to a value centered around the reference r. */
static uint32_t inverse_recenter(int r, uint32_t v)
{
    if (v > 2 * r)
        return v;
    else if (v & 1)
        return r - ((v + 1) >> 1);
    else
        return r + (v >> 1);
}
/* Decode an unsigned subexponential-coded value relative to reference r
 * within [0, mx) (spec 5.9.26). Mirrors the value when r is in the upper
 * half of the range so the recentering stays in bounds. */
static uint32_t decode_unsigned_subexp_with_ref(uint32_t sub_exp,
                                                int mx, int r)
{
    if ((r << 1) <= mx)
        return inverse_recenter(r, sub_exp);
    else
        return mx - 1 - inverse_recenter(mx - 1 - r, sub_exp);
}
/* Decode a signed subexponential-coded value in [low, high) relative to r
 * (spec 5.9.25): shift the range to start at zero, decode unsigned, then
 * shift back. */
static int32_t decode_signed_subexp_with_ref(uint32_t sub_exp, int low,
                                             int high, int r)
{
    int32_t x = decode_unsigned_subexp_with_ref(sub_exp, high - low, r - low);
    return x + low;
}
56 static void read_global_param(AV1DecContext *s, int type, int ref, int idx)
58 uint8_t primary_frame, prev_frame;
59 uint32_t abs_bits, prec_bits, round, prec_diff, sub, mx;
60 int32_t r, prev_gm_param;
62 primary_frame = s->raw_frame_header->primary_ref_frame;
63 prev_frame = s->raw_frame_header->ref_frame_idx[primary_frame];
64 abs_bits = AV1_GM_ABS_ALPHA_BITS;
65 prec_bits = AV1_GM_ALPHA_PREC_BITS;
67 /* setup_past_independence() sets PrevGmParams to default values. We can
68 * simply point to the current's frame gm_params as they will be initialized
69 * with defaults at this point.
71 if (s->raw_frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE)
72 prev_gm_param = s->cur_frame.gm_params[ref][idx];
74 prev_gm_param = s->ref[prev_frame].gm_params[ref][idx];
77 if (type == AV1_WARP_MODEL_TRANSLATION) {
78 abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS -
79 !s->raw_frame_header->allow_high_precision_mv;
80 prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS -
81 !s->raw_frame_header->allow_high_precision_mv;
83 abs_bits = AV1_GM_ABS_TRANS_BITS;
84 prec_bits = AV1_GM_TRANS_PREC_BITS;
87 round = (idx % 3) == 2 ? (1 << AV1_WARPEDMODEL_PREC_BITS) : 0;
88 prec_diff = AV1_WARPEDMODEL_PREC_BITS - prec_bits;
89 sub = (idx % 3) == 2 ? (1 << prec_bits) : 0;
91 r = (prev_gm_param >> prec_diff) - sub;
93 s->cur_frame.gm_params[ref][idx] =
94 (decode_signed_subexp_with_ref(s->raw_frame_header->gm_params[ref][idx],
95 -mx, mx + 1, r) << prec_diff) + round;
99 * update gm type/params, since cbs already implemented part of this funcation,
100 * so we don't need to full implement spec.
102 static void global_motion_params(AV1DecContext *s)
104 const AV1RawFrameHeader *header = s->raw_frame_header;
107 for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
108 s->cur_frame.gm_type[ref] = AV1_WARP_MODEL_IDENTITY;
109 for (int i = 0; i < 6; i++)
110 s->cur_frame.gm_params[ref][i] = (i % 3 == 2) ?
111 1 << AV1_WARPEDMODEL_PREC_BITS : 0;
113 if (header->frame_type == AV1_FRAME_KEY ||
114 header->frame_type == AV1_FRAME_INTRA_ONLY)
117 for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
118 if (header->is_global[ref]) {
119 if (header->is_rot_zoom[ref]) {
120 type = AV1_WARP_MODEL_ROTZOOM;
122 type = header->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
123 : AV1_WARP_MODEL_AFFINE;
126 type = AV1_WARP_MODEL_IDENTITY;
128 s->cur_frame.gm_type[ref] = type;
130 if (type >= AV1_WARP_MODEL_ROTZOOM) {
131 read_global_param(s, type, ref, 2);
132 read_global_param(s, type, ref, 3);
133 if (type == AV1_WARP_MODEL_AFFINE) {
134 read_global_param(s, type, ref, 4);
135 read_global_param(s, type, ref, 5);
137 s->cur_frame.gm_params[ref][4] = -s->cur_frame.gm_params[ref][3];
138 s->cur_frame.gm_params[ref][5] = s->cur_frame.gm_params[ref][2];
141 if (type >= AV1_WARP_MODEL_TRANSLATION) {
142 read_global_param(s, type, ref, 0);
143 read_global_param(s, type, ref, 1);
148 static int get_relative_dist(const AV1RawSequenceHeader *seq,
149 unsigned int a, unsigned int b)
151 unsigned int diff = a - b;
152 unsigned int m = 1 << seq->order_hint_bits_minus_1;
153 return (diff & (m - 1)) - (diff & m);
156 static void skip_mode_params(AV1DecContext *s)
158 const AV1RawFrameHeader *header = s->raw_frame_header;
159 const AV1RawSequenceHeader *seq = s->raw_seq;
161 int forward_idx, backward_idx;
162 int forward_hint, backward_hint;
163 int second_forward_idx, second_forward_hint;
164 int ref_hint, dist, i;
166 if (!header->skip_mode_present)
171 for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
172 ref_hint = s->ref[header->ref_frame_idx[i]].raw_frame_header->order_hint;
173 dist = get_relative_dist(seq, ref_hint, header->order_hint);
175 if (forward_idx < 0 ||
176 get_relative_dist(seq, ref_hint, forward_hint) > 0) {
178 forward_hint = ref_hint;
180 } else if (dist > 0) {
181 if (backward_idx < 0 ||
182 get_relative_dist(seq, ref_hint, backward_hint) < 0) {
184 backward_hint = ref_hint;
189 if (forward_idx < 0) {
191 } else if (backward_idx >= 0) {
192 s->cur_frame.skip_mode_frame_idx[0] =
193 AV1_REF_FRAME_LAST + FFMIN(forward_idx, backward_idx);
194 s->cur_frame.skip_mode_frame_idx[1] =
195 AV1_REF_FRAME_LAST + FFMAX(forward_idx, backward_idx);
199 second_forward_idx = -1;
200 for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
201 ref_hint = s->ref[header->ref_frame_idx[i]].raw_frame_header->order_hint;
202 if (get_relative_dist(seq, ref_hint, forward_hint) < 0) {
203 if (second_forward_idx < 0 ||
204 get_relative_dist(seq, ref_hint, second_forward_hint) > 0) {
205 second_forward_idx = i;
206 second_forward_hint = ref_hint;
211 if (second_forward_idx < 0)
214 s->cur_frame.skip_mode_frame_idx[0] =
215 AV1_REF_FRAME_LAST + FFMIN(forward_idx, second_forward_idx);
216 s->cur_frame.skip_mode_frame_idx[1] =
217 AV1_REF_FRAME_LAST + FFMAX(forward_idx, second_forward_idx);
220 static void coded_lossless_param(AV1DecContext *s)
222 const AV1RawFrameHeader *header = s->raw_frame_header;
225 if (header->delta_q_y_dc || header->delta_q_u_ac ||
226 header->delta_q_u_dc || header->delta_q_v_ac ||
227 header->delta_q_v_dc) {
228 s->cur_frame.coded_lossless = 0;
232 s->cur_frame.coded_lossless = 1;
233 for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
235 if (header->feature_enabled[i][AV1_SEG_LVL_ALT_Q]) {
236 qindex = (header->base_q_idx +
237 header->feature_value[i][AV1_SEG_LVL_ALT_Q]);
239 qindex = header->base_q_idx;
241 qindex = av_clip_uintp2(qindex, 8);
244 s->cur_frame.coded_lossless = 0;
250 static int init_tile_data(AV1DecContext *s)
254 s->raw_frame_header->tile_cols * s->raw_frame_header->tile_rows;
255 if (s->tile_num < cur_tile_num) {
256 int ret = av_reallocp_array(&s->tile_group_info, cur_tile_num,
257 sizeof(TileGroupInfo));
263 s->tile_num = cur_tile_num;
268 static int get_tiles_info(AVCodecContext *avctx, const AV1RawTileGroup *tile_group)
270 AV1DecContext *s = avctx->priv_data;
272 uint16_t tile_num, tile_row, tile_col;
273 uint32_t size = 0, size_bytes = 0;
275 bytestream2_init(&gb, tile_group->tile_data.data,
276 tile_group->tile_data.data_size);
277 s->tg_start = tile_group->tg_start;
278 s->tg_end = tile_group->tg_end;
280 for (tile_num = tile_group->tg_start; tile_num <= tile_group->tg_end; tile_num++) {
281 tile_row = tile_num / s->raw_frame_header->tile_cols;
282 tile_col = tile_num % s->raw_frame_header->tile_cols;
284 if (tile_num == tile_group->tg_end) {
285 s->tile_group_info[tile_num].tile_size = bytestream2_get_bytes_left(&gb);
286 s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
287 s->tile_group_info[tile_num].tile_row = tile_row;
288 s->tile_group_info[tile_num].tile_column = tile_col;
291 size_bytes = s->raw_frame_header->tile_size_bytes_minus1 + 1;
292 if (bytestream2_get_bytes_left(&gb) < size_bytes)
293 return AVERROR_INVALIDDATA;
295 for (int i = 0; i < size_bytes; i++)
296 size |= bytestream2_get_byteu(&gb) << 8 * i;
297 if (bytestream2_get_bytes_left(&gb) <= size)
298 return AVERROR_INVALIDDATA;
301 s->tile_group_info[tile_num].tile_size = size;
302 s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
303 s->tile_group_info[tile_num].tile_row = tile_row;
304 s->tile_group_info[tile_num].tile_column = tile_col;
306 bytestream2_skipu(&gb, size);
313 static int get_pixel_format(AVCodecContext *avctx)
315 AV1DecContext *s = avctx->priv_data;
316 const AV1RawSequenceHeader *seq = s->raw_seq;
319 enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
320 #define HWACCEL_MAX (CONFIG_AV1_DXVA2_HWACCEL + \
321 CONFIG_AV1_D3D11VA_HWACCEL * 2 + \
322 CONFIG_AV1_NVDEC_HWACCEL + \
323 CONFIG_AV1_VAAPI_HWACCEL)
324 enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
326 if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
327 bit_depth = seq->color_config.twelve_bit ? 12 : 10;
328 else if (seq->seq_profile <= 2)
329 bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
331 av_log(avctx, AV_LOG_ERROR,
332 "Unknown AV1 profile %d.\n", seq->seq_profile);
336 if (!seq->color_config.mono_chrome) {
337 // 4:4:4 x:0 y:0, 4:2:2 x:1 y:0, 4:2:0 x:1 y:1
338 if (seq->color_config.subsampling_x == 0 &&
339 seq->color_config.subsampling_y == 0) {
341 pix_fmt = AV_PIX_FMT_YUV444P;
342 else if (bit_depth == 10)
343 pix_fmt = AV_PIX_FMT_YUV444P10;
344 else if (bit_depth == 12)
345 pix_fmt = AV_PIX_FMT_YUV444P12;
347 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
348 } else if (seq->color_config.subsampling_x == 1 &&
349 seq->color_config.subsampling_y == 0) {
351 pix_fmt = AV_PIX_FMT_YUV422P;
352 else if (bit_depth == 10)
353 pix_fmt = AV_PIX_FMT_YUV422P10;
354 else if (bit_depth == 12)
355 pix_fmt = AV_PIX_FMT_YUV422P12;
357 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
358 } else if (seq->color_config.subsampling_x == 1 &&
359 seq->color_config.subsampling_y == 1) {
361 pix_fmt = AV_PIX_FMT_YUV420P;
362 else if (bit_depth == 10)
363 pix_fmt = AV_PIX_FMT_YUV420P10;
364 else if (bit_depth == 12)
365 pix_fmt = AV_PIX_FMT_YUV420P12;
367 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
370 if (seq->color_config.subsampling_x == 1 &&
371 seq->color_config.subsampling_y == 1)
372 pix_fmt = AV_PIX_FMT_YUV440P;
374 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
377 av_log(avctx, AV_LOG_DEBUG, "AV1 decode get format: %s.\n",
378 av_get_pix_fmt_name(pix_fmt));
380 if (pix_fmt == AV_PIX_FMT_NONE)
382 s->pix_fmt = pix_fmt;
384 switch (s->pix_fmt) {
385 case AV_PIX_FMT_YUV420P:
386 #if CONFIG_AV1_DXVA2_HWACCEL
387 *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
389 #if CONFIG_AV1_D3D11VA_HWACCEL
390 *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
391 *fmtp++ = AV_PIX_FMT_D3D11;
393 #if CONFIG_AV1_NVDEC_HWACCEL
394 *fmtp++ = AV_PIX_FMT_CUDA;
396 #if CONFIG_AV1_VAAPI_HWACCEL
397 *fmtp++ = AV_PIX_FMT_VAAPI;
400 case AV_PIX_FMT_YUV420P10:
401 #if CONFIG_AV1_DXVA2_HWACCEL
402 *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
404 #if CONFIG_AV1_D3D11VA_HWACCEL
405 *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
406 *fmtp++ = AV_PIX_FMT_D3D11;
408 #if CONFIG_AV1_NVDEC_HWACCEL
409 *fmtp++ = AV_PIX_FMT_CUDA;
411 #if CONFIG_AV1_VAAPI_HWACCEL
412 *fmtp++ = AV_PIX_FMT_VAAPI;
417 *fmtp++ = s->pix_fmt;
418 *fmtp = AV_PIX_FMT_NONE;
420 ret = ff_thread_get_format(avctx, pix_fmts);
425 * check if the HW accel is inited correctly. If not, return un-implemented.
426 * Since now the av1 decoder doesn't support native decode, if it will be
427 * implemented in the future, need remove this check.
429 if (!avctx->hwaccel) {
430 av_log(avctx, AV_LOG_ERROR, "Your platform doesn't suppport"
431 " hardware accelerated AV1 decoding.\n");
432 return AVERROR(ENOSYS);
435 avctx->pix_fmt = ret;
440 static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
442 ff_thread_release_buffer(avctx, &f->tf);
443 av_buffer_unref(&f->hwaccel_priv_buf);
444 f->hwaccel_picture_private = NULL;
445 av_buffer_unref(&f->header_ref);
446 f->raw_frame_header = NULL;
447 f->spatial_id = f->temporal_id = 0;
448 memset(f->skip_mode_frame_idx, 0,
449 2 * sizeof(uint8_t));
450 f->coded_lossless = 0;
453 static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *src)
457 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
461 dst->header_ref = av_buffer_ref(src->header_ref);
462 if (!dst->header_ref)
465 dst->raw_frame_header = src->raw_frame_header;
467 if (src->hwaccel_picture_private) {
468 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
469 if (!dst->hwaccel_priv_buf)
471 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
474 dst->spatial_id = src->spatial_id;
475 dst->temporal_id = src->temporal_id;
478 AV1_NUM_REF_FRAMES * sizeof(uint8_t));
479 memcpy(dst->gm_params,
481 AV1_NUM_REF_FRAMES * 6 * sizeof(int32_t));
482 memcpy(dst->skip_mode_frame_idx,
483 src->skip_mode_frame_idx,
484 2 * sizeof(uint8_t));
485 dst->coded_lossless = src->coded_lossless;
490 av1_frame_unref(avctx, dst);
491 return AVERROR(ENOMEM);
494 static av_cold int av1_decode_free(AVCodecContext *avctx)
496 AV1DecContext *s = avctx->priv_data;
498 for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
499 av1_frame_unref(avctx, &s->ref[i]);
500 av_frame_free(&s->ref[i].tf.f);
502 av1_frame_unref(avctx, &s->cur_frame);
503 av_frame_free(&s->cur_frame.tf.f);
505 av_buffer_unref(&s->seq_ref);
506 av_buffer_unref(&s->header_ref);
507 av_freep(&s->tile_group_info);
509 ff_cbs_fragment_free(&s->current_obu);
510 ff_cbs_close(&s->cbc);
515 static int set_context_with_sequence(AVCodecContext *avctx,
516 const AV1RawSequenceHeader *seq)
518 int width = seq->max_frame_width_minus_1 + 1;
519 int height = seq->max_frame_height_minus_1 + 1;
521 avctx->profile = seq->seq_profile;
522 avctx->level = seq->seq_level_idx[0];
525 seq->color_config.color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
526 avctx->color_primaries = seq->color_config.color_primaries;
527 avctx->colorspace = seq->color_config.color_primaries;
528 avctx->color_trc = seq->color_config.transfer_characteristics;
530 switch (seq->color_config.chroma_sample_position) {
531 case AV1_CSP_VERTICAL:
532 avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
534 case AV1_CSP_COLOCATED:
535 avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
539 if (avctx->width != width || avctx->height != height) {
540 int ret = ff_set_dimensions(avctx, width, height);
544 avctx->sample_aspect_ratio = (AVRational) { 1, 1 };
546 if (seq->timing_info.num_units_in_display_tick &&
547 seq->timing_info.time_scale) {
548 av_reduce(&avctx->framerate.den, &avctx->framerate.num,
549 seq->timing_info.num_units_in_display_tick,
550 seq->timing_info.time_scale,
552 if (seq->timing_info.equal_picture_interval)
553 avctx->ticks_per_frame = seq->timing_info.num_ticks_per_picture_minus_1 + 1;
559 static int update_context_with_frame_header(AVCodecContext *avctx,
560 const AV1RawFrameHeader *header)
562 AVRational aspect_ratio;
563 int width = header->frame_width_minus_1 + 1;
564 int height = header->frame_height_minus_1 + 1;
565 int r_width = header->render_width_minus_1 + 1;
566 int r_height = header->render_height_minus_1 + 1;
569 if (avctx->width != width || avctx->height != height) {
570 ret = ff_set_dimensions(avctx, width, height);
575 av_reduce(&aspect_ratio.num, &aspect_ratio.den,
576 (int64_t)height * r_width,
577 (int64_t)width * r_height,
580 if (av_cmp_q(avctx->sample_aspect_ratio, aspect_ratio)) {
581 ret = ff_set_sar(avctx, aspect_ratio);
589 static av_cold int av1_decode_init(AVCodecContext *avctx)
591 AV1DecContext *s = avctx->priv_data;
592 AV1RawSequenceHeader *seq;
596 s->pix_fmt = AV_PIX_FMT_NONE;
598 for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
599 s->ref[i].tf.f = av_frame_alloc();
600 if (!s->ref[i].tf.f) {
601 av_log(avctx, AV_LOG_ERROR,
602 "Failed to allocate reference frame buffer %d.\n", i);
603 return AVERROR(ENOMEM);
607 s->cur_frame.tf.f = av_frame_alloc();
608 if (!s->cur_frame.tf.f) {
609 av_log(avctx, AV_LOG_ERROR,
610 "Failed to allocate current frame buffer.\n");
611 return AVERROR(ENOMEM);
614 ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, avctx);
618 if (avctx->extradata && avctx->extradata_size) {
619 ret = ff_cbs_read(s->cbc, &s->current_obu, avctx->extradata,
620 avctx->extradata_size);
622 av_log(avctx, AV_LOG_WARNING, "Failed to read extradata.\n");
626 seq = ((CodedBitstreamAV1Context *)(s->cbc->priv_data))->sequence_header;
628 av_log(avctx, AV_LOG_WARNING, "No sequence header available.\n");
632 ret = set_context_with_sequence(avctx, seq);
634 av_log(avctx, AV_LOG_WARNING, "Failed to set decoder context.\n");
639 ff_cbs_fragment_reset(&s->current_obu);
645 static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
647 AV1DecContext *s = avctx->priv_data;
648 AV1RawFrameHeader *header= s->raw_frame_header;
652 f->header_ref = av_buffer_ref(s->header_ref);
654 return AVERROR(ENOMEM);
656 f->raw_frame_header = s->raw_frame_header;
658 ret = update_context_with_frame_header(avctx, header);
660 av_log(avctx, AV_LOG_ERROR, "Failed to update context with frame header\n");
664 if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
668 frame->key_frame = header->frame_type == AV1_FRAME_KEY;
670 switch (header->frame_type) {
672 case AV1_FRAME_INTRA_ONLY:
673 frame->pict_type = AV_PICTURE_TYPE_I;
675 case AV1_FRAME_INTER:
676 frame->pict_type = AV_PICTURE_TYPE_P;
678 case AV1_FRAME_SWITCH:
679 frame->pict_type = AV_PICTURE_TYPE_SP;
683 if (avctx->hwaccel) {
684 const AVHWAccel *hwaccel = avctx->hwaccel;
685 if (hwaccel->frame_priv_data_size) {
686 f->hwaccel_priv_buf =
687 av_buffer_allocz(hwaccel->frame_priv_data_size);
688 if (!f->hwaccel_priv_buf)
690 f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
696 av1_frame_unref(avctx, f);
697 return AVERROR(ENOMEM);
700 static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
701 const AVPacket *pkt, int *got_frame)
703 AV1DecContext *s = avctx->priv_data;
704 const AVFrame *srcframe = s->cur_frame.tf.f;
707 ret = av_frame_ref(frame, srcframe);
711 frame->pts = pkt->pts;
712 frame->pkt_dts = pkt->dts;
713 frame->pkt_size = pkt->size;
720 static int update_reference_list(AVCodecContext *avctx)
722 AV1DecContext *s = avctx->priv_data;
723 const AV1RawFrameHeader *header = s->raw_frame_header;
726 for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
727 if (header->refresh_frame_flags & (1 << i)) {
728 if (s->ref[i].tf.f->buf[0])
729 av1_frame_unref(avctx, &s->ref[i]);
730 if ((ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame)) < 0) {
731 av_log(avctx, AV_LOG_ERROR,
732 "Failed to update frame %d in reference list\n", i);
740 static int get_current_frame(AVCodecContext *avctx)
742 AV1DecContext *s = avctx->priv_data;
745 if (s->cur_frame.tf.f->buf[0])
746 av1_frame_unref(avctx, &s->cur_frame);
748 ret = av1_frame_alloc(avctx, &s->cur_frame);
750 av_log(avctx, AV_LOG_ERROR,
751 "Failed to allocate space for current frame.\n");
755 ret = init_tile_data(s);
757 av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
761 global_motion_params(s);
763 coded_lossless_param(s);
768 static int av1_decode_frame(AVCodecContext *avctx, void *frame,
769 int *got_frame, AVPacket *pkt)
771 AV1DecContext *s = avctx->priv_data;
772 AV1RawTileGroup *raw_tile_group = NULL;
775 ret = ff_cbs_read_packet(s->cbc, &s->current_obu, pkt);
777 av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
780 av_log(avctx, AV_LOG_DEBUG, "Total obu for this frame:%d.\n",
781 s->current_obu.nb_units);
783 for (int i = 0; i < s->current_obu.nb_units; i++) {
784 CodedBitstreamUnit *unit = &s->current_obu.units[i];
785 AV1RawOBU *obu = unit->content;
786 const AV1RawOBUHeader *header;
791 header = &obu->header;
792 av_log(avctx, AV_LOG_DEBUG, "Obu idx:%d, obu type:%d.\n", i, unit->type);
794 switch (unit->type) {
795 case AV1_OBU_SEQUENCE_HEADER:
796 av_buffer_unref(&s->seq_ref);
797 s->seq_ref = av_buffer_ref(unit->content_ref);
799 ret = AVERROR(ENOMEM);
803 s->raw_seq = &obu->obu.sequence_header;
805 ret = set_context_with_sequence(avctx, s->raw_seq);
807 av_log(avctx, AV_LOG_ERROR, "Failed to set context.\n");
812 if (s->pix_fmt == AV_PIX_FMT_NONE) {
813 ret = get_pixel_format(avctx);
815 av_log(avctx, AV_LOG_ERROR,
816 "Failed to get pixel format.\n");
822 if (avctx->hwaccel && avctx->hwaccel->decode_params) {
823 ret = avctx->hwaccel->decode_params(avctx, unit->type, unit->data,
826 av_log(avctx, AV_LOG_ERROR, "HW accel decode params fail.\n");
832 case AV1_OBU_REDUNDANT_FRAME_HEADER:
833 if (s->raw_frame_header)
837 case AV1_OBU_FRAME_HEADER:
839 av_log(avctx, AV_LOG_ERROR, "Missing Sequence Header.\n");
840 ret = AVERROR_INVALIDDATA;
844 av_buffer_unref(&s->header_ref);
845 s->header_ref = av_buffer_ref(unit->content_ref);
846 if (!s->header_ref) {
847 ret = AVERROR(ENOMEM);
851 if (unit->type == AV1_OBU_FRAME)
852 s->raw_frame_header = &obu->obu.frame.header;
854 s->raw_frame_header = &obu->obu.frame_header;
856 if (s->raw_frame_header->show_existing_frame) {
857 if (s->cur_frame.tf.f->buf[0])
858 av1_frame_unref(avctx, &s->cur_frame);
860 ret = av1_frame_ref(avctx, &s->cur_frame,
861 &s->ref[s->raw_frame_header->frame_to_show_map_idx]);
863 av_log(avctx, AV_LOG_ERROR, "Failed to get reference frame.\n");
867 ret = update_reference_list(avctx);
869 av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
873 ret = set_output_frame(avctx, frame, pkt, got_frame);
875 av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
877 s->raw_frame_header = NULL;
882 ret = get_current_frame(avctx);
884 av_log(avctx, AV_LOG_ERROR, "Get current frame error\n");
888 s->cur_frame.spatial_id = header->spatial_id;
889 s->cur_frame.temporal_id = header->temporal_id;
891 if (avctx->hwaccel) {
892 ret = avctx->hwaccel->start_frame(avctx, unit->data,
895 av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
899 if (unit->type != AV1_OBU_FRAME)
902 case AV1_OBU_TILE_GROUP:
903 if (!s->raw_frame_header) {
904 av_log(avctx, AV_LOG_ERROR, "Missing Frame Header.\n");
905 ret = AVERROR_INVALIDDATA;
909 if (unit->type == AV1_OBU_FRAME)
910 raw_tile_group = &obu->obu.frame.tile_group;
912 raw_tile_group = &obu->obu.tile_group;
914 ret = get_tiles_info(avctx, raw_tile_group);
918 if (avctx->hwaccel) {
919 ret = avctx->hwaccel->decode_slice(avctx,
920 raw_tile_group->tile_data.data,
921 raw_tile_group->tile_data.data_size);
923 av_log(avctx, AV_LOG_ERROR,
924 "HW accel decode slice fail.\n");
929 case AV1_OBU_TILE_LIST:
930 case AV1_OBU_TEMPORAL_DELIMITER:
931 case AV1_OBU_PADDING:
932 case AV1_OBU_METADATA:
935 av_log(avctx, AV_LOG_DEBUG,
936 "Unknown obu type: %d (%"SIZE_SPECIFIER" bits).\n",
937 unit->type, unit->data_size);
940 if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
941 if (avctx->hwaccel) {
942 ret = avctx->hwaccel->end_frame(avctx);
944 av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
949 ret = update_reference_list(avctx);
951 av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
955 if (s->raw_frame_header->show_frame) {
956 ret = set_output_frame(avctx, frame, pkt, got_frame);
958 av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
962 raw_tile_group = NULL;
963 s->raw_frame_header = NULL;
968 ff_cbs_fragment_reset(&s->current_obu);
970 s->raw_frame_header = NULL;
974 static void av1_decode_flush(AVCodecContext *avctx)
976 AV1DecContext *s = avctx->priv_data;
978 for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
979 av1_frame_unref(avctx, &s->ref[i]);
981 av1_frame_unref(avctx, &s->cur_frame);
982 s->raw_frame_header = NULL;
985 ff_cbs_flush(s->cbc);
988 AVCodec ff_av1_decoder = {
990 .long_name = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
991 .type = AVMEDIA_TYPE_VIDEO,
992 .id = AV_CODEC_ID_AV1,
993 .priv_data_size = sizeof(AV1DecContext),
994 .init = av1_decode_init,
995 .close = av1_decode_free,
996 .decode = av1_decode_frame,
997 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
998 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
999 FF_CODEC_CAP_INIT_CLEANUP |
1000 FF_CODEC_CAP_SETS_PKT_DTS,
1001 .flush = av1_decode_flush,
1002 .profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
1003 .hw_configs = (const AVCodecHWConfigInternal * []) {
1004 #if CONFIG_AV1_DXVA2_HWACCEL
1007 #if CONFIG_AV1_D3D11VA_HWACCEL
1008 HWACCEL_D3D11VA(av1),
1010 #if CONFIG_AV1_D3D11VA2_HWACCEL
1011 HWACCEL_D3D11VA2(av1),
1013 #if CONFIG_AV1_NVDEC_HWACCEL
1016 #if CONFIG_AV1_VAAPI_HWACCEL