4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 #include "libavutil/pixdesc.h"
24 #include "bytestream.h"
/*
 * Inverse of the recentering used by AV1 subexponential coding
 * (spec 5.9.27, inverse_recenter()): maps an unsigned offset v back to a
 * value centered around the reference r.
 * NOTE(review): only one branch of the function is visible in this view;
 * the spec version also handles v > 2*r and even v — confirm in full file.
 */
29 static uint32_t inverse_recenter(int r, uint32_t v)
34 return r - ((v + 1) >> 1);
/*
 * Spec 5.9.27 decode_unsigned_subexp_with_ref(): remap a decoded
 * subexponential value into [0, mx), recentered around reference r.
 * NOTE(review): the remaining parameters (mx, r) and the branch conditions
 * are elided in this view — confirm against the full file.
 */
39 static uint32_t decode_unsigned_subexp_with_ref(uint32_t sub_exp,
    /* Reference in the lower half of the range: recenter directly. */
43 return inverse_recenter(r, sub_exp);
    /* Otherwise mirror the recentering around the top of the range. */
45 return mx - 1 - inverse_recenter(mx - 1 - r, sub_exp);
/*
 * Spec 5.9.26 decode_signed_subexp_with_ref(): decode a subexponential
 * value in the signed range [low, high), relative to reference r,
 * by shifting into an unsigned range, decoding, and shifting back.
 */
49 static int32_t decode_signed_subexp_with_ref(uint32_t sub_exp, int low,
52 int32_t x = decode_unsigned_subexp_with_ref(sub_exp, high - low, r - low);
/*
 * Decode one global-motion parameter (gm_params[ref][idx]) for the current
 * frame, predicted from the same parameter of the primary reference frame
 * (spec 5.9.25 read_global_param()). Writes s->cur_frame.gm_params[ref][idx].
 * NOTE(review): some lines (declaration of r/mx, mx assignment, else/closing
 * braces) are elided in this view — confirm against the full file.
 */
56 static void read_global_param(AV1DecContext *s, int type, int ref, int idx)
58 uint8_t primary_frame, prev_frame;
59 uint32_t abs_bits, prec_bits, round, prec_diff, sub, mx;
    /* The prediction reference is the frame named by primary_ref_frame. */
62 primary_frame = s->raw_frame_header->primary_ref_frame;
63 prev_frame = s->raw_frame_header->ref_frame_idx[primary_frame];
    /* Default precision: the alpha (non-translation matrix) parameters. */
64 abs_bits = AV1_GM_ABS_ALPHA_BITS;
65 prec_bits = AV1_GM_ALPHA_PREC_BITS;
    /* Translation-only models lose one bit of precision when high-precision
     * motion vectors are disallowed. */
68 if (type == AV1_WARP_MODEL_TRANSLATION) {
69 abs_bits = AV1_GM_ABS_TRANS_ONLY_BITS -
70 !s->raw_frame_header->allow_high_precision_mv;
71 prec_bits = AV1_GM_TRANS_ONLY_PREC_BITS -
72 !s->raw_frame_header->allow_high_precision_mv;
74 abs_bits = AV1_GM_ABS_TRANS_BITS;
75 prec_bits = AV1_GM_TRANS_PREC_BITS;
    /* Diagonal entries (idx % 3 == 2) are stored relative to identity (1.0
     * in warped-model fixed point), hence the extra round/sub terms. */
78 round = (idx % 3) == 2 ? (1 << AV1_WARPEDMODEL_PREC_BITS) : 0;
79 prec_diff = AV1_WARPEDMODEL_PREC_BITS - prec_bits;
80 sub = (idx % 3) == 2 ? (1 << prec_bits) : 0;
    /* Prediction: the previous frame's parameter, reduced to coding precision. */
82 r = (s->ref[prev_frame].gm_params[ref][idx] >> prec_diff) - sub;
    /* CBS already parsed the raw subexp value; finish the spec decode and
     * scale back up to AV1_WARPEDMODEL_PREC_BITS fixed point. */
84 s->cur_frame.gm_params[ref][idx] =
85 (decode_signed_subexp_with_ref(s->raw_frame_header->gm_params[ref][idx],
86 -mx, mx + 1, r) << prec_diff) + round;
90 * Update global-motion type/params. CBS already implements part of this
91 * function, so we do not need to fully implement the spec here.
/*
 * Derive the per-reference global-motion model type and parameters for the
 * current frame (spec 5.9.24 global_motion_params()).
 * NOTE(review): several closing braces, `break`/`return` lines and the
 * declarations of `ref`/`type` are elided in this view.
 */
93 static void global_motion_params(AV1DecContext *s)
95 const AV1RawFrameHeader *header = s->raw_frame_header;
    /* Reset every reference to the identity model before deriving. */
98 for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
99 s->cur_frame.gm_type[ref] = AV1_WARP_MODEL_IDENTITY;
        /* Identity matrix: 1.0 (fixed point) on the diagonal, 0 elsewhere. */
100 for (int i = 0; i < 6; i++)
101 s->cur_frame.gm_params[ref][i] = (i % 3 == 2) ?
102 1 << AV1_WARPEDMODEL_PREC_BITS : 0;
    /* Intra frames carry no global motion; keep the identity defaults. */
104 if (header->frame_type == AV1_FRAME_KEY ||
105 header->frame_type == AV1_FRAME_INTRA_ONLY)
108 for (ref = AV1_REF_FRAME_LAST; ref <= AV1_REF_FRAME_ALTREF; ref++) {
        /* Pick the warp model signalled for this reference. */
109 if (header->is_global[ref]) {
110 if (header->is_rot_zoom[ref]) {
111 type = AV1_WARP_MODEL_ROTZOOM;
113 type = header->is_translation[ref] ? AV1_WARP_MODEL_TRANSLATION
114 : AV1_WARP_MODEL_AFFINE;
117 type = AV1_WARP_MODEL_IDENTITY;
119 s->cur_frame.gm_type[ref] = type;
        /* Matrix (alpha) parameters: indices 2..5. */
121 if (type >= AV1_WARP_MODEL_ROTZOOM) {
122 read_global_param(s, type, ref, 2);
123 read_global_param(s, type, ref, 3);
124 if (type == AV1_WARP_MODEL_AFFINE) {
125 read_global_param(s, type, ref, 4);
126 read_global_param(s, type, ref, 5);
            /* ROTZOOM: the second row is derived from the first
             * (rotation/zoom constraint), not coded. */
128 s->cur_frame.gm_params[ref][4] = -s->cur_frame.gm_params[ref][3];
129 s->cur_frame.gm_params[ref][5] = s->cur_frame.gm_params[ref][2];
        /* Translation parameters: indices 0 and 1. */
132 if (type >= AV1_WARP_MODEL_TRANSLATION) {
133 read_global_param(s, type, ref, 0);
134 read_global_param(s, type, ref, 1);
/*
 * (Re)allocate s->tile_group_info so it can hold one entry per tile of the
 * current frame. Grows the array only when the tile count increases.
 * Returns 0 on success or a negative AVERROR (elided error path in view).
 */
139 static int init_tile_data(AV1DecContext *s)
143 s->raw_frame_header->tile_cols * s->raw_frame_header->tile_rows;
144 if (s->tile_num < cur_tile_num) {
145 int ret = av_reallocp_array(&s->tile_group_info, cur_tile_num,
146 sizeof(TileGroupInfo));
152 s->tile_num = cur_tile_num;
/*
 * Parse the tile-group payload and record, for each tile in the group, its
 * size, byte offset, and (row, column) position into s->tile_group_info.
 * The last tile of the group has no explicit size field: it spans the
 * remaining bytes. Returns 0 on success, AVERROR_INVALIDDATA on truncation.
 */
157 static int get_tiles_info(AVCodecContext *avctx, const AV1RawTileGroup *tile_group)
159 AV1DecContext *s = avctx->priv_data;
161 uint16_t tile_num, tile_row, tile_col;
162 uint32_t size = 0, size_bytes = 0;
164 bytestream2_init(&gb, tile_group->tile_data.data,
165 tile_group->tile_data.data_size);
166 s->tg_start = tile_group->tg_start;
167 s->tg_end = tile_group->tg_end;
169 for (tile_num = tile_group->tg_start; tile_num <= tile_group->tg_end; tile_num++) {
170 tile_row = tile_num / s->raw_frame_header->tile_cols;
171 tile_col = tile_num % s->raw_frame_header->tile_cols;
        /* Last tile in the group: implicit size = everything that is left. */
173 if (tile_num == tile_group->tg_end) {
174 s->tile_group_info[tile_num].tile_size = bytestream2_get_bytes_left(&gb);
175 s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
176 s->tile_group_info[tile_num].tile_row = tile_row;
177 s->tile_group_info[tile_num].tile_column = tile_col;
        /* Other tiles: a little-endian size prefix of
         * tile_size_bytes_minus1 + 1 bytes precedes the tile data. */
180 size_bytes = s->raw_frame_header->tile_size_bytes_minus1 + 1;
181 if (bytestream2_get_bytes_left(&gb) < size_bytes)
182 return AVERROR_INVALIDDATA;
        /* Assemble the little-endian tile size byte by byte. */
184 for (int i = 0; i < size_bytes; i++)
185 size |= bytestream2_get_byteu(&gb) << 8 * i;
        /* NOTE(review): `<=` (not `<`) because the coded size field stores
         * tile_size - 1; elided code presumably adds 1 — confirm. */
186 if (bytestream2_get_bytes_left(&gb) <= size)
187 return AVERROR_INVALIDDATA;
190 s->tile_group_info[tile_num].tile_size = size;
191 s->tile_group_info[tile_num].tile_offset = bytestream2_tell(&gb);
192 s->tile_group_info[tile_num].tile_row = tile_row;
193 s->tile_group_info[tile_num].tile_column = tile_col;
195 bytestream2_skipu(&gb, size);
/*
 * Derive the software pixel format from the sequence header (profile, bit
 * depth, chroma subsampling), then negotiate the actual (hwaccel) format
 * via ff_thread_get_format(). Fails with ENOSYS if no hwaccel was selected,
 * since this decoder currently has no native decoding path.
 * NOTE(review): several `if (bit_depth == 8)`, `else`, `return` and brace
 * lines are elided in this view.
 */
202 static int get_pixel_format(AVCodecContext *avctx)
204 AV1DecContext *s = avctx->priv_data;
205 const AV1RawSequenceHeader *seq = s->raw_seq;
208 enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
209 #define HWACCEL_MAX (0)
210 enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    /* Bit depth per spec: profile 2 + high_bitdepth allows 10/12 bit;
     * otherwise high_bitdepth selects 10 bit, else 8 bit. */
212 if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
213 bit_depth = seq->color_config.twelve_bit ? 12 : 10;
214 else if (seq->seq_profile <= 2)
215 bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
217 av_log(avctx, AV_LOG_ERROR,
218 "Unknown AV1 profile %d.\n", seq->seq_profile);
222 if (!seq->color_config.mono_chrome) {
223 // 4:4:4 x:0 y:0, 4:2:2 x:1 y:0, 4:2:0 x:1 y:1
224 if (seq->color_config.subsampling_x == 0 &&
225 seq->color_config.subsampling_y == 0) {
227 pix_fmt = AV_PIX_FMT_YUV444P;
228 else if (bit_depth == 10)
229 pix_fmt = AV_PIX_FMT_YUV444P10;
230 else if (bit_depth == 12)
231 pix_fmt = AV_PIX_FMT_YUV444P12;
233 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
234 } else if (seq->color_config.subsampling_x == 1 &&
235 seq->color_config.subsampling_y == 0) {
237 pix_fmt = AV_PIX_FMT_YUV422P;
238 else if (bit_depth == 10)
239 pix_fmt = AV_PIX_FMT_YUV422P10;
240 else if (bit_depth == 12)
241 pix_fmt = AV_PIX_FMT_YUV422P12;
243 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
244 } else if (seq->color_config.subsampling_x == 1 &&
245 seq->color_config.subsampling_y == 1) {
247 pix_fmt = AV_PIX_FMT_YUV420P;
248 else if (bit_depth == 10)
249 pix_fmt = AV_PIX_FMT_YUV420P10;
250 else if (bit_depth == 12)
251 pix_fmt = AV_PIX_FMT_YUV420P12;
253 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
    /* Monochrome path: 4:2:0-style subsampling flags map to YUV440P here.
     * NOTE(review): mapping mono_chrome to YUV440P looks unusual — confirm
     * against the elided branches / gray formats in the full file. */
256 if (seq->color_config.subsampling_x == 1 &&
257 seq->color_config.subsampling_y == 1)
258 pix_fmt = AV_PIX_FMT_YUV440P;
260 av_log(avctx, AV_LOG_WARNING, "Unknown AV1 pixel format.\n");
263 av_log(avctx, AV_LOG_DEBUG, "AV1 decode get format: %s.\n",
264 av_get_pix_fmt_name(pix_fmt));
266 if (pix_fmt == AV_PIX_FMT_NONE)
268 s->pix_fmt = pix_fmt;
    /* Terminate the candidate list and let the generic code negotiate. */
270 *fmtp++ = s->pix_fmt;
271 *fmtp = AV_PIX_FMT_NONE;
273 ret = ff_thread_get_format(avctx, pix_fmts);
278 * check if the HW accel is inited correctly. If not, return un-implemented.
279 * Since now the av1 decoder doesn't support native decode, if it will be
280 * implemented in the future, need remove this check.
282 if (!avctx->hwaccel) {
283 av_log(avctx, AV_LOG_ERROR, "Your platform doesn't suppport"
284 " hardware accelerated AV1 decoding.\n");
285 return AVERROR(ENOSYS);
288 avctx->pix_fmt = ret;
/*
 * Release all resources held by an AV1Frame: the threaded frame buffer,
 * the hwaccel private buffer, and reset the scalability ids.
 * Safe to call on an already-unreferenced frame.
 */
293 static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
295 ff_thread_release_buffer(avctx, &f->tf);
296 av_buffer_unref(&f->hwaccel_priv_buf);
    /* The private pointer aliased the buffer's data; clear it too. */
297 f->hwaccel_picture_private = NULL;
298 f->spatial_id = f->temporal_id = 0;
/*
 * Make dst a new reference to src: thread-frame buffer, hwaccel private
 * buffer, scalability ids and global-motion state are all duplicated.
 * On failure dst is unreferenced and AVERROR(ENOMEM) is returned (the
 * `fail:` label and some copies are elided in this view).
 */
301 static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *src)
305 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
309 if (src->hwaccel_picture_private) {
310 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
311 if (!dst->hwaccel_priv_buf)
313 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
316 dst->spatial_id = src->spatial_id;
317 dst->temporal_id = src->temporal_id;
    /* Copy per-reference global-motion type and parameter arrays. */
320 AV1_NUM_REF_FRAMES * sizeof(uint8_t));
321 memcpy(dst->gm_params,
323 AV1_NUM_REF_FRAMES * 6 * sizeof(int32_t));
    /* Error path: roll back any partial references. */
328 av1_frame_unref(avctx, dst);
329 return AVERROR(ENOMEM);
/*
 * Decoder teardown: release every reference frame and the current frame,
 * drop the cached sequence/frame-header buffers and tile bookkeeping,
 * and close the coded-bitstream (CBS) context.
 */
332 static av_cold int av1_decode_free(AVCodecContext *avctx)
334 AV1DecContext *s = avctx->priv_data;
336 for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
337 av1_frame_unref(avctx, &s->ref[i]);
338 av_frame_free(&s->ref[i].tf.f);
340 av1_frame_unref(avctx, &s->cur_frame);
341 av_frame_free(&s->cur_frame.tf.f);
343 av_buffer_unref(&s->seq_ref);
344 av_buffer_unref(&s->header_ref);
345 av_freep(&s->tile_group_info);
347 ff_cbs_fragment_free(&s->current_obu);
348 ff_cbs_close(&s->cbc);
353 static int set_context_with_sequence(AVCodecContext *avctx,
354 const AV1RawSequenceHeader *seq)
356 int width = seq->max_frame_width_minus_1 + 1;
357 int height = seq->max_frame_height_minus_1 + 1;
359 avctx->profile = seq->seq_profile;
360 avctx->level = seq->seq_level_idx[0];
363 seq->color_config.color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
364 avctx->color_primaries = seq->color_config.color_primaries;
365 avctx->colorspace = seq->color_config.color_primaries;
366 avctx->color_trc = seq->color_config.transfer_characteristics;
368 switch (seq->color_config.chroma_sample_position) {
369 case AV1_CSP_VERTICAL:
370 avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
372 case AV1_CSP_COLOCATED:
373 avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
377 if (avctx->width != width || avctx->height != height) {
378 int ret = ff_set_dimensions(avctx, width, height);
382 avctx->sample_aspect_ratio = (AVRational) { 1, 1 };
384 if (seq->timing_info.num_units_in_display_tick &&
385 seq->timing_info.time_scale) {
386 av_reduce(&avctx->framerate.den, &avctx->framerate.num,
387 seq->timing_info.num_units_in_display_tick,
388 seq->timing_info.time_scale,
390 if (seq->timing_info.equal_picture_interval)
391 avctx->ticks_per_frame = seq->timing_info.num_ticks_per_picture_minus_1 + 1;
/*
 * Apply per-frame header values to the AVCodecContext: coded dimensions,
 * and the sample aspect ratio derived from the render vs coded size.
 * Returns 0 on success or a negative AVERROR (elided return paths in view).
 */
397 static int update_context_with_frame_header(AVCodecContext *avctx,
398 const AV1RawFrameHeader *header)
400 AVRational aspect_ratio;
401 int width = header->frame_width_minus_1 + 1;
402 int height = header->frame_height_minus_1 + 1;
403 int r_width = header->render_width_minus_1 + 1;
404 int r_height = header->render_height_minus_1 + 1;
407 if (avctx->width != width || avctx->height != height) {
408 ret = ff_set_dimensions(avctx, width, height);
    /* SAR = (render_w / coded_w) / (render_h / coded_h),
     * i.e. (height * r_width) : (width * r_height); 64-bit to avoid overflow. */
413 av_reduce(&aspect_ratio.num, &aspect_ratio.den,
414 (int64_t)height * r_width,
415 (int64_t)width * r_height,
    /* Only touch the context when the SAR actually changed. */
418 if (av_cmp_q(avctx->sample_aspect_ratio, aspect_ratio)) {
419 ret = ff_set_sar(avctx, aspect_ratio);
/*
 * Decoder init: allocate the reference and current frame holders, open the
 * CBS (coded bitstream) context, and — when extradata is present — parse it
 * to pre-configure the context from the sequence header. Extradata failures
 * are deliberately non-fatal (warnings only); the fragment is reset after use.
 */
427 static av_cold int av1_decode_init(AVCodecContext *avctx)
429 AV1DecContext *s = avctx->priv_data;
430 AV1RawSequenceHeader *seq;
434 s->pix_fmt = AV_PIX_FMT_NONE;
436 for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
437 s->ref[i].tf.f = av_frame_alloc();
438 if (!s->ref[i].tf.f) {
439 av_log(avctx, AV_LOG_ERROR,
440 "Failed to allocate reference frame buffer %d.\n", i);
441 return AVERROR(ENOMEM);
445 s->cur_frame.tf.f = av_frame_alloc();
446 if (!s->cur_frame.tf.f) {
447 av_log(avctx, AV_LOG_ERROR,
448 "Failed to allocate current frame buffer.\n");
449 return AVERROR(ENOMEM);
452 ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, avctx);
    /* Best-effort parse of extradata (e.g. av1C box) for early setup. */
456 if (avctx->extradata && avctx->extradata_size) {
457 ret = ff_cbs_read(s->cbc, &s->current_obu, avctx->extradata,
458 avctx->extradata_size);
460 av_log(avctx, AV_LOG_WARNING, "Failed to read extradata.\n");
    /* CBS keeps the last sequence header in its private context. */
464 seq = ((CodedBitstreamAV1Context *)(s->cbc->priv_data))->sequence_header;
466 av_log(avctx, AV_LOG_WARNING, "No sequence header available.\n");
470 ret = set_context_with_sequence(avctx, seq);
472 av_log(avctx, AV_LOG_WARNING, "Failed to set decoder context.\n");
    /* Drop the parsed extradata units; decoding re-reads per packet. */
477 ff_cbs_fragment_reset(&s->current_obu);
/*
 * Allocate the buffers for a new output frame: update the context from the
 * frame header, grab a (possibly hwaccel-backed) frame buffer, tag key-frame
 * and picture-type metadata, and allocate hwaccel per-frame private data.
 * On allocation failure the frame is unreferenced and ENOMEM returned.
 * NOTE(review): the `frame` local, `break` lines and `fail:` label are
 * elided in this view.
 */
483 static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
485 AV1DecContext *s = avctx->priv_data;
486 AV1RawFrameHeader *header= s->raw_frame_header;
490 ret = update_context_with_frame_header(avctx, header);
492 av_log(avctx, AV_LOG_ERROR, "Failed to update context with frame header\n");
496 if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
500 frame->key_frame = header->frame_type == AV1_FRAME_KEY;
    /* Map AV1 frame types onto the generic picture types. */
502 switch (header->frame_type) {
504 case AV1_FRAME_INTRA_ONLY:
505 frame->pict_type = AV_PICTURE_TYPE_I;
507 case AV1_FRAME_INTER:
508 frame->pict_type = AV_PICTURE_TYPE_P;
510 case AV1_FRAME_SWITCH:
511 frame->pict_type = AV_PICTURE_TYPE_SP;
    /* Hwaccels may need zeroed per-frame private storage. */
515 if (avctx->hwaccel) {
516 const AVHWAccel *hwaccel = avctx->hwaccel;
517 if (hwaccel->frame_priv_data_size) {
518 f->hwaccel_priv_buf =
519 av_buffer_allocz(hwaccel->frame_priv_data_size);
520 if (!f->hwaccel_priv_buf)
522 f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
    /* Error path: roll back anything acquired above. */
528 av1_frame_unref(avctx, f);
529 return AVERROR(ENOMEM);
/*
 * Produce the decoder's output: reference the current frame into `frame`,
 * propagate packet timing/size metadata, and (in elided code) set *got_frame.
 */
532 static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
533 const AVPacket *pkt, int *got_frame)
535 AV1DecContext *s = avctx->priv_data;
536 const AVFrame *srcframe = s->cur_frame.tf.f;
    /* New reference, not a copy: pixel data stays shared. */
539 ret = av_frame_ref(frame, srcframe);
543 frame->pts = pkt->pts;
544 frame->pkt_dts = pkt->dts;
545 frame->pkt_size = pkt->size;
/*
 * Apply refresh_frame_flags: for every set bit, replace that DPB slot with
 * a new reference to the just-decoded current frame.
 * Returns 0 on success or the av1_frame_ref() error.
 */
552 static int update_reference_list(AVCodecContext *avctx)
554 AV1DecContext *s = avctx->priv_data;
555 const AV1RawFrameHeader *header = s->raw_frame_header;
558 for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
559 if (header->refresh_frame_flags & (1 << i)) {
            /* Drop the old occupant of the slot before re-referencing. */
560 if (s->ref[i].tf.f->buf[0])
561 av1_frame_unref(avctx, &s->ref[i]);
562 if ((ret = av1_frame_ref(avctx, &s->ref[i], &s->cur_frame)) < 0) {
563 av_log(avctx, AV_LOG_ERROR,
564 "Failed to update frame %d in reference list\n", i);
/*
 * Prepare the current-frame slot for a new frame: release any previous
 * frame, allocate fresh buffers, size the tile bookkeeping, and derive the
 * global-motion parameters. Returns 0 on success or a negative AVERROR.
 */
572 static int get_current_frame(AVCodecContext *avctx)
574 AV1DecContext *s = avctx->priv_data;
577 if (s->cur_frame.tf.f->buf[0])
578 av1_frame_unref(avctx, &s->cur_frame);
580 ret = av1_frame_alloc(avctx, &s->cur_frame);
582 av_log(avctx, AV_LOG_ERROR,
583 "Failed to allocate space for current frame.\n");
587 ret = init_tile_data(s);
589 av_log(avctx, AV_LOG_ERROR, "Failed to init tile data.\n");
    /* Must run per frame: depends on the just-parsed frame header. */
593 global_motion_params(s);
/*
 * Main decode entry point. Parses the packet into OBUs via CBS and walks
 * them in order: sequence headers (re)configure the context and pixel
 * format, frame headers start a new frame (or show an existing one), and
 * tile groups are forwarded to the hwaccel. Once the last tile of the frame
 * has been seen, the frame is finalized, the reference list updated, and —
 * if show_frame is set — the frame is output.
 * NOTE(review): numerous `break`s, `goto end`s, brace lines and error
 * checks are elided in this view; statement order is load-bearing here.
 */
598 static int av1_decode_frame(AVCodecContext *avctx, void *frame,
599 int *got_frame, AVPacket *pkt)
601 AV1DecContext *s = avctx->priv_data;
602 AV1RawTileGroup *raw_tile_group = NULL;
605 ret = ff_cbs_read_packet(s->cbc, &s->current_obu, pkt);
607 av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
610 av_log(avctx, AV_LOG_DEBUG, "Total obu for this frame:%d.\n",
611 s->current_obu.nb_units);
    /* Process each OBU of the temporal unit in bitstream order. */
613 for (int i = 0; i < s->current_obu.nb_units; i++) {
614 CodedBitstreamUnit *unit = &s->current_obu.units[i];
615 AV1RawOBU *obu = unit->content;
616 const AV1RawOBUHeader *header;
621 header = &obu->header;
622 av_log(avctx, AV_LOG_DEBUG, "Obu idx:%d, obu type:%d.\n", i, unit->type);
624 switch (unit->type) {
625 case AV1_OBU_SEQUENCE_HEADER:
            /* Keep the sequence header alive beyond this fragment. */
626 av_buffer_unref(&s->seq_ref);
627 s->seq_ref = av_buffer_ref(unit->content_ref);
629 ret = AVERROR(ENOMEM);
633 s->raw_seq = &obu->obu.sequence_header;
635 ret = set_context_with_sequence(avctx, s->raw_seq);
637 av_log(avctx, AV_LOG_ERROR, "Failed to set context.\n");
            /* Pixel format is negotiated once, on the first sequence header. */
642 if (s->pix_fmt == AV_PIX_FMT_NONE) {
643 ret = get_pixel_format(avctx);
645 av_log(avctx, AV_LOG_ERROR,
646 "Failed to get pixel format.\n");
            /* Hand the raw sequence header to the hwaccel if it wants it. */
652 if (avctx->hwaccel && avctx->hwaccel->decode_params) {
653 ret = avctx->hwaccel->decode_params(avctx, unit->type, unit->data,
656 av_log(avctx, AV_LOG_ERROR, "HW accel decode params fail.\n");
662 case AV1_OBU_REDUNDANT_FRAME_HEADER:
            /* A redundant header is ignored when a frame header was seen. */
663 if (s->raw_frame_header)
667 case AV1_OBU_FRAME_HEADER:
669 av_log(avctx, AV_LOG_ERROR, "Missing Sequence Header.\n");
670 ret = AVERROR_INVALIDDATA;
            /* Keep the frame header alive beyond this fragment. */
674 av_buffer_unref(&s->header_ref);
675 s->header_ref = av_buffer_ref(unit->content_ref);
676 if (!s->header_ref) {
677 ret = AVERROR(ENOMEM);
            /* An AV1_OBU_FRAME bundles header + tile group in one OBU. */
681 if (unit->type == AV1_OBU_FRAME)
682 s->raw_frame_header = &obu->obu.frame.header;
684 s->raw_frame_header = &obu->obu.frame_header;
            /* show_existing_frame: re-output a stored reference, no decode. */
686 if (s->raw_frame_header->show_existing_frame) {
687 if (s->cur_frame.tf.f->buf[0])
688 av1_frame_unref(avctx, &s->cur_frame);
690 ret = av1_frame_ref(avctx, &s->cur_frame,
691 &s->ref[s->raw_frame_header->frame_to_show_map_idx]);
693 av_log(avctx, AV_LOG_ERROR, "Failed to get reference frame.\n");
697 ret = update_reference_list(avctx);
699 av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
703 ret = set_output_frame(avctx, frame, pkt, got_frame);
705 av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
707 s->raw_frame_header = NULL;
            /* Normal path: allocate the frame we are about to decode. */
712 ret = get_current_frame(avctx);
714 av_log(avctx, AV_LOG_ERROR, "Get current frame error\n");
718 s->cur_frame.spatial_id = header->spatial_id;
719 s->cur_frame.temporal_id = header->temporal_id;
721 if (avctx->hwaccel) {
722 ret = avctx->hwaccel->start_frame(avctx, unit->data,
725 av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
            /* For AV1_OBU_FRAME, fall through to tile-group handling. */
729 if (unit->type != AV1_OBU_FRAME)
732 case AV1_OBU_TILE_GROUP:
733 if (!s->raw_frame_header) {
734 av_log(avctx, AV_LOG_ERROR, "Missing Frame Header.\n");
735 ret = AVERROR_INVALIDDATA;
739 if (unit->type == AV1_OBU_FRAME)
740 raw_tile_group = &obu->obu.frame.tile_group;
742 raw_tile_group = &obu->obu.tile_group;
744 ret = get_tiles_info(avctx, raw_tile_group);
748 if (avctx->hwaccel) {
749 ret = avctx->hwaccel->decode_slice(avctx,
750 raw_tile_group->tile_data.data,
751 raw_tile_group->tile_data.data_size);
753 av_log(avctx, AV_LOG_ERROR,
754 "HW accel decode slice fail.\n");
        /* OBU types with no decoder-side action. */
759 case AV1_OBU_TILE_LIST:
760 case AV1_OBU_TEMPORAL_DELIMITER:
761 case AV1_OBU_PADDING:
762 case AV1_OBU_METADATA:
765 av_log(avctx, AV_LOG_DEBUG,
766 "Unknown obu type: %d (%"SIZE_SPECIFIER" bits).\n",
767 unit->type, unit->data_size);
        /* All tiles of the frame received: finalize and maybe output. */
770 if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
771 if (avctx->hwaccel) {
772 ret = avctx->hwaccel->end_frame(avctx);
774 av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
779 ret = update_reference_list(avctx);
781 av_log(avctx, AV_LOG_ERROR, "Failed to update reference list.\n");
785 if (s->raw_frame_header->show_frame) {
786 ret = set_output_frame(avctx, frame, pkt, got_frame);
788 av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
792 raw_tile_group = NULL;
793 s->raw_frame_header = NULL;
    /* Per-packet cleanup (elided `end:` label in this view). */
798 ff_cbs_fragment_reset(&s->current_obu);
800 s->raw_frame_header = NULL;
/*
 * Flush callback (e.g. on seek): drop all reference frames and the current
 * frame, forget the in-progress frame header, and flush CBS parser state.
 */
804 static void av1_decode_flush(AVCodecContext *avctx)
806 AV1DecContext *s = avctx->priv_data;
808 for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
809 av1_frame_unref(avctx, &s->ref[i]);
811 av1_frame_unref(avctx, &s->cur_frame);
812 s->raw_frame_header = NULL;
815 ff_cbs_flush(s->cbc);
818 AVCodec ff_av1_decoder = {
820 .long_name = NULL_IF_CONFIG_SMALL("Alliance for Open Media AV1"),
821 .type = AVMEDIA_TYPE_VIDEO,
822 .id = AV_CODEC_ID_AV1,
823 .priv_data_size = sizeof(AV1DecContext),
824 .init = av1_decode_init,
825 .close = av1_decode_free,
826 .decode = av1_decode_frame,
827 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
828 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
829 FF_CODEC_CAP_INIT_CLEANUP |
830 FF_CODEC_CAP_SETS_PKT_DTS,
831 .flush = av1_decode_flush,
832 .profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
833 .hw_configs = (const AVCodecHWConfigInternal * []) {