libavcodec/dxva2_av1.c
/*
 * DXVA2 AV1 HW acceleration.
 *
 * copyright (c) 2020 Hendrik Leppkes
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"

#include "dxva2_internal.h"
#include "av1dec.h"

#define MAX_TILES 256

struct AV1DXVAContext {
    FFDXVASharedContext shared;

    unsigned int bitstream_allocated;
    uint8_t *bitstream_cache;
};

struct av1_dxva2_picture_context {
    DXVA_PicParams_AV1    pp;
    unsigned              tile_count;
    DXVA_Tile_AV1         tiles[MAX_TILES];
    uint8_t              *bitstream;
    unsigned              bitstream_size;
};

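/* Derive the coded bit depth from the sequence header: profile 2 with
 * high_bitdepth signals either 12 or 10 bit, any other profile with
 * high_bitdepth means 10 bit, and everything else is 8 bit. */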
static int get_bit_depth_from_seq(const AV1RawSequenceHeader *seq)
{
    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        return seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2 && seq->color_config.high_bitdepth)
        return 10;
    else
        return 8;
}

static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const AV1DecContext *h,
                                    DXVA_PicParams_AV1 *pp)
{
    int i, j, uses_lr;
    const AV1RawSequenceHeader *seq = h->raw_seq;
    const AV1RawFrameHeader *frame_header = h->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &h->cur_frame.film_grain;

    unsigned char remap_lr_type[4] = { AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ };
    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;

    memset(pp, 0, sizeof(*pp));

    pp->width  = avctx->width;
    pp->height = avctx->height;

    pp->max_width  = seq->max_frame_width_minus_1 + 1;
    pp->max_height = seq->max_frame_height_minus_1 + 1;

    pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.tf.f);
    pp->superres_denom      = frame_header->use_superres ? frame_header->coded_denom : AV1_SUPERRES_NUM;
    pp->bitdepth            = get_bit_depth_from_seq(seq);
    pp->seq_profile         = seq->seq_profile;

    /* Tiling info */
    pp->tiles.cols = frame_header->tile_cols;
    pp->tiles.rows = frame_header->tile_rows;
    pp->tiles.context_update_id = frame_header->context_update_tile_id;

    for (i = 0; i < pp->tiles.cols; i++)
        pp->tiles.widths[i] = frame_header->width_in_sbs_minus_1[i] + 1;

    for (i = 0; i < pp->tiles.rows; i++)
        pp->tiles.heights[i] = frame_header->height_in_sbs_minus_1[i] + 1;

    /* Coding tools */
    pp->coding.use_128x128_superblock       = seq->use_128x128_superblock;
    pp->coding.intra_edge_filter            = seq->enable_intra_edge_filter;
    pp->coding.interintra_compound          = seq->enable_interintra_compound;
    pp->coding.masked_compound              = seq->enable_masked_compound;
    pp->coding.warped_motion                = frame_header->allow_warped_motion;
    pp->coding.dual_filter                  = seq->enable_dual_filter;
    pp->coding.jnt_comp                     = seq->enable_jnt_comp;
    pp->coding.screen_content_tools         = frame_header->allow_screen_content_tools;
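    /* Intra frame types (KEY and INTRA_ONLY) have even values, so
     * !(frame_type & 1) forces integer motion vectors for intra frames. */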
    pp->coding.integer_mv                   = frame_header->force_integer_mv || !(frame_header->frame_type & 1);
    pp->coding.cdef                         = seq->enable_cdef;
    pp->coding.restoration                  = seq->enable_restoration;
    pp->coding.film_grain                   = seq->film_grain_params_present && !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
    pp->coding.intrabc                      = frame_header->allow_intrabc;
    pp->coding.high_precision_mv            = frame_header->allow_high_precision_mv;
    pp->coding.switchable_motion_mode       = frame_header->is_motion_mode_switchable;
    pp->coding.filter_intra                 = seq->enable_filter_intra;
    pp->coding.disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf;
    pp->coding.disable_cdf_update           = frame_header->disable_cdf_update;
    pp->coding.reference_mode               = frame_header->reference_select;
    pp->coding.skip_mode                    = frame_header->skip_mode_present;
    pp->coding.reduced_tx_set               = frame_header->reduced_tx_set;
    pp->coding.superres                     = frame_header->use_superres;
    pp->coding.tx_mode                      = frame_header->tx_mode;
    pp->coding.use_ref_frame_mvs            = frame_header->use_ref_frame_mvs;
    pp->coding.enable_ref_frame_mvs         = seq->enable_ref_frame_mvs;
    pp->coding.reference_frame_update       = 1; // 0 for show_existing_frame with key frames, but those are not passed to the hwaccel

    /* Format & Picture Info flags */
    pp->format.frame_type     = frame_header->frame_type;
    pp->format.show_frame     = frame_header->show_frame;
    pp->format.showable_frame = frame_header->showable_frame;
    pp->format.subsampling_x  = seq->color_config.subsampling_x;
    pp->format.subsampling_y  = seq->color_config.subsampling_y;
    pp->format.mono_chrome    = seq->color_config.mono_chrome;

    /* References */
    pp->primary_ref_frame = frame_header->primary_ref_frame;
    pp->order_hint        = frame_header->order_hint;
    pp->order_hint_bits   = seq->enable_order_hint ? seq->order_hint_bits_minus_1 + 1 : 0;

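    /* 0xFF marks a reference slot without an associated surface; slots with
     * a valid frame are filled in from the reference map below. */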
    memset(pp->RefFrameMapTextureIndex, 0xFF, sizeof(pp->RefFrameMapTextureIndex));
    for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
        int8_t ref_idx = frame_header->ref_frame_idx[i];
        AVFrame *ref_frame = h->ref[ref_idx].tf.f;

        pp->frame_refs[i].width  = ref_frame->width;
        pp->frame_refs[i].height = ref_frame->height;
        pp->frame_refs[i].Index  = ref_frame->buf[0] ? ref_idx : 0xFF;

        /* Global Motion */
        pp->frame_refs[i].wminvalid = (h->cur_frame.gm_type[AV1_REF_FRAME_LAST + i] == AV1_WARP_MODEL_IDENTITY);
        pp->frame_refs[i].wmtype    = h->cur_frame.gm_type[AV1_REF_FRAME_LAST + i];
        for (j = 0; j < 6; ++j) {
             pp->frame_refs[i].wmmat[j] = h->cur_frame.gm_params[AV1_REF_FRAME_LAST + i][j];
        }
    }
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        AVFrame *ref_frame = h->ref[i].tf.f;
        if (ref_frame->buf[0])
            pp->RefFrameMapTextureIndex[i] = ff_dxva2_get_surface_index(avctx, ctx, ref_frame);
    }

    /* Loop filter parameters */
    pp->loop_filter.filter_level[0]        = frame_header->loop_filter_level[0];
    pp->loop_filter.filter_level[1]        = frame_header->loop_filter_level[1];
    pp->loop_filter.filter_level_u         = frame_header->loop_filter_level[2];
    pp->loop_filter.filter_level_v         = frame_header->loop_filter_level[3];
    pp->loop_filter.sharpness_level        = frame_header->loop_filter_sharpness;
    pp->loop_filter.mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled;
    pp->loop_filter.mode_ref_delta_update  = frame_header->loop_filter_delta_update;
    pp->loop_filter.delta_lf_multi         = frame_header->delta_lf_multi;
    pp->loop_filter.delta_lf_present       = frame_header->delta_lf_present;
    pp->loop_filter.delta_lf_res           = frame_header->delta_lf_res;

    for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
        pp->loop_filter.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
    }

    pp->loop_filter.mode_deltas[0]                = frame_header->loop_filter_mode_deltas[0];
    pp->loop_filter.mode_deltas[1]                = frame_header->loop_filter_mode_deltas[1];
    pp->loop_filter.frame_restoration_type[0]     = remap_lr_type[frame_header->lr_type[0]];
    pp->loop_filter.frame_restoration_type[1]     = remap_lr_type[frame_header->lr_type[1]];
    pp->loop_filter.frame_restoration_type[2]     = remap_lr_type[frame_header->lr_type[2]];
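    /* Restoration unit sizes are passed as log2: luma units are
     * 64 << lr_unit_shift samples, chroma units are further reduced by
     * lr_uv_shift; 8 (256) is reported when loop restoration is unused. */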
    uses_lr = frame_header->lr_type[0] || frame_header->lr_type[1] || frame_header->lr_type[2];
    pp->loop_filter.log2_restoration_unit_size[0] = uses_lr ? (6 + frame_header->lr_unit_shift) : 8;
    pp->loop_filter.log2_restoration_unit_size[1] = uses_lr ? (6 + frame_header->lr_unit_shift - frame_header->lr_uv_shift) : 8;
    pp->loop_filter.log2_restoration_unit_size[2] = uses_lr ? (6 + frame_header->lr_unit_shift - frame_header->lr_uv_shift) : 8;

    /* Quantization */
    pp->quantization.delta_q_present = frame_header->delta_q_present;
    pp->quantization.delta_q_res     = frame_header->delta_q_res;
    pp->quantization.base_qindex     = frame_header->base_q_idx;
    pp->quantization.y_dc_delta_q    = frame_header->delta_q_y_dc;
    pp->quantization.u_dc_delta_q    = frame_header->delta_q_u_dc;
    pp->quantization.v_dc_delta_q    = frame_header->delta_q_v_dc;
    pp->quantization.u_ac_delta_q    = frame_header->delta_q_u_ac;
    pp->quantization.v_ac_delta_q    = frame_header->delta_q_v_ac;
    pp->quantization.qm_y            = frame_header->using_qmatrix ? frame_header->qm_y : 0xFF;
    pp->quantization.qm_u            = frame_header->using_qmatrix ? frame_header->qm_u : 0xFF;
    pp->quantization.qm_v            = frame_header->using_qmatrix ? frame_header->qm_v : 0xFF;

    /* Cdef parameters */
    pp->cdef.damping = frame_header->cdef_damping_minus_3;
    pp->cdef.bits    = frame_header->cdef_bits;
    for (i = 0; i < 8; i++) {
        pp->cdef.y_strengths[i].primary    = frame_header->cdef_y_pri_strength[i];
        pp->cdef.y_strengths[i].secondary  = frame_header->cdef_y_sec_strength[i];
        pp->cdef.uv_strengths[i].primary   = frame_header->cdef_uv_pri_strength[i];
        pp->cdef.uv_strengths[i].secondary = frame_header->cdef_uv_sec_strength[i];
    }

    /* Misc flags */
    pp->interp_filter = frame_header->interpolation_filter;

    /* Segmentation */
    pp->segmentation.enabled         = frame_header->segmentation_enabled;
    pp->segmentation.update_map      = frame_header->segmentation_update_map;
    pp->segmentation.update_data     = frame_header->segmentation_update_data;
    pp->segmentation.temporal_update = frame_header->segmentation_temporal_update;
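    /* Pack the per-feature enable flags of every segment into a bitmask and
     * copy the corresponding feature values. */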
    for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
        for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
            pp->segmentation.feature_mask[i].mask |= frame_header->feature_enabled[i][j] << j;
            pp->segmentation.feature_data[i][j]    = frame_header->feature_value[i][j];
        }
    }

    /* Film grain */
    if (apply_grain) {
        pp->film_grain.apply_grain              = 1;
        pp->film_grain.scaling_shift_minus8     = film_grain->grain_scaling_minus_8;
        pp->film_grain.chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma;
        pp->film_grain.ar_coeff_lag             = film_grain->ar_coeff_lag;
        pp->film_grain.ar_coeff_shift_minus6    = film_grain->ar_coeff_shift_minus_6;
        pp->film_grain.grain_scale_shift        = film_grain->grain_scale_shift;
        pp->film_grain.overlap_flag             = film_grain->overlap_flag;
        pp->film_grain.clip_to_restricted_range = film_grain->clip_to_restricted_range;
        pp->film_grain.matrix_coeff_is_identity = (seq->color_config.matrix_coefficients == AVCOL_SPC_RGB);

        pp->film_grain.grain_seed               = film_grain->grain_seed;
        pp->film_grain.num_y_points             = film_grain->num_y_points;
        for (i = 0; i < film_grain->num_y_points; i++) {
            pp->film_grain.scaling_points_y[i][0] = film_grain->point_y_value[i];
            pp->film_grain.scaling_points_y[i][1] = film_grain->point_y_scaling[i];
        }
        pp->film_grain.num_cb_points            = film_grain->num_cb_points;
        for (i = 0; i < film_grain->num_cb_points; i++) {
            pp->film_grain.scaling_points_cb[i][0] = film_grain->point_cb_value[i];
            pp->film_grain.scaling_points_cb[i][1] = film_grain->point_cb_scaling[i];
        }
        pp->film_grain.num_cr_points            = film_grain->num_cr_points;
        for (i = 0; i < film_grain->num_cr_points; i++) {
            pp->film_grain.scaling_points_cr[i][0] = film_grain->point_cr_value[i];
            pp->film_grain.scaling_points_cr[i][1] = film_grain->point_cr_scaling[i];
        }
        for (i = 0; i < 24; i++) {
            pp->film_grain.ar_coeffs_y[i] = film_grain->ar_coeffs_y_plus_128[i];
        }
        for (i = 0; i < 25; i++) {
            pp->film_grain.ar_coeffs_cb[i] = film_grain->ar_coeffs_cb_plus_128[i];
            pp->film_grain.ar_coeffs_cr[i] = film_grain->ar_coeffs_cr_plus_128[i];
        }
        pp->film_grain.cb_mult      = film_grain->cb_mult;
        pp->film_grain.cb_luma_mult = film_grain->cb_luma_mult;
        pp->film_grain.cr_mult      = film_grain->cr_mult;
        pp->film_grain.cr_luma_mult = film_grain->cr_luma_mult;
        pp->film_grain.cb_offset    = film_grain->cb_offset;
        pp->film_grain.cr_offset    = film_grain->cr_offset;
    }

    // XXX: Setting the StatusReportFeedbackNumber breaks decoding on some drivers (tested on NVIDIA 457.09)
    // Status Reporting is not used by FFmpeg, hence not providing a number does not cause any issues
    //pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    return 0;
}

static int dxva2_av1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const AV1DecContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;

    if (!DXVA_CONTEXT_VALID(avctx, ctx))
        return -1;
    av_assert0(ctx_pic);

    /* Fill up DXVA_PicParams_AV1 */
    if (fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp) < 0)
        return -1;

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}

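/* Called once per tile group OBU, so the tiles of a frame may be spread
 * over several consecutive calls. */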
static int dxva2_av1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const AV1DecContext *h = avctx->priv_data;
    const AV1RawFrameHeader *frame_header = h->raw_frame_header;
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;
    struct AV1DXVAContext *ctx = avctx->internal->hwaccel_priv_data;
    void *tmp;

    ctx_pic->tile_count = frame_header->tile_cols * frame_header->tile_rows;

    /* too many tiles, exceeding all defined levels in the AV1 spec */
    if (ctx_pic->tile_count > MAX_TILES)
        return AVERROR(ENOSYS);

    /* Shortcut if all tiles are in the same buffer */
    if (ctx_pic->tile_count == h->tg_end - h->tg_start + 1) {
        ctx_pic->bitstream = (uint8_t *)buffer;
        ctx_pic->bitstream_size = size;

        for (uint32_t tile_num = 0; tile_num < ctx_pic->tile_count; tile_num++) {
            ctx_pic->tiles[tile_num].DataOffset   = h->tile_group_info[tile_num].tile_offset;
            ctx_pic->tiles[tile_num].DataSize     = h->tile_group_info[tile_num].tile_size;
            ctx_pic->tiles[tile_num].row          = h->tile_group_info[tile_num].tile_row;
            ctx_pic->tiles[tile_num].column       = h->tile_group_info[tile_num].tile_column;
            ctx_pic->tiles[tile_num].anchor_frame = 0xFF;
        }

        return 0;
    }

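    /* Tiles arrive in multiple buffers: append this tile group to a cached
     * copy of the bitstream and record tile offsets relative to it. */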
    /* allocate an internal buffer */
    tmp = av_fast_realloc(ctx->bitstream_cache, &ctx->bitstream_allocated,
                          ctx_pic->bitstream_size + size);
    if (!tmp) {
        return AVERROR(ENOMEM);
    }
    ctx_pic->bitstream = ctx->bitstream_cache = tmp;

    memcpy(ctx_pic->bitstream + ctx_pic->bitstream_size, buffer, size);

    for (uint32_t tile_num = h->tg_start; tile_num <= h->tg_end; tile_num++) {
        ctx_pic->tiles[tile_num].DataOffset   = ctx_pic->bitstream_size + h->tile_group_info[tile_num].tile_offset;
        ctx_pic->tiles[tile_num].DataSize     = h->tile_group_info[tile_num].tile_size;
        ctx_pic->tiles[tile_num].row          = h->tile_group_info[tile_num].tile_row;
        ctx_pic->tiles[tile_num].column       = h->tile_group_info[tile_num].tile_column;
        ctx_pic->tiles[tile_num].anchor_frame = 0xFF;
    }

    ctx_pic->bitstream_size += size;

    return 0;
}

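/* Copy the accumulated bitstream into the hardware bitstream buffer, pad it,
 * and fill the buffer descriptors for the bitstream and tile control data. */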
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const AV1DecContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;
    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned dxva_size;
    unsigned padding;
    unsigned type;

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;

    if (ctx_pic->bitstream_size > dxva_size) {
        av_log(avctx, AV_LOG_ERROR, "Bitstream size exceeds hardware buffer\n");
        return -1;
    }

    memcpy(dxva_data, ctx_pic->bitstream, ctx_pic->bitstream_size);

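    /* Zero-pad the bitstream to a 128-byte boundary, limited by the size of
     * the hardware buffer. */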
    padding = FFMIN(128 - ((ctx_pic->bitstream_size) & 127), dxva_size - ctx_pic->bitstream_size);
    if (padding > 0) {
        memset(dxva_data + ctx_pic->bitstream_size, 0, padding);
        ctx_pic->bitstream_size += padding;
    }

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx))
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType           = type;
        dsc11->DataSize             = ctx_pic->bitstream_size;
        dsc11->NumMBsInBuffer       = 0;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = ctx_pic->bitstream_size;
        dsc2->NumMBsInBuffer       = 0;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc, type,
                                  ctx_pic->tiles, sizeof(*ctx_pic->tiles) * ctx_pic->tile_count, 0);
}

static int dxva2_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *h = avctx->priv_data;
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;
    int ret;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.tf.f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    NULL, 0,
                                    commit_bitstream_and_slice_buffer);

    return ret;
}

static int dxva2_av1_uninit(AVCodecContext *avctx)
{
    struct AV1DXVAContext *ctx = avctx->internal->hwaccel_priv_data;

    av_freep(&ctx->bitstream_cache);
    ctx->bitstream_allocated = 0;

    return ff_dxva2_decode_uninit(avctx);
}

#if CONFIG_AV1_DXVA2_HWACCEL
const AVHWAccel ff_av1_dxva2_hwaccel = {
    .name           = "av1_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = dxva2_av1_uninit,
    .start_frame    = dxva2_av1_start_frame,
    .decode_slice   = dxva2_av1_decode_slice,
    .end_frame      = dxva2_av1_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct av1_dxva2_picture_context),
    .priv_data_size = sizeof(struct AV1DXVAContext),
};
#endif

#if CONFIG_AV1_D3D11VA_HWACCEL
const AVHWAccel ff_av1_d3d11va_hwaccel = {
    .name           = "av1_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = dxva2_av1_uninit,
    .start_frame    = dxva2_av1_start_frame,
    .decode_slice   = dxva2_av1_decode_slice,
    .end_frame      = dxva2_av1_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct av1_dxva2_picture_context),
    .priv_data_size = sizeof(struct AV1DXVAContext),
};
#endif

#if CONFIG_AV1_D3D11VA2_HWACCEL
const AVHWAccel ff_av1_d3d11va2_hwaccel = {
    .name           = "av1_d3d11va2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .pix_fmt        = AV_PIX_FMT_D3D11,
    .init           = ff_dxva2_decode_init,
    .uninit         = dxva2_av1_uninit,
    .start_frame    = dxva2_av1_start_frame,
    .decode_slice   = dxva2_av1_decode_slice,
    .end_frame      = dxva2_av1_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct av1_dxva2_picture_context),
    .priv_data_size = sizeof(struct AV1DXVAContext),
};
#endif