X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fqsvdec.c;h=f543defb1821e756eede49041f4afc8dbfe02199;hb=a247ac640df3da573cd661065bf53f37863e2b46;hp=4a0be811fb92dba9d921b9c20c245658c43173bf;hpb=39278ff0de5b5e3397c22538978bffbb38ee099b;p=ffmpeg

diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index 4a0be811fb9..f543defb182 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -21,27 +21,70 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include <stdint.h>
 #include <string.h>
 #include <sys/types.h>
 
 #include <mfx/mfxvideo.h>
 
 #include "libavutil/common.h"
+#include "libavutil/fifo.h"
+#include "libavutil/frame.h"
 #include "libavutil/hwcontext.h"
 #include "libavutil/hwcontext_qsv.h"
 #include "libavutil/mem.h"
 #include "libavutil/log.h"
-#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
 #include "libavutil/pixfmt.h"
 #include "libavutil/time.h"
+#include "libavutil/imgutils.h"
 
 #include "avcodec.h"
 #include "internal.h"
+#include "decode.h"
+#include "hwconfig.h"
 #include "qsv.h"
 #include "qsv_internal.h"
-#include "qsvdec.h"
 
-const AVCodecHWConfigInternal *ff_qsv_hw_configs[] = {
+typedef struct QSVContext {
+    // the session used for decoding
+    mfxSession session;
+
+    // the session we allocated internally, in case the caller did not provide
+    // one
+    QSVSession internal_qs;
+
+    QSVFramesContext frames_ctx;
+
+    /**
+     * a linked list of frames currently being used by QSV
+     */
+    QSVFrame *work_frames;
+
+    AVFifoBuffer *async_fifo;
+    int zero_consume_run;
+    int buffered_count;
+    int reinit_flag;
+
+    enum AVPixelFormat orig_pix_fmt;
+    uint32_t fourcc;
+    mfxFrameInfo frame_info;
+    AVBufferPool *pool;
+
+    int initialized;
+
+    // options set by the caller
+    int async_depth;
+    int iopattern;
+    int gpu_copy;
+
+    char *load_plugins;
+
+    mfxExtBuffer **ext_buffers;
+    int nb_ext_buffers;
+} QSVContext;
+
+static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
     &(const AVCodecHWConfigInternal) {
         .public = {
             .pix_fmt     = AV_PIX_FMT_QSV,
@@ -54,17 +97,61 @@ const AVCodecHWConfigInternal *ff_qsv_hw_configs[] = {
     NULL
 };
 
+static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
+                                     AVBufferPool *pool)
+{
+    int ret = 0;
+
+    ff_decode_frame_props(avctx, frame);
+
+    frame->width       = avctx->width;
+    frame->height      = avctx->height;
+
+    switch (avctx->pix_fmt) {
+    case AV_PIX_FMT_NV12:
+        frame->linesize[0] = FFALIGN(avctx->width, 128);
+        break;
+    case AV_PIX_FMT_P010:
+        frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
+        break;
+    default:
+        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
+        return AVERROR(EINVAL);
+    }
+
+    frame->linesize[1] = frame->linesize[0];
+    frame->buf[0]      = av_buffer_pool_get(pool);
+    if (!frame->buf[0])
+        return AVERROR(ENOMEM);
+
+    frame->data[0] = frame->buf[0]->data;
+    frame->data[1] = frame->data[0] +
+                            frame->linesize[0] * FFALIGN(avctx->height, 64);
+
+    ret = ff_attach_decode_data(frame);
+    if (ret < 0)
+        return ret;
+
+    return 0;
+}
+
 static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
                             AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
 {
     int ret;
 
+    if (q->gpu_copy == MFX_GPUCOPY_ON &&
+        !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
+        av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
+                        "only works in system memory mode.\n");
+        q->gpu_copy = MFX_GPUCOPY_OFF;
+    }
     if (session) {
         q->session = session;
     } else if (hw_frames_ref) {
-        if (q->internal_session) {
-            MFXClose(q->internal_session);
-            q->internal_session = NULL;
+        if (q->internal_qs.session) {
+            
MFXClose(q->internal_qs.session); + q->internal_qs.session = NULL; } av_buffer_unref(&q->frames_ctx.hw_frames_ctx); @@ -72,36 +159,37 @@ static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession ses if (!q->frames_ctx.hw_frames_ctx) return AVERROR(ENOMEM); - ret = ff_qsv_init_session_frames(avctx, &q->internal_session, + ret = ff_qsv_init_session_frames(avctx, &q->internal_qs.session, &q->frames_ctx, q->load_plugins, - q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY); + q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY, + q->gpu_copy); if (ret < 0) { av_buffer_unref(&q->frames_ctx.hw_frames_ctx); return ret; } - q->session = q->internal_session; + q->session = q->internal_qs.session; } else if (hw_device_ref) { - if (q->internal_session) { - MFXClose(q->internal_session); - q->internal_session = NULL; + if (q->internal_qs.session) { + MFXClose(q->internal_qs.session); + q->internal_qs.session = NULL; } - ret = ff_qsv_init_session_device(avctx, &q->internal_session, - hw_device_ref, q->load_plugins); + ret = ff_qsv_init_session_device(avctx, &q->internal_qs.session, + hw_device_ref, q->load_plugins, q->gpu_copy); if (ret < 0) return ret; - q->session = q->internal_session; + q->session = q->internal_qs.session; } else { - if (!q->internal_session) { - ret = ff_qsv_init_internal_session(avctx, &q->internal_session, - q->load_plugins); + if (!q->internal_qs.session) { + ret = ff_qsv_init_internal_session(avctx, &q->internal_qs, + q->load_plugins, q->gpu_copy); if (ret < 0) return ret; } - q->session = q->internal_session; + q->session = q->internal_qs.session; } /* make sure the decoder is uninitialized */ @@ -120,19 +208,21 @@ static inline unsigned int qsv_fifo_size(const AVFifoBuffer* fifo) return av_fifo_size(fifo) / qsv_fifo_item_size(); } -static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q) +static int qsv_decode_preinit(AVCodecContext *avctx, QSVContext *q, enum AVPixelFormat pix_fmt, mfxVideoParam *param) { - const AVPixFmtDescriptor *desc; mfxSession session = NULL; int iopattern = 0; - mfxVideoParam param = { 0 }; - int frame_width = avctx->coded_width; - int frame_height = avctx->coded_height; int ret; + enum AVPixelFormat pix_fmts[3] = { + AV_PIX_FMT_QSV, /* opaque format in case of video memory output */ + pix_fmt, /* system memory format obtained from bitstream parser */ + AV_PIX_FMT_NONE }; - desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); - if (!desc) - return AVERROR_BUG; + ret = ff_get_format(avctx, pix_fmts); + if (ret < 0) { + q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE; + return ret; + } if (!q->async_fifo) { q->async_fifo = av_fifo_alloc(q->async_depth * qsv_fifo_item_size()); @@ -164,54 +254,85 @@ static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q) iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY; q->iopattern = iopattern; + ff_qsv_print_iopattern(avctx, q->iopattern, "Decoder"); + ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n"); return ret; } - ret = ff_qsv_codec_id_to_mfx(avctx->codec_id); - if (ret < 0) - return ret; + param->IOPattern = q->iopattern; + param->AsyncDepth = q->async_depth; + param->ExtParam = q->ext_buffers; + param->NumExtParam = q->nb_ext_buffers; - param.mfx.CodecId = ret; - param.mfx.CodecProfile = ff_qsv_profile_to_mfx(avctx->codec_id, avctx->profile); - param.mfx.CodecLevel = avctx->level == FF_LEVEL_UNKNOWN ? 
MFX_LEVEL_UNKNOWN : avctx->level; - - param.mfx.FrameInfo.BitDepthLuma = desc->comp[0].depth; - param.mfx.FrameInfo.BitDepthChroma = desc->comp[0].depth; - param.mfx.FrameInfo.Shift = desc->comp[0].depth > 8; - param.mfx.FrameInfo.FourCC = q->fourcc; - param.mfx.FrameInfo.Width = frame_width; - param.mfx.FrameInfo.Height = frame_height; - param.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420; - - switch (avctx->field_order) { - case AV_FIELD_PROGRESSIVE: - param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE; - break; - case AV_FIELD_TT: - param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_TFF; - break; - case AV_FIELD_BB: - param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_BFF; - break; - default: - param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_UNKNOWN; - break; - } + return 0; + } + +static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVideoParam *param) +{ + int ret; - param.IOPattern = q->iopattern; - param.AsyncDepth = q->async_depth; - param.ExtParam = q->ext_buffers; - param.NumExtParam = q->nb_ext_buffers; + avctx->width = param->mfx.FrameInfo.CropW; + avctx->height = param->mfx.FrameInfo.CropH; + avctx->coded_width = param->mfx.FrameInfo.Width; + avctx->coded_height = param->mfx.FrameInfo.Height; + avctx->level = param->mfx.CodecLevel; + avctx->profile = param->mfx.CodecProfile; + avctx->field_order = ff_qsv_map_picstruct(param->mfx.FrameInfo.PicStruct); + avctx->pix_fmt = ff_qsv_map_fourcc(param->mfx.FrameInfo.FourCC); - ret = MFXVideoDECODE_Init(q->session, ¶m); + ret = MFXVideoDECODE_Init(q->session, param); if (ret < 0) return ff_qsv_print_error(avctx, ret, "Error initializing the MFX video decoder"); - q->frame_info = param.mfx.FrameInfo; + q->frame_info = param->mfx.FrameInfo; + + if (!avctx->hw_frames_ctx) + q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt, + FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz); + return 0; +} + +static int qsv_decode_header(AVCodecContext *avctx, QSVContext *q, + const AVPacket *avpkt, enum AVPixelFormat pix_fmt, + mfxVideoParam *param) +{ + int ret; + + mfxBitstream bs = { 0 }; + + if (avpkt->size) { + bs.Data = avpkt->data; + bs.DataLength = avpkt->size; + bs.MaxLength = bs.DataLength; + bs.TimeStamp = avpkt->pts; + if (avctx->field_order == AV_FIELD_PROGRESSIVE) + bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME; + } else + return AVERROR_INVALIDDATA; + + + if(!q->session) { + ret = qsv_decode_preinit(avctx, q, pix_fmt, param); + if (ret < 0) + return ret; + } + + ret = ff_qsv_codec_id_to_mfx(avctx->codec_id); + if (ret < 0) + return ret; + + param->mfx.CodecId = ret; + ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, param); + if (MFX_ERR_MORE_DATA == ret) { + return AVERROR(EAGAIN); + } + if (ret < 0) + return ff_qsv_print_error(avctx, ret, + "Error decoding stream header"); return 0; } @@ -220,7 +341,11 @@ static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame) { int ret; - ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF); + if (q->pool) + ret = qsv_get_continuous_buffer(avctx, frame->frame, q->pool); + else + ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF); + if (ret < 0) return ret; @@ -318,7 +443,7 @@ static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf) static int qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, - AVPacket *avpkt) + const AVPacket *avpkt) { QSVFrame *out_frame; mfxFrameSurface1 *insurf; @@ -419,11 +544,6 @@ static int 
qsv_decode(AVCodecContext *avctx, QSVContext *q, outsurf = &out_frame->surface; -#if FF_API_PKT_PTS -FF_DISABLE_DEPRECATION_WARNINGS - frame->pkt_pts = outsurf->Data.TimeStamp; -FF_ENABLE_DEPRECATION_WARNINGS -#endif frame->pts = outsurf->Data.TimeStamp; frame->repeat_pict = @@ -449,7 +569,7 @@ FF_ENABLE_DEPRECATION_WARNINGS return bs.DataOffset; } -int ff_qsv_decode_close(QSVContext *q) +static void qsv_decode_close_qsvcontext(QSVContext *q) { QSVFrame *cur = q->work_frames; @@ -476,60 +596,40 @@ int ff_qsv_decode_close(QSVContext *q) av_fifo_free(q->async_fifo); q->async_fifo = NULL; - av_parser_close(q->parser); - avcodec_free_context(&q->avctx_internal); - - if (q->internal_session) - MFXClose(q->internal_session); + ff_qsv_close_internal_session(&q->internal_qs); av_buffer_unref(&q->frames_ctx.hw_frames_ctx); av_buffer_unref(&q->frames_ctx.mids_buf); - - return 0; + av_buffer_pool_uninit(&q->pool); } -int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q, - AVFrame *frame, int *got_frame, AVPacket *pkt) +static int qsv_process_data(AVCodecContext *avctx, QSVContext *q, + AVFrame *frame, int *got_frame, const AVPacket *pkt) { - uint8_t *dummy_data; - int dummy_size; int ret; - const AVPixFmtDescriptor *desc; - - if (!q->avctx_internal) { - q->avctx_internal = avcodec_alloc_context3(NULL); - if (!q->avctx_internal) - return AVERROR(ENOMEM); - - q->avctx_internal->codec_id = avctx->codec_id; - - q->parser = av_parser_init(avctx->codec_id); - if (!q->parser) - return AVERROR(ENOMEM); - - q->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; - q->orig_pix_fmt = AV_PIX_FMT_NONE; - } + mfxVideoParam param = { 0 }; + enum AVPixelFormat pix_fmt = AV_PIX_FMT_NV12; if (!pkt->size) return qsv_decode(avctx, q, frame, got_frame, pkt); - /* we assume the packets are already split properly and want - * just the codec parameters here */ - av_parser_parse2(q->parser, q->avctx_internal, - &dummy_data, &dummy_size, - pkt->data, pkt->size, pkt->pts, pkt->dts, - pkt->pos); - - avctx->field_order = q->parser->field_order; /* TODO: flush delayed frames on reinit */ - if (q->parser->format != q->orig_pix_fmt || - FFALIGN(q->parser->coded_width, 16) != FFALIGN(avctx->coded_width, 16) || - FFALIGN(q->parser->coded_height, 16) != FFALIGN(avctx->coded_height, 16)) { - enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV, - AV_PIX_FMT_NONE, - AV_PIX_FMT_NONE }; - enum AVPixelFormat qsv_format; + + // sw_pix_fmt, coded_width/height should be set for ff_get_format(), + // assume sw_pix_fmt is NV12 and coded_width/height to be 1280x720, + // the assumption may be not corret but will be updated after header decoded if not true. 
+ if (q->orig_pix_fmt != AV_PIX_FMT_NONE) + pix_fmt = q->orig_pix_fmt; + if (!avctx->coded_width) + avctx->coded_width = 1280; + if (!avctx->coded_height) + avctx->coded_height = 720; + + ret = qsv_decode_header(avctx, q, pkt, pix_fmt, ¶m); + + if (ret >= 0 && (q->orig_pix_fmt != ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC) || + avctx->coded_width != param.mfx.FrameInfo.Width || + avctx->coded_height != param.mfx.FrameInfo.Height)) { AVPacket zero_pkt = {0}; if (q->buffered_count) { @@ -538,55 +638,275 @@ int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q, q->buffered_count--; return qsv_decode(avctx, q, frame, got_frame, &zero_pkt); } - q->reinit_flag = 0; - qsv_format = ff_qsv_map_pixfmt(q->parser->format, &q->fourcc); - if (qsv_format < 0) { - av_log(avctx, AV_LOG_ERROR, - "Decoding pixel format '%s' is not supported\n", - av_get_pix_fmt_name(q->parser->format)); - ret = AVERROR(ENOSYS); - goto reinit_fail; - } + q->orig_pix_fmt = avctx->pix_fmt = pix_fmt = ff_qsv_map_fourcc(param.mfx.FrameInfo.FourCC); - q->orig_pix_fmt = q->parser->format; - avctx->pix_fmt = pix_fmts[1] = qsv_format; - avctx->width = q->parser->width; - avctx->height = q->parser->height; - avctx->coded_width = FFALIGN(q->parser->coded_width, 16); - avctx->coded_height = FFALIGN(q->parser->coded_height, 16); - avctx->level = q->avctx_internal->level; - avctx->profile = q->avctx_internal->profile; + avctx->coded_width = param.mfx.FrameInfo.Width; + avctx->coded_height = param.mfx.FrameInfo.Height; - ret = ff_get_format(avctx, pix_fmts); + ret = qsv_decode_preinit(avctx, q, pix_fmt, ¶m); if (ret < 0) goto reinit_fail; + q->initialized = 0; + } - avctx->pix_fmt = ret; - - desc = av_pix_fmt_desc_get(avctx->pix_fmt); - if (!desc) - goto reinit_fail; - - if (desc->comp[0].depth > 8) { - avctx->coded_width = FFALIGN(q->parser->coded_width, 32); - avctx->coded_height = FFALIGN(q->parser->coded_height, 32); - } - - ret = qsv_decode_init(avctx, q); + if (!q->initialized) { + ret = qsv_decode_init_context(avctx, q, ¶m); if (ret < 0) goto reinit_fail; + q->initialized = 1; } return qsv_decode(avctx, q, frame, got_frame, pkt); reinit_fail: - q->orig_pix_fmt = q->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE; + q->orig_pix_fmt = avctx->pix_fmt = AV_PIX_FMT_NONE; return ret; } -void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q) +enum LoadPlugin { + LOAD_PLUGIN_NONE, + LOAD_PLUGIN_HEVC_SW, + LOAD_PLUGIN_HEVC_HW, +}; + +typedef struct QSVDecContext { + AVClass *class; + QSVContext qsv; + + int load_plugin; + + AVFifoBuffer *packet_fifo; + + AVPacket buffer_pkt; +} QSVDecContext; + +static void qsv_clear_buffers(QSVDecContext *s) { - q->orig_pix_fmt = AV_PIX_FMT_NONE; + AVPacket pkt; + while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) { + av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL); + av_packet_unref(&pkt); + } + + av_packet_unref(&s->buffer_pkt); } + +static av_cold int qsv_decode_close(AVCodecContext *avctx) +{ + QSVDecContext *s = avctx->priv_data; + + av_freep(&s->qsv.load_plugins); + + qsv_decode_close_qsvcontext(&s->qsv); + + qsv_clear_buffers(s); + + av_fifo_free(s->packet_fifo); + + return 0; +} + +static av_cold int qsv_decode_init(AVCodecContext *avctx) +{ + QSVDecContext *s = avctx->priv_data; + int ret; + const char *uid = NULL; + + if (avctx->codec_id == AV_CODEC_ID_VP8) { + uid = "f622394d8d87452f878c51f2fc9b4131"; + } else if (avctx->codec_id == AV_CODEC_ID_VP9) { + uid = "a922394d8d87452f878c51f2fc9b4131"; + } + else if (avctx->codec_id == AV_CODEC_ID_HEVC && s->load_plugin 
!= LOAD_PLUGIN_NONE) { + static const char * const uid_hevcdec_sw = "15dd936825ad475ea34e35f3f54217a6"; + static const char * const uid_hevcdec_hw = "33a61c0b4c27454ca8d85dde757c6f8e"; + + if (s->qsv.load_plugins[0]) { + av_log(avctx, AV_LOG_WARNING, + "load_plugins is not empty, but load_plugin is not set to 'none'." + "The load_plugin value will be ignored.\n"); + } else { + if (s->load_plugin == LOAD_PLUGIN_HEVC_SW) + uid = uid_hevcdec_sw; + else + uid = uid_hevcdec_hw; + } + } + if (uid) { + av_freep(&s->qsv.load_plugins); + s->qsv.load_plugins = av_strdup(uid); + if (!s->qsv.load_plugins) + return AVERROR(ENOMEM); + } + + s->qsv.orig_pix_fmt = AV_PIX_FMT_NV12; + s->packet_fifo = av_fifo_alloc(sizeof(AVPacket)); + if (!s->packet_fifo) { + ret = AVERROR(ENOMEM); + goto fail; + } + + return 0; +fail: + qsv_decode_close(avctx); + return ret; +} + +static int qsv_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame, AVPacket *avpkt) +{ + QSVDecContext *s = avctx->priv_data; + AVFrame *frame = data; + int ret; + + /* buffer the input packet */ + if (avpkt->size) { + AVPacket input_ref; + + if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) { + ret = av_fifo_realloc2(s->packet_fifo, + av_fifo_size(s->packet_fifo) + sizeof(input_ref)); + if (ret < 0) + return ret; + } + + ret = av_packet_ref(&input_ref, avpkt); + if (ret < 0) + return ret; + av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL); + } + + /* process buffered data */ + while (!*got_frame) { + /* prepare the input data */ + if (s->buffer_pkt.size <= 0) { + /* no more data */ + if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket)) + return avpkt->size ? avpkt->size : qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt); + /* in progress of reinit, no read from fifo and keep the buffer_pkt */ + if (!s->qsv.reinit_flag) { + av_packet_unref(&s->buffer_pkt); + av_fifo_generic_read(s->packet_fifo, &s->buffer_pkt, sizeof(s->buffer_pkt), NULL); + } + } + + ret = qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt); + if (ret < 0){ + /* Drop buffer_pkt when failed to decode the packet. Otherwise, + the decoder will keep decoding the failure packet. 
*/ + av_packet_unref(&s->buffer_pkt); + return ret; + } + if (s->qsv.reinit_flag) + continue; + + s->buffer_pkt.size -= ret; + s->buffer_pkt.data += ret; + } + + return avpkt->size; +} + +static void qsv_decode_flush(AVCodecContext *avctx) +{ + QSVDecContext *s = avctx->priv_data; + + qsv_clear_buffers(s); + + s->qsv.orig_pix_fmt = AV_PIX_FMT_NONE; + s->qsv.initialized = 0; +} + +#define OFFSET(x) offsetof(QSVDecContext, x) +#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM + +#define DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, opt) \ +static const AVClass x##_qsv_class = { \ + .class_name = #x "_qsv", \ + .item_name = av_default_item_name, \ + .option = opt, \ + .version = LIBAVUTIL_VERSION_INT, \ +}; \ +const AVCodec ff_##x##_qsv_decoder = { \ + .name = #x "_qsv", \ + .long_name = NULL_IF_CONFIG_SMALL(#X " video (Intel Quick Sync Video acceleration)"), \ + .priv_data_size = sizeof(QSVDecContext), \ + .type = AVMEDIA_TYPE_VIDEO, \ + .id = AV_CODEC_ID_##X, \ + .init = qsv_decode_init, \ + .decode = qsv_decode_frame, \ + .flush = qsv_decode_flush, \ + .close = qsv_decode_close, \ + .bsfs = bsf_name, \ + .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HYBRID, \ + .priv_class = &x##_qsv_class, \ + .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \ + AV_PIX_FMT_P010, \ + AV_PIX_FMT_QSV, \ + AV_PIX_FMT_NONE }, \ + .hw_configs = qsv_hw_configs, \ + .wrapper_name = "qsv", \ +}; \ + +#define DEFINE_QSV_DECODER(x, X, bsf_name) DEFINE_QSV_DECODER_WITH_OPTION(x, X, bsf_name, options) + +#if CONFIG_HEVC_QSV_DECODER +static const AVOption hevc_options[] = { + { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD }, + + { "load_plugin", "A user plugin to load in an internal session", OFFSET(load_plugin), AV_OPT_TYPE_INT, { .i64 = LOAD_PLUGIN_HEVC_HW }, LOAD_PLUGIN_NONE, LOAD_PLUGIN_HEVC_HW, VD, "load_plugin" }, + { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_NONE }, 0, 0, VD, "load_plugin" }, + { "hevc_sw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_SW }, 0, 0, VD, "load_plugin" }, + { "hevc_hw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LOAD_PLUGIN_HEVC_HW }, 0, 0, VD, "load_plugin" }, + + { "load_plugins", "A :-separate list of hexadecimal plugin UIDs to load in an internal session", + OFFSET(qsv.load_plugins), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD }, + + { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"}, + { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"}, + { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"}, + { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"}, + { NULL }, +}; +DEFINE_QSV_DECODER_WITH_OPTION(hevc, HEVC, "hevc_mp4toannexb", hevc_options) +#endif + +static const AVOption options[] = { + { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VD }, + + { "gpu_copy", "A GPU-accelerated copy between video and system memory", OFFSET(qsv.gpu_copy), AV_OPT_TYPE_INT, { .i64 = MFX_GPUCOPY_DEFAULT }, MFX_GPUCOPY_DEFAULT, MFX_GPUCOPY_OFF, VD, "gpu_copy"}, + { "default", NULL, 0, 
AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_DEFAULT }, 0, 0, VD, "gpu_copy"}, + { "on", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_ON }, 0, 0, VD, "gpu_copy"}, + { "off", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MFX_GPUCOPY_OFF }, 0, 0, VD, "gpu_copy"}, + { NULL }, +}; + +#if CONFIG_H264_QSV_DECODER +DEFINE_QSV_DECODER(h264, H264, "h264_mp4toannexb") +#endif + +#if CONFIG_MPEG2_QSV_DECODER +DEFINE_QSV_DECODER(mpeg2, MPEG2VIDEO, NULL) +#endif + +#if CONFIG_VC1_QSV_DECODER +DEFINE_QSV_DECODER(vc1, VC1, NULL) +#endif + +#if CONFIG_MJPEG_QSV_DECODER +DEFINE_QSV_DECODER(mjpeg, MJPEG, NULL) +#endif + +#if CONFIG_VP8_QSV_DECODER +DEFINE_QSV_DECODER(vp8, VP8, NULL) +#endif + +#if CONFIG_VP9_QSV_DECODER +DEFINE_QSV_DECODER(vp9, VP9, NULL) +#endif + +#if CONFIG_AV1_QSV_DECODER +DEFINE_QSV_DECODER(av1, AV1, NULL) +#endif