#include "qsv_internal.h"
#include "qsvdec.h"
+/* Hardware configurations advertised by the QSV decoders: one entry
+ * offering AV_PIX_FMT_QSV output on an AV_HWDEVICE_TYPE_QSV device,
+ * reachable either through an explicit hw_frames_ctx or through the
+ * legacy ad-hoc setup path. The array is NULL-terminated. */
+const AVCodecHWConfigInternal *ff_qsv_hw_configs[] = {
+ &(const AVCodecHWConfigInternal) {
+ .public = {
+ .pix_fmt = AV_PIX_FMT_QSV,
+ .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
+ AV_CODEC_HW_CONFIG_METHOD_AD_HOC,
+ .device_type = AV_HWDEVICE_TYPE_QSV,
+ },
+ .hwaccel = NULL, /* no hwaccel callbacks; decoding goes through the SDK */
+ },
+ NULL
+};
+
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
- AVBufferRef *hw_frames_ref)
+ AVBufferRef *hw_frames_ref, AVBufferRef *hw_device_ref)
{
int ret;
if (!q->frames_ctx.hw_frames_ctx)
return AVERROR(ENOMEM);
- ret = ff_qsv_init_session_hwcontext(avctx, &q->internal_session,
- &q->frames_ctx, q->load_plugins,
- q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
+ ret = ff_qsv_init_session_frames(avctx, &q->internal_session,
+ &q->frames_ctx, q->load_plugins,
+ q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
if (ret < 0) {
av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
return ret;
}
+ q->session = q->internal_session;
+ } else if (hw_device_ref) {
+ if (q->internal_session) {
+ MFXClose(q->internal_session);
+ q->internal_session = NULL;
+ }
+
+ ret = ff_qsv_init_session_device(avctx, &q->internal_session,
+ hw_device_ref, q->load_plugins);
+ if (ret < 0)
+ return ret;
+
q->session = q->internal_session;
} else {
if (!q->internal_session) {
return 0;
}
+/* Byte size of a single async-FIFO entry: a sync-point pointer and a
+ * frame pointer stored back to back. */
+static inline unsigned int qsv_fifo_item_size(void)
+{
+    return sizeof(QSVFrame*) + sizeof(mfxSyncPoint*);
+}
+
+/* Number of complete (sync point, frame) entries currently queued in
+ * the async FIFO. */
+static inline unsigned int qsv_fifo_size(const AVFifoBuffer* fifo)
+{
+    unsigned int bytes_queued = av_fifo_size(fifo);
+
+    return bytes_queued / qsv_fifo_item_size();
+}
+
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
{
const AVPixFmtDescriptor *desc;
mfxSession session = NULL;
int iopattern = 0;
- mfxVideoParam param = { { 0 } };
+ mfxVideoParam param = { 0 };
int frame_width = avctx->coded_width;
int frame_height = avctx->coded_height;
int ret;
return AVERROR_BUG;
if (!q->async_fifo) {
- q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
- (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*)));
+ q->async_fifo = av_fifo_alloc(q->async_depth * qsv_fifo_item_size());
if (!q->async_fifo)
return AVERROR(ENOMEM);
}
else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
}
-
- frame_width = frames_hwctx->surfaces[0].Info.Width;
- frame_height = frames_hwctx->surfaces[0].Info.Height;
}
if (!iopattern)
iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
q->iopattern = iopattern;
- ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx);
+ ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx, avctx->hw_device_ctx);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
return ret;
frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
}
+ frame->surface.Data.ExtParam = &frame->ext_param;
+ frame->surface.Data.NumExtParam = 1;
+ frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
+ frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
+ frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
frame->used = 1;
bs.DataLength = avpkt->size;
bs.MaxLength = bs.DataLength;
bs.TimeStamp = avpkt->pts;
+ if (avctx->field_order == AV_FIELD_PROGRESSIVE)
+ bs.DataFlag |= MFX_BITSTREAM_COMPLETE_FRAME;
}
sync = av_mallocz(sizeof(*sync));
do {
ret = get_surface(avctx, q, &insurf);
- if (ret < 0)
+ if (ret < 0) {
+ av_freep(&sync);
return ret;
+ }
ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
insurf, &outsurf, sync);
av_freep(&sync);
}
- if (!av_fifo_space(q->async_fifo) ||
+ if ((qsv_fifo_size(q->async_fifo) >= q->async_depth) ||
(!avpkt->size && av_fifo_size(q->async_fifo))) {
AVFrame *src_frame;
av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
out_frame->queued = 0;
- do {
- ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
- } while (ret == MFX_WRN_IN_EXECUTION);
+ if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
+ do {
+ ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
+ } while (ret == MFX_WRN_IN_EXECUTION);
+ }
av_freep(&sync);
outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
frame->interlaced_frame =
!(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
+ frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);
+ // Treating only IDR frames as key frames is valid only for H.264; for HEVC, all IRAP frames are key frames.
+ if (avctx->codec_id == AV_CODEC_ID_H264)
+ frame->key_frame = !!(out_frame->dec_info.FrameType & MFX_FRAMETYPE_IDR);
/* update the surface properties */
if (avctx->pix_fmt == AV_PIX_FMT_QSV)
uint8_t *dummy_data;
int dummy_size;
int ret;
+ const AVPixFmtDescriptor *desc;
if (!q->avctx_internal) {
q->avctx_internal = avcodec_alloc_context3(NULL);
pkt->data, pkt->size, pkt->pts, pkt->dts,
pkt->pos);
+ avctx->field_order = q->parser->field_order;
/* TODO: flush delayed frames on reinit */
if (q->parser->format != q->orig_pix_fmt ||
- q->parser->coded_width != avctx->coded_width ||
- q->parser->coded_height != avctx->coded_height) {
+ FFALIGN(q->parser->coded_width, 16) != FFALIGN(avctx->coded_width, 16) ||
+ FFALIGN(q->parser->coded_height, 16) != FFALIGN(avctx->coded_height, 16)) {
enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
AV_PIX_FMT_NONE,
AV_PIX_FMT_NONE };
avctx->pix_fmt = pix_fmts[1] = qsv_format;
avctx->width = q->parser->width;
avctx->height = q->parser->height;
- avctx->coded_width = q->parser->coded_width;
- avctx->coded_height = q->parser->coded_height;
- avctx->field_order = q->parser->field_order;
+ avctx->coded_width = FFALIGN(q->parser->coded_width, 16);
+ avctx->coded_height = FFALIGN(q->parser->coded_height, 16);
avctx->level = q->avctx_internal->level;
avctx->profile = q->avctx_internal->profile;
avctx->pix_fmt = ret;
+ desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ if (!desc)
+ goto reinit_fail;
+
+ if (desc->comp[0].depth > 8) {
+ avctx->coded_width = FFALIGN(q->parser->coded_width, 32);
+ avctx->coded_height = FFALIGN(q->parser->coded_height, 32);
+ }
+
ret = qsv_decode_init(avctx, q);
if (ret < 0)
goto reinit_fail;