#include "pixdesc.h"
#include "time.h"
+/* True when the libmfx headers we compile against are at least
+ * version MAJOR.MINOR. Inner parentheses make the || / && grouping
+ * explicit instead of relying on operator precedence. */
+#define QSV_VERSION_ATLEAST(MAJOR, MINOR) \
+ (MFX_VERSION_MAJOR > (MAJOR) || \
+ (MFX_VERSION_MAJOR == (MAJOR) && MFX_VERSION_MINOR >= (MINOR)))
+
/* Per-device private data for a QSV hardware device context. */
typedef struct QSVDevicePriv {
AVBufferRef *child_device_ctx; /* owned ref to the child (e.g. VAAPI/DXVA2) device context */
} QSVDevicePriv;
uint32_t fourcc;
} supported_pixel_formats[] = {
{ AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+ { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4 },
{ AV_PIX_FMT_P010, MFX_FOURCC_P010 },
{ AV_PIX_FMT_PAL8, MFX_FOURCC_P8 },
+#if CONFIG_VAAPI
+ { AV_PIX_FMT_YUYV422,
+ MFX_FOURCC_YUY2 },
+#if QSV_VERSION_ATLEAST(1, 27)
+ { AV_PIX_FMT_Y210,
+ MFX_FOURCC_Y210 },
+#endif
+#endif
};
static uint32_t qsv_fourcc_from_pix_fmt(enum AVPixelFormat pix_fmt)
{
}
-static AVBufferRef *qsv_pool_alloc(void *opaque, int size)
+static AVBufferRef *qsv_pool_alloc(void *opaque, size_t size)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;
QSVFramesContext *s = ctx->internal->priv;
!(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
!(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
return MFX_ERR_UNSUPPORTED;
- if (i->Width != i1->Width || i->Height != i1->Height ||
+ if (i->Width > i1->Width || i->Height > i1->Height ||
i->FourCC != i1->FourCC || i->ChromaFormat != i1->ChromaFormat) {
av_log(ctx, AV_LOG_ERROR, "Mismatching surface properties in an "
"allocation request: %dx%d %d %d vs %dx%d %d %d\n",
return ret;
}
+/*
+ * Point an mfxFrameSurface1's plane pointers at the pixel data of a
+ * system-memory AVFrame so libmfx can read or write it without a copy.
+ *
+ * Only Data.{planes,Pitch,TimeStamp} are filled in; the caller is
+ * responsible for setting surface->Info.
+ *
+ * Returns 0 on success, MFX_ERR_UNSUPPORTED for pixel formats not
+ * handled here.
+ */
+static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+ switch (frame->format) {
+ case AV_PIX_FMT_NV12:
+ case AV_PIX_FMT_P010:
+ /* semi-planar: luma plane + interleaved chroma plane */
+ surface->Data.Y = frame->data[0];
+ surface->Data.UV = frame->data[1];
+ break;
+
+ case AV_PIX_FMT_YUV420P:
+ /* fully planar: one pointer per plane */
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[1];
+ surface->Data.V = frame->data[2];
+ break;
+
+ case AV_PIX_FMT_BGRA:
+ /* packed 8-bit BGRA: component pointers are byte offsets into
+ * the single interleaved plane */
+ surface->Data.B = frame->data[0];
+ surface->Data.G = frame->data[0] + 1;
+ surface->Data.R = frame->data[0] + 2;
+ surface->Data.A = frame->data[0] + 3;
+ break;
+#if CONFIG_VAAPI
+ case AV_PIX_FMT_YUYV422:
+ /* packed Y0 U Y1 V: U at byte 1, V at byte 3 of each macropixel */
+ surface->Data.Y = frame->data[0];
+ surface->Data.U = frame->data[0] + 1;
+ surface->Data.V = frame->data[0] + 3;
+ break;
+
+ case AV_PIX_FMT_Y210:
+ /* packed 10-bit-in-16 Y0 U Y1 V: offsets are in 16-bit samples,
+ * hence the cast before the pointer arithmetic */
+ surface->Data.Y16 = (mfxU16 *)frame->data[0];
+ surface->Data.U16 = (mfxU16 *)frame->data[0] + 1;
+ surface->Data.V16 = (mfxU16 *)frame->data[0] + 3;
+ break;
+#endif
+ default:
+ return MFX_ERR_UNSUPPORTED;
+ }
+ /* NOTE(review): only linesize[0] is exported; libmfx appears to assume
+ * all planes share this pitch — confirm callers allocate frames that way. */
+ surface->Data.Pitch = frame->linesize[0];
+ surface->Data.TimeStamp = frame->pts;
+
+ return 0;
+}
+
static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
const AVFrame *src)
{
}
out.Info = in->Info;
- out.Data.PitchLow = dst->linesize[0];
- out.Data.Y = dst->data[0];
- out.Data.U = dst->data[1];
- out.Data.V = dst->data[2];
- out.Data.A = dst->data[3];
+ map_frame_to_surface(dst, &out);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
mfxSyncPoint sync = NULL;
mfxStatus err;
- int ret;
+ int ret = 0;
+ /* make a copy if the input is not padded as libmfx requires */
+ AVFrame tmp_frame;
+ const AVFrame *src_frame;
+ int realigned = 0;
+
while (!s->session_upload_init && !s->session_upload && !ret) {
#if HAVE_PTHREADS
if (ret < 0)
return ret;
+ if (src->height & 15 || src->linesize[0] & 15) {
+ realigned = 1;
+ memset(&tmp_frame, 0, sizeof(tmp_frame));
+ tmp_frame.format = src->format;
+ tmp_frame.width = FFALIGN(src->width, 16);
+ tmp_frame.height = FFALIGN(src->height, 16);
+ ret = av_frame_get_buffer(&tmp_frame, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = av_frame_copy(&tmp_frame, src);
+ if (ret < 0) {
+ av_frame_unref(&tmp_frame);
+ return ret;
+ }
+ }
+
+ src_frame = realigned ? &tmp_frame : src;
+
if (!s->session_upload) {
if (s->child_frames_ref)
- return qsv_transfer_data_child(ctx, dst, src);
+ return qsv_transfer_data_child(ctx, dst, src_frame);
av_log(ctx, AV_LOG_ERROR, "Surface upload not possible\n");
return AVERROR(ENOSYS);
}
in.Info = out->Info;
- in.Data.PitchLow = src->linesize[0];
- in.Data.Y = src->data[0];
- in.Data.U = src->data[1];
- in.Data.V = src->data[2];
- in.Data.A = src->data[3];
+ map_frame_to_surface(src_frame, &in);
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
return AVERROR_UNKNOWN;
}
+ if (realigned)
+ av_frame_unref(&tmp_frame);
+
return 0;
}
goto fail;
}
- ret = MFXQueryVersion(hwctx->session,&ver);
- if (ret == MFX_ERR_NONE) {
- av_log(ctx, AV_LOG_VERBOSE, "MFX compile/runtime API: %d.%d/%d.%d\n",
- MFX_VERSION_MAJOR, MFX_VERSION_MINOR, ver.Major, ver.Minor);
- }
return 0;
fail:
}
static int qsv_device_derive(AVHWDeviceContext *ctx,
- AVHWDeviceContext *child_device_ctx, int flags)
+ AVHWDeviceContext *child_device_ctx,
+ AVDictionary *opts, int flags)
{
return qsv_device_derive_from_child(ctx, MFX_IMPL_HARDWARE_ANY,
child_device_ctx, flags);
QSVDevicePriv *priv;
enum AVHWDeviceType child_device_type;
AVHWDeviceContext *child_device;
+ AVDictionary *child_device_opts;
AVDictionaryEntry *e;
mfxIMPL impl;
e = av_dict_get(opts, "child_device", NULL, 0);
- if (CONFIG_VAAPI)
+ child_device_opts = NULL;
+ if (CONFIG_VAAPI) {
child_device_type = AV_HWDEVICE_TYPE_VAAPI;
- else if (CONFIG_DXVA2)
+ // libmfx does not actually implement VAAPI properly, rather it
+ // depends on the specific behaviour of a matching iHD driver when
+ // used on recent Intel hardware. Set options to the VAAPI device
+ // creation so that we should pick a usable setup by default if
+ // possible, even when multiple devices and drivers are available.
+ av_dict_set(&child_device_opts, "kernel_driver", "i915", 0);
+ av_dict_set(&child_device_opts, "driver", "iHD", 0);
+ } else if (CONFIG_DXVA2)
child_device_type = AV_HWDEVICE_TYPE_DXVA2;
else {
av_log(ctx, AV_LOG_ERROR, "No supported child device type is enabled\n");
}
ret = av_hwdevice_ctx_create(&priv->child_device_ctx, child_device_type,
- e ? e->value : NULL, NULL, 0);
+ e ? e->value : NULL, child_device_opts, 0);
+
+ av_dict_free(&child_device_opts);
if (ret < 0)
return ret;