/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "buffer.h"
#include "common.h"
#include "hwcontext.h"
#include "hwcontext_internal.h"
#include "hwcontext_cuda_internal.h"
#if CONFIG_VULKAN
#include "hwcontext_vulkan.h"
#endif
#include "cuda_check.h"
#include "mem.h"
#include "pixdesc.h"
#include "pixfmt.h"
#include "imgutils.h"
#define CUDA_FRAME_ALIGNMENT 256
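/*
 * Per-AVHWFramesContext private data: the chroma subsampling shifts of
 * sw_format, cached by cuda_frames_init() and used to compute per-plane copy
 * heights in cuda_transfer_data().
 */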
typedef struct CUDAFramesContext {
    int shift_width, shift_height;
} CUDAFramesContext;

static const enum AVPixelFormat supported_formats[] = {
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_P010,
    AV_PIX_FMT_P016,
    AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_0RGB32,
    AV_PIX_FMT_0BGR32,
};
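/*
 * All driver API calls go through the dynamically loaded function table in
 * hwctx->internal->cuda_dl; CHECK_CU() logs any CUDA error against the device
 * context and converts the result into a negative AVERROR value.
 */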
#define CHECK_CU(x) FF_CUDA_CHECK_DL(device_ctx, cu, x)
static int cuda_frames_get_constraints(AVHWDeviceContext *ctx,
                                       const void *hwconfig,
                                       AVHWFramesConstraints *constraints)
{
    int i;

    constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
                                                    sizeof(*constraints->valid_sw_formats));
    if (!constraints->valid_sw_formats)
        return AVERROR(ENOMEM);

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
        constraints->valid_sw_formats[i] = supported_formats[i];
    constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;

    constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
    if (!constraints->valid_hw_formats)
        return AVERROR(ENOMEM);

    constraints->valid_hw_formats[0] = AV_PIX_FMT_CUDA;
    constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;

    return 0;
}
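/*
 * Pool buffer callbacks. cuMemAlloc()/cuMemFree() are bound to a CUDA
 * context, so both the allocator and the free callback push the device
 * context for the duration of the call.
 */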
static void cuda_buffer_free(void *opaque, uint8_t *data)
{
    AVHWFramesContext        *ctx = opaque;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext    *hwctx = device_ctx->hwctx;
    CudaFunctions             *cu = hwctx->internal->cuda_dl;

    CUcontext dummy;

    CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));

    CHECK_CU(cu->cuMemFree((CUdeviceptr)data));

    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
}
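/* Allocator for the internal AVBufferPool: one linear device allocation per
 * frame, wrapped in an AVBufferRef that frees it via cuda_buffer_free(). */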
static AVBufferRef *cuda_pool_alloc(void *opaque, int size)
{
    AVHWFramesContext        *ctx = opaque;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext    *hwctx = device_ctx->hwctx;
    CudaFunctions             *cu = hwctx->internal->cuda_dl;

    AVBufferRef *ret = NULL;
    CUcontext dummy = NULL;
    CUdeviceptr data;
    int err;

    err = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
    if (err < 0)
        return NULL;

    err = CHECK_CU(cu->cuMemAlloc(&data, size));
    if (err < 0)
        goto fail;

    ret = av_buffer_create((uint8_t*)data, size, cuda_buffer_free, ctx, 0);
    if (!ret)
        CHECK_CU(cu->cuMemFree(data));

fail:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    return ret;
}
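/* Validates sw_format against supported_formats, caches the chroma shifts and
 * sets up an internal pool (sized with CUDA_FRAME_ALIGNMENT) if the caller
 * did not provide one. */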
static int cuda_frames_init(AVHWFramesContext *ctx)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
        if (ctx->sw_format == supported_formats[i])
            break;
    }
    if (i == FF_ARRAY_ELEMS(supported_formats)) {
        av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(ctx->sw_format));
        return AVERROR(ENOSYS);
    }

    av_pix_fmt_get_chroma_sub_sample(ctx->sw_format, &priv->shift_width, &priv->shift_height);

    if (!ctx->pool) {
        int size = av_image_get_buffer_size(ctx->sw_format, ctx->width, ctx->height, CUDA_FRAME_ALIGNMENT);
        if (size < 0)
            return size;

        ctx->internal->pool_internal = av_buffer_pool_init2(size, ctx, cuda_pool_alloc, NULL);
        if (!ctx->internal->pool_internal)
            return AVERROR(ENOMEM);
    }

    return 0;
}
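/* Takes a buffer from the pool and lays the planes of sw_format out inside
 * the single device allocation, mirroring what av_image_fill_arrays() does
 * for host memory. */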
static int cuda_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
{
    int res;

    frame->buf[0] = av_buffer_pool_get(ctx->pool);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    res = av_image_fill_arrays(frame->data, frame->linesize, frame->buf[0]->data,
                               ctx->sw_format, ctx->width, ctx->height, CUDA_FRAME_ALIGNMENT);
    if (res < 0)
        return res;

    // YUV420P is a special case: NVENC expects the U/V planes in swapped
    // order from how FFmpeg lays them out, and the chroma is only half-aligned.
    if (ctx->sw_format == AV_PIX_FMT_YUV420P) {
        frame->linesize[1] = frame->linesize[2] = frame->linesize[0] / 2;
        frame->data[2]     = frame->data[1];
        frame->data[1]     = frame->data[2] + frame->linesize[2] * ctx->height / 2;
    }

    frame->format = AV_PIX_FMT_CUDA;
    frame->width  = ctx->width;
    frame->height = ctx->height;

    return 0;
}
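/* Transfers to and from system memory only support the frames context's own
 * sw_format; no pixel format conversion is done on the way. */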
static int cuda_transfer_get_formats(AVHWFramesContext *ctx,
                                     enum AVHWFrameTransferDirection dir,
                                     enum AVPixelFormat **formats)
{
    enum AVPixelFormat *fmts;

    fmts = av_malloc_array(2, sizeof(*fmts));
    if (!fmts)
        return AVERROR(ENOMEM);

    fmts[0] = ctx->sw_format;
    fmts[1] = AV_PIX_FMT_NONE;

    *formats = fmts;

    return 0;
}
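/*
 * Shared upload/download path (registered as both transfer_data_to and
 * transfer_data_from below): one cuMemcpy2DAsync() per plane on hwctx->stream,
 * choosing host or device source/destination based on which side carries a
 * hw_frames_ctx. The stream is synchronized only when the destination is host
 * memory, so uploads and device-to-device copies stay asynchronous.
 */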
static int cuda_transfer_data(AVHWFramesContext *ctx, AVFrame *dst,
                              const AVFrame *src)
{
    CUDAFramesContext       *priv = ctx->internal->priv;
    AVHWDeviceContext *device_ctx = ctx->device_ctx;
    AVCUDADeviceContext    *hwctx = device_ctx->hwctx;
    CudaFunctions             *cu = hwctx->internal->cuda_dl;

    CUcontext dummy;
    int i, ret;

    if ((src->hw_frames_ctx && ((AVHWFramesContext*)src->hw_frames_ctx->data)->format != AV_PIX_FMT_CUDA) ||
        (dst->hw_frames_ctx && ((AVHWFramesContext*)dst->hw_frames_ctx->data)->format != AV_PIX_FMT_CUDA))
        return AVERROR(ENOSYS);

    ret = CHECK_CU(cu->cuCtxPushCurrent(hwctx->cuda_ctx));
    if (ret < 0)
        return ret;

    for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
        CUDA_MEMCPY2D cpy = {
            .srcPitch     = src->linesize[i],
            .dstPitch     = dst->linesize[i],
            .WidthInBytes = FFMIN(src->linesize[i], dst->linesize[i]),
            .Height       = src->height >> ((i == 0 || i == 3) ? 0 : priv->shift_height),
        };

        if (src->hw_frames_ctx) {
            cpy.srcMemoryType = CU_MEMORYTYPE_DEVICE;
            cpy.srcDevice     = (CUdeviceptr)src->data[i];
        } else {
            cpy.srcMemoryType = CU_MEMORYTYPE_HOST;
            cpy.srcHost       = src->data[i];
        }

        if (dst->hw_frames_ctx) {
            cpy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
            cpy.dstDevice     = (CUdeviceptr)dst->data[i];
        } else {
            cpy.dstMemoryType = CU_MEMORYTYPE_HOST;
            cpy.dstHost       = dst->data[i];
        }

        ret = CHECK_CU(cu->cuMemcpy2DAsync(&cpy, hwctx->stream));
        if (ret < 0)
            goto exit;
    }

    if (!dst->hw_frames_ctx) {
        ret = CHECK_CU(cu->cuStreamSynchronize(hwctx->stream));
        if (ret < 0)
            goto exit;
    }

exit:
    CHECK_CU(cu->cuCtxPopCurrent(&dummy));

    return ret;
}
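/* Tears down whatever cuda_context_init() set up: the primary context is
 * released, a dedicated context is destroyed. */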
static void cuda_device_uninit(AVHWDeviceContext *device_ctx)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;

    if (hwctx->internal) {
        CudaFunctions *cu = hwctx->internal->cuda_dl;

        if (hwctx->internal->is_allocated && hwctx->cuda_ctx) {
            if (hwctx->internal->flags & AV_CUDA_USE_PRIMARY_CONTEXT)
                CHECK_CU(cu->cuDevicePrimaryCtxRelease(hwctx->internal->cuda_device));
            else
                CHECK_CU(cu->cuCtxDestroy(hwctx->cuda_ctx));

            hwctx->cuda_ctx = NULL;
        }

        cuda_free_functions(&hwctx->internal->cuda_dl);
    }

    av_freep(&hwctx->internal);
}
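/* Allocates the internal state and loads the CUDA driver API at runtime via
 * cuda_load_functions(), so no link-time dependency on the CUDA toolkit is
 * needed. */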
static int cuda_device_init(AVHWDeviceContext *ctx)
{
    AVCUDADeviceContext *hwctx = ctx->hwctx;
    int ret;

    if (!hwctx->internal) {
        hwctx->internal = av_mallocz(sizeof(*hwctx->internal));
        if (!hwctx->internal)
            return AVERROR(ENOMEM);
    }

    if (!hwctx->internal->cuda_dl) {
        ret = cuda_load_functions(&hwctx->internal->cuda_dl, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Could not dynamically load CUDA\n");
            goto error;
        }
    }

    return 0;

error:
    cuda_device_uninit(ctx);
    return ret;
}
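/* Obtains a CUDA context for hwctx->internal->cuda_device: either retains the
 * device's primary context (AV_CUDA_USE_PRIMARY_CONTEXT) or creates a
 * dedicated one, in both cases requesting CU_CTX_SCHED_BLOCKING_SYNC. */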
static int cuda_context_init(AVHWDeviceContext *device_ctx, int flags) {
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    CUcontext dummy;
    int ret, dev_active = 0;
    unsigned int dev_flags = 0;

    const unsigned int desired_flags = CU_CTX_SCHED_BLOCKING_SYNC;

    cu = hwctx->internal->cuda_dl;

    hwctx->internal->flags = flags;

    if (flags & AV_CUDA_USE_PRIMARY_CONTEXT) {
        ret = CHECK_CU(cu->cuDevicePrimaryCtxGetState(hwctx->internal->cuda_device,
                                                      &dev_flags, &dev_active));
        if (ret < 0)
            return ret;

        if (dev_active && dev_flags != desired_flags) {
            av_log(device_ctx, AV_LOG_ERROR, "Primary context already active with incompatible flags.\n");
            return AVERROR(ENOTSUP);
        } else if (dev_flags != desired_flags) {
            ret = CHECK_CU(cu->cuDevicePrimaryCtxSetFlags(hwctx->internal->cuda_device,
                                                          desired_flags));
            if (ret < 0)
                return ret;
        }

        ret = CHECK_CU(cu->cuDevicePrimaryCtxRetain(&hwctx->cuda_ctx,
                                                    hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;
    } else {
        ret = CHECK_CU(cu->cuCtxCreate(&hwctx->cuda_ctx, desired_flags,
                                       hwctx->internal->cuda_device));
        if (ret < 0)
            return ret;

        CHECK_CU(cu->cuCtxPopCurrent(&dummy));
    }

    hwctx->internal->is_allocated = 1;

    // Setting stream to NULL will make functions automatically use the default CUstream
    hwctx->stream = NULL;

    return 0;
}
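/*
 * Entry point for av_hwdevice_ctx_create() with AV_HWDEVICE_TYPE_CUDA; the
 * device string is parsed as a CUDA device ordinal. A typical call from user
 * code (sketch) would be:
 *
 *     AVBufferRef *ref = NULL;
 *     int err = av_hwdevice_ctx_create(&ref, AV_HWDEVICE_TYPE_CUDA, "0", NULL, 0);
 *
 * which lands here with device == "0".
 */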
static int cuda_device_create(AVHWDeviceContext *device_ctx,
                              const char *device,
                              AVDictionary *opts, int flags)
{
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    int ret, device_idx = 0;

    if (device)
        device_idx = strtol(device, NULL, 0);

    if (cuda_device_init(device_ctx) < 0)
        goto error;

    cu = hwctx->internal->cuda_dl;

    ret = CHECK_CU(cu->cuInit(0));
    if (ret < 0)
        goto error;

    ret = CHECK_CU(cu->cuDeviceGet(&hwctx->internal->cuda_device, device_idx));
    if (ret < 0)
        goto error;

    ret = cuda_context_init(device_ctx, flags);
    if (ret < 0)
        goto error;

    return 0;

error:
    cuda_device_uninit(device_ctx);
    return AVERROR_UNKNOWN;
}
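/* Derives a CUDA device from another hwdevice (currently only Vulkan) by
 * matching the source device's UUID against the UUIDs reported by
 * cuDeviceGetUuid() for each CUDA device. */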
static int cuda_device_derive(AVHWDeviceContext *device_ctx,
                              AVHWDeviceContext *src_ctx, AVDictionary *opts,
                              int flags) {
    AVCUDADeviceContext *hwctx = device_ctx->hwctx;
    CudaFunctions *cu;
    const char *src_uuid = NULL;
    int ret, i, device_count;

#if CONFIG_VULKAN
    VkPhysicalDeviceIDProperties vk_idp = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
    };
#endif

    switch (src_ctx->type) {
#if CONFIG_VULKAN
    case AV_HWDEVICE_TYPE_VULKAN: {
        AVVulkanDeviceContext *vkctx = src_ctx->hwctx;
        VkPhysicalDeviceProperties2 vk_dev_props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &vk_idp,
        };
        vkGetPhysicalDeviceProperties2(vkctx->phys_dev, &vk_dev_props);
        src_uuid = vk_idp.deviceUUID;
        break;
    }
#endif
    default:
        return AVERROR(ENOSYS);
    }

    if (!src_uuid) {
        av_log(device_ctx, AV_LOG_ERROR,
               "Failed to get UUID of source device.\n");
        goto error;
    }

    if (cuda_device_init(device_ctx) < 0)
        goto error;

    cu = hwctx->internal->cuda_dl;

    ret = CHECK_CU(cu->cuInit(0));
    if (ret < 0)
        goto error;

    ret = CHECK_CU(cu->cuDeviceGetCount(&device_count));
    if (ret < 0)
        goto error;

    hwctx->internal->cuda_device = -1;
    for (i = 0; i < device_count; i++) {
        CUdevice dev;
        CUuuid uuid;

        ret = CHECK_CU(cu->cuDeviceGet(&dev, i));
        if (ret < 0)
            goto error;

        ret = CHECK_CU(cu->cuDeviceGetUuid(&uuid, dev));
        if (ret < 0)
            goto error;

        if (memcmp(src_uuid, uuid.bytes, sizeof (uuid.bytes)) == 0) {
            hwctx->internal->cuda_device = dev;
            break;
        }
    }

    if (hwctx->internal->cuda_device == -1) {
        av_log(device_ctx, AV_LOG_ERROR, "Could not derive CUDA device.\n");
        goto error;
    }

    ret = cuda_context_init(device_ctx, flags);
    if (ret < 0)
        goto error;

    return 0;

error:
    cuda_device_uninit(device_ctx);
    return AVERROR_UNKNOWN;
}
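/* Registration with the generic hwcontext framework; transfer_data_to and
 * transfer_data_from point at the same function, which handles both
 * directions. */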
const HWContextType ff_hwcontext_type_cuda = {
    .type                   = AV_HWDEVICE_TYPE_CUDA,
    .name                   = "CUDA",

    .device_hwctx_size      = sizeof(AVCUDADeviceContext),
    .frames_priv_size       = sizeof(CUDAFramesContext),

    .device_create          = cuda_device_create,
    .device_derive          = cuda_device_derive,
    .device_init            = cuda_device_init,
    .device_uninit          = cuda_device_uninit,
    .frames_get_constraints = cuda_frames_get_constraints,
    .frames_init            = cuda_frames_init,
    .frames_get_buffer      = cuda_get_buffer,
    .transfer_get_formats   = cuda_transfer_get_formats,
    .transfer_data_to       = cuda_transfer_data,
    .transfer_data_from     = cuda_transfer_data,

    .pix_fmts               = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE },
};