#if defined(_WIN32)
#include <windows.h>
+
+#define CUDA_LIBNAME TEXT("nvcuda.dll")
+#if ARCH_X86_64
+#define NVENC_LIBNAME TEXT("nvEncodeAPI64.dll")
+#else
+#define NVENC_LIBNAME TEXT("nvEncodeAPI.dll")
+#endif
+
+#define dlopen(filename, flags) LoadLibrary((filename))
+#define dlsym(handle, symbol) GetProcAddress(handle, symbol)
+#define dlclose(handle) FreeLibrary(handle)
#else
#include <dlfcn.h>
+
+#define CUDA_LIBNAME "libcuda.so"
+#define NVENC_LIBNAME "libnvidia-encode.so"
#endif
+#include "libavutil/hwcontext.h"
#include "libavutil/imgutils.h"
#include "libavutil/avassert.h"
#include "libavutil/mem.h"
-#include "libavutil/hwcontext.h"
#include "internal.h"
-#include "thread.h"
-
#include "nvenc.h"
-#if CONFIG_CUDA
-#include "libavutil/hwcontext_cuda.h"
-#endif
-
+#define NVENC_CAP 0x30
#define IS_CBR(rc) (rc == NV_ENC_PARAMS_RC_CBR || \
rc == NV_ENC_PARAMS_RC_2_PASS_QUALITY || \
rc == NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP)
-#if defined(_WIN32)
-#define LOAD_FUNC(l, s) GetProcAddress(l, s)
-#define DL_CLOSE_FUNC(l) FreeLibrary(l)
-#else
-#define LOAD_FUNC(l, s) dlsym(l, s)
-#define DL_CLOSE_FUNC(l) dlclose(l)
-#endif
/* Run-time loader helpers.
 *
 * NOTE: both macros expand to statements that *return* AVERROR_UNKNOWN from
 * the calling function on failure, so they may only be used inside functions
 * that return an int and have an AVCodecContext *avctx in scope (for
 * av_log). On Windows dlopen/dlsym are mapped to LoadLibrary/GetProcAddress
 * above, so the same macros work on both platforms. */
#define LOAD_LIBRARY(l, path)                   \
    do {                                        \
        if (!((l) = dlopen(path, RTLD_LAZY))) { \
            av_log(avctx, AV_LOG_ERROR,         \
                   "Cannot load %s\n",          \
                   path);                       \
            return AVERROR_UNKNOWN;             \
        }                                       \
    } while (0)

#define LOAD_SYMBOL(fun, lib, symbol)           \
    do {                                        \
        if (!((fun) = dlsym(lib, symbol))) {    \
            av_log(avctx, AV_LOG_ERROR,         \
                   "Cannot load %s\n",          \
                   symbol);                     \
            return AVERROR_UNKNOWN;             \
        }                                       \
    } while (0)
const enum AVPixelFormat ff_nvenc_pix_fmts[] = {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
};
-typedef struct NvencData
-{
- union {
- int64_t timestamp;
- NvencSurface *surface;
- } u;
-} NvencData;
-
static const struct {
NVENCSTATUS nverr;
int averr;
return ret;
}
-static void timestamp_queue_enqueue(AVFifoBuffer* queue, int64_t timestamp)
-{
- av_fifo_generic_write(queue, ×tamp, sizeof(timestamp), NULL);
-}
-
-static int64_t timestamp_queue_dequeue(AVFifoBuffer* queue)
-{
- int64_t timestamp = AV_NOPTS_VALUE;
- if (av_fifo_size(queue) > 0)
- av_fifo_generic_read(queue, ×tamp, sizeof(timestamp), NULL);
-
- return timestamp;
-}
-
-#define CHECK_LOAD_FUNC(t, f, s) \
-do { \
- (f) = (t)LOAD_FUNC(dl_fn->cuda_lib, s); \
- if (!(f)) { \
- av_log(avctx, AV_LOG_FATAL, "Failed loading %s from CUDA library\n", s); \
- goto error; \
- } \
-} while (0)
-
-static av_cold int nvenc_dyload_cuda(AVCodecContext *avctx)
+static av_cold int nvenc_load_libraries(AVCodecContext *avctx)
{
NvencContext *ctx = avctx->priv_data;
NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
+ PNVENCODEAPICREATEINSTANCE nvenc_create_instance;
+ NVENCSTATUS err;
#if CONFIG_CUDA
dl_fn->cu_init = cuInit;
dl_fn->cu_ctx_create = cuCtxCreate_v2;
dl_fn->cu_ctx_pop_current = cuCtxPopCurrent_v2;
dl_fn->cu_ctx_destroy = cuCtxDestroy_v2;
-
- return 1;
#else
- if (dl_fn->cuda_lib)
- return 1;
-
-#if defined(_WIN32)
- dl_fn->cuda_lib = LoadLibrary(TEXT("nvcuda.dll"));
-#else
- dl_fn->cuda_lib = dlopen("libcuda.so", RTLD_LAZY);
+ LOAD_LIBRARY(dl_fn->cuda, CUDA_LIBNAME);
+
+ LOAD_SYMBOL(dl_fn->cu_init, dl_fn->cuda, "cuInit");
+ LOAD_SYMBOL(dl_fn->cu_device_get_count, dl_fn->cuda, "cuDeviceGetCount");
+ LOAD_SYMBOL(dl_fn->cu_device_get, dl_fn->cuda, "cuDeviceGet");
+ LOAD_SYMBOL(dl_fn->cu_device_get_name, dl_fn->cuda, "cuDeviceGetName");
+ LOAD_SYMBOL(dl_fn->cu_device_compute_capability, dl_fn->cuda,
+ "cuDeviceComputeCapability");
+ LOAD_SYMBOL(dl_fn->cu_ctx_create, dl_fn->cuda, "cuCtxCreate_v2");
+ LOAD_SYMBOL(dl_fn->cu_ctx_pop_current, dl_fn->cuda, "cuCtxPopCurrent_v2");
+ LOAD_SYMBOL(dl_fn->cu_ctx_destroy, dl_fn->cuda, "cuCtxDestroy_v2");
#endif
- if (!dl_fn->cuda_lib) {
- av_log(avctx, AV_LOG_FATAL, "Failed loading CUDA library\n");
- goto error;
- }
-
- CHECK_LOAD_FUNC(PCUINIT, dl_fn->cu_init, "cuInit");
- CHECK_LOAD_FUNC(PCUDEVICEGETCOUNT, dl_fn->cu_device_get_count, "cuDeviceGetCount");
- CHECK_LOAD_FUNC(PCUDEVICEGET, dl_fn->cu_device_get, "cuDeviceGet");
- CHECK_LOAD_FUNC(PCUDEVICEGETNAME, dl_fn->cu_device_get_name, "cuDeviceGetName");
- CHECK_LOAD_FUNC(PCUDEVICECOMPUTECAPABILITY, dl_fn->cu_device_compute_capability, "cuDeviceComputeCapability");
- CHECK_LOAD_FUNC(PCUCTXCREATE, dl_fn->cu_ctx_create, "cuCtxCreate_v2");
- CHECK_LOAD_FUNC(PCUCTXPOPCURRENT, dl_fn->cu_ctx_pop_current, "cuCtxPopCurrent_v2");
- CHECK_LOAD_FUNC(PCUCTXDESTROY, dl_fn->cu_ctx_destroy, "cuCtxDestroy_v2");
+ LOAD_LIBRARY(dl_fn->nvenc, NVENC_LIBNAME);
- return 1;
+ LOAD_SYMBOL(nvenc_create_instance, dl_fn->nvenc,
+ "NvEncodeAPICreateInstance");
-error:
+ dl_fn->nvenc_funcs.version = NV_ENCODE_API_FUNCTION_LIST_VER;
- if (dl_fn->cuda_lib)
- DL_CLOSE_FUNC(dl_fn->cuda_lib);
+ err = nvenc_create_instance(&dl_fn->nvenc_funcs);
+ if (err != NV_ENC_SUCCESS)
+ return nvenc_print_error(avctx, err, "Failed to create nvenc instance");
- dl_fn->cuda_lib = NULL;
+ av_log(avctx, AV_LOG_VERBOSE, "Nvenc initialized successfully\n");
return 0;
-#endif
}
-static av_cold int check_cuda_errors(AVCodecContext *avctx, CUresult err, const char *func)
+static av_cold int nvenc_open_session(AVCodecContext *avctx)
{
- if (err != CUDA_SUCCESS) {
- av_log(avctx, AV_LOG_FATAL, ">> %s - failed with error code 0x%x\n", func, err);
- return 0;
+ NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = { 0 };
+ NvencContext *ctx = avctx->priv_data;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
+ NVENCSTATUS ret;
+
+ params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
+ params.apiVersion = NVENCAPI_VERSION;
+ params.device = ctx->cu_context;
+ params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
+
+ ret = p_nvenc->nvEncOpenEncodeSessionEx(¶ms, &ctx->nvencoder);
+ if (ret != NV_ENC_SUCCESS) {
+ ctx->nvencoder = NULL;
+ return nvenc_print_error(avctx, ret, "OpenEncodeSessionEx failed");
}
- return 1;
+
+ return 0;
}
-#define check_cuda_errors(f) if (!check_cuda_errors(avctx, f, #f)) goto error
-static av_cold int nvenc_check_cuda(AVCodecContext *avctx)
+static int nvenc_check_codec_support(AVCodecContext *avctx)
{
- int device_count = 0;
- CUdevice cu_device = 0;
- char gpu_name[128];
- int smminor = 0, smmajor = 0;
- int i, smver, target_smver;
-
NvencContext *ctx = avctx->priv_data;
- NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
+ int i, ret, count = 0;
+ GUID *guids = NULL;
- switch (avctx->codec->id) {
- case AV_CODEC_ID_H264:
- target_smver = ctx->data_pix_fmt == AV_PIX_FMT_YUV444P ? 0x52 : 0x30;
- break;
- case AV_CODEC_ID_H265:
- target_smver = 0x52;
- break;
- default:
- av_log(avctx, AV_LOG_FATAL, "Unknown codec name\n");
- goto error;
+ ret = p_nvenc->nvEncGetEncodeGUIDCount(ctx->nvencoder, &count);
+
+ if (ret != NV_ENC_SUCCESS || !count)
+ return AVERROR(ENOSYS);
+
+ guids = av_malloc(count * sizeof(GUID));
+ if (!guids)
+ return AVERROR(ENOMEM);
+
+ ret = p_nvenc->nvEncGetEncodeGUIDs(ctx->nvencoder, guids, count, &count);
+ if (ret != NV_ENC_SUCCESS) {
+ ret = AVERROR(ENOSYS);
+ goto fail;
}
- if (ctx->preset >= PRESET_LOSSLESS_DEFAULT)
- target_smver = 0x52;
+ ret = AVERROR(ENOSYS);
+ for (i = 0; i < count; i++) {
+ if (!memcmp(&guids[i], &ctx->init_encode_params.encodeGUID, sizeof(*guids))) {
+ ret = 0;
+ break;
+ }
+ }
- if (!nvenc_dyload_cuda(avctx))
- return 0;
+fail:
+ av_free(guids);
- if (dl_fn->nvenc_device_count > 0)
- return 1;
+ return ret;
+}
- check_cuda_errors(dl_fn->cu_init(0));
+static int nvenc_check_cap(AVCodecContext *avctx, NV_ENC_CAPS cap)
+{
+ NvencContext *ctx = avctx->priv_data;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &ctx->nvenc_dload_funcs.nvenc_funcs;
+ NV_ENC_CAPS_PARAM params = { 0 };
+ int ret, val = 0;
- check_cuda_errors(dl_fn->cu_device_get_count(&device_count));
+ params.version = NV_ENC_CAPS_PARAM_VER;
+ params.capsToQuery = cap;
- if (!device_count) {
- av_log(avctx, AV_LOG_FATAL, "No CUDA capable devices found\n");
- goto error;
- }
+ ret = p_nvenc->nvEncGetEncodeCaps(ctx->nvencoder, ctx->init_encode_params.encodeGUID, ¶ms, &val);
- av_log(avctx, AV_LOG_VERBOSE, "%d CUDA capable devices found\n", device_count);
+ if (ret == NV_ENC_SUCCESS)
+ return val;
+ return 0;
+}
- dl_fn->nvenc_device_count = 0;
+static int nvenc_check_capabilities(AVCodecContext *avctx)
+{
+ NvencContext *ctx = avctx->priv_data;
+ int ret;
- for (i = 0; i < device_count; ++i) {
- check_cuda_errors(dl_fn->cu_device_get(&cu_device, i));
- check_cuda_errors(dl_fn->cu_device_get_name(gpu_name, sizeof(gpu_name), cu_device));
- check_cuda_errors(dl_fn->cu_device_compute_capability(&smmajor, &smminor, cu_device));
+ ret = nvenc_check_codec_support(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_VERBOSE, "Codec not supported\n");
+ return ret;
+ }
- smver = (smmajor << 4) | smminor;
+ ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_YUV444_ENCODE);
+ if (ctx->data_pix_fmt == AV_PIX_FMT_YUV444P && ret <= 0) {
+ av_log(avctx, AV_LOG_VERBOSE, "YUV444P not supported\n");
+ return AVERROR(ENOSYS);
+ }
- av_log(avctx, AV_LOG_VERBOSE, "[ GPU #%d - < %s > has Compute SM %d.%d, NVENC %s ]\n", i, gpu_name, smmajor, smminor, (smver >= target_smver) ? "Available" : "Not Available");
+ ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE);
+ if (ctx->preset >= PRESET_LOSSLESS_DEFAULT && ret <= 0) {
+ av_log(avctx, AV_LOG_VERBOSE, "Lossless encoding not supported\n");
+ return AVERROR(ENOSYS);
+ }
- if (smver >= target_smver)
- dl_fn->nvenc_devices[dl_fn->nvenc_device_count++] = cu_device;
+ ret = nvenc_check_cap(avctx, NV_ENC_CAPS_WIDTH_MAX);
+ if (ret < avctx->width) {
+ av_log(avctx, AV_LOG_VERBOSE, "Width %d exceeds %d\n",
+ avctx->width, ret);
+ return AVERROR(ENOSYS);
}
- if (!dl_fn->nvenc_device_count) {
- av_log(avctx, AV_LOG_FATAL, "No NVENC capable devices found\n");
- goto error;
+ ret = nvenc_check_cap(avctx, NV_ENC_CAPS_HEIGHT_MAX);
+ if (ret < avctx->height) {
+ av_log(avctx, AV_LOG_VERBOSE, "Height %d exceeds %d\n",
+ avctx->height, ret);
+ return AVERROR(ENOSYS);
}
- return 1;
+ ret = nvenc_check_cap(avctx, NV_ENC_CAPS_NUM_MAX_BFRAMES);
+ if (ret < avctx->max_b_frames) {
+ av_log(avctx, AV_LOG_VERBOSE, "Max b-frames %d exceed %d\n",
+ avctx->max_b_frames, ret);
-error:
+ return AVERROR(ENOSYS);
+ }
- dl_fn->nvenc_device_count = 0;
+ ret = nvenc_check_cap(avctx, NV_ENC_CAPS_SUPPORT_FIELD_ENCODING);
+ if (ret < 1 && avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "Interlaced encoding is not supported. Supported level: %d\n",
+ ret);
+ return AVERROR(ENOSYS);
+ }
return 0;
}
-static av_cold int nvenc_dyload_nvenc(AVCodecContext *avctx)
+static av_cold int nvenc_check_device(AVCodecContext *avctx, int idx)
{
- PNVENCODEAPICREATEINSTANCE nvEncodeAPICreateInstance = 0;
- NVENCSTATUS nvstatus;
-
NvencContext *ctx = avctx->priv_data;
NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
+ char name[128] = { 0};
+ int major, minor, ret;
+ CUresult cu_res;
+ CUdevice cu_device;
+ CUcontext dummy;
+ int loglevel = AV_LOG_VERBOSE;
- if (!nvenc_check_cuda(avctx))
- return 0;
+ if (ctx->device == LIST_DEVICES)
+ loglevel = AV_LOG_INFO;
- if (dl_fn->nvenc_lib)
- return 1;
+ cu_res = dl_fn->cu_device_get(&cu_device, idx);
+ if (cu_res != CUDA_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Cannot access the CUDA device %d\n",
+ idx);
+ return -1;
+ }
-#if defined(_WIN32)
- if (sizeof(void*) == 8) {
- dl_fn->nvenc_lib = LoadLibrary(TEXT("nvEncodeAPI64.dll"));
- } else {
- dl_fn->nvenc_lib = LoadLibrary(TEXT("nvEncodeAPI.dll"));
+ cu_res = dl_fn->cu_device_get_name(name, sizeof(name), cu_device);
+ if (cu_res != CUDA_SUCCESS)
+ return -1;
+
+ cu_res = dl_fn->cu_device_compute_capability(&major, &minor, cu_device);
+ if (cu_res != CUDA_SUCCESS)
+ return -1;
+
+ av_log(avctx, loglevel, "[ GPU #%d - < %s > has Compute SM %d.%d ]\n", idx, name, major, minor);
+ if (((major << 4) | minor) < NVENC_CAP) {
+ av_log(avctx, loglevel, "does not support NVENC\n");
+ goto fail;
}
-#else
- dl_fn->nvenc_lib = dlopen("libnvidia-encode.so.1", RTLD_LAZY);
-#endif
- if (!dl_fn->nvenc_lib) {
- av_log(avctx, AV_LOG_FATAL, "Failed loading the nvenc library\n");
- goto error;
+ cu_res = dl_fn->cu_ctx_create(&ctx->cu_context_internal, 0, cu_device);
+ if (cu_res != CUDA_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "Failed creating CUDA context for NVENC: 0x%x\n", (int)cu_res);
+ goto fail;
}
- nvEncodeAPICreateInstance = (PNVENCODEAPICREATEINSTANCE)LOAD_FUNC(dl_fn->nvenc_lib, "NvEncodeAPICreateInstance");
+ ctx->cu_context = ctx->cu_context_internal;
- if (!nvEncodeAPICreateInstance) {
- av_log(avctx, AV_LOG_FATAL, "Failed to load nvenc entrypoint\n");
- goto error;
+ cu_res = dl_fn->cu_ctx_pop_current(&dummy);
+ if (cu_res != CUDA_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "Failed popping CUDA context: 0x%x\n", (int)cu_res);
+ goto fail2;
}
- dl_fn->nvenc_funcs.version = NV_ENCODE_API_FUNCTION_LIST_VER;
+ if ((ret = nvenc_open_session(avctx)) < 0)
+ goto fail2;
- nvstatus = nvEncodeAPICreateInstance(&dl_fn->nvenc_funcs);
+ if ((ret = nvenc_check_capabilities(avctx)) < 0)
+ goto fail3;
- if (nvstatus != NV_ENC_SUCCESS) {
- nvenc_print_error(avctx, nvstatus, "Failed to create nvenc instance");
- goto error;
- }
+ av_log(avctx, loglevel, "supports NVENC\n");
- av_log(avctx, AV_LOG_VERBOSE, "Nvenc initialized successfully\n");
+ dl_fn->nvenc_device_count++;
- return 1;
+ if (ctx->device == dl_fn->nvenc_device_count - 1 || ctx->device == ANY_DEVICE)
+ return 0;
-error:
- if (dl_fn->nvenc_lib)
- DL_CLOSE_FUNC(dl_fn->nvenc_lib);
+fail3:
+ p_nvenc->nvEncDestroyEncoder(ctx->nvencoder);
+ ctx->nvencoder = NULL;
- dl_fn->nvenc_lib = NULL;
+fail2:
+ dl_fn->cu_ctx_destroy(ctx->cu_context_internal);
+ ctx->cu_context_internal = NULL;
- return 0;
+fail:
+ return AVERROR(ENOSYS);
}
static av_cold int nvenc_setup_device(AVCodecContext *avctx)
NvencContext *ctx = avctx->priv_data;
NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
- CUresult cu_res;
- CUcontext cu_context_curr;
-
switch (avctx->codec->id) {
case AV_CODEC_ID_H264:
ctx->init_encode_params.encodeGUID = NV_ENC_CODEC_H264_GUID;
return AVERROR_BUG;
}
- ctx->data_pix_fmt = avctx->pix_fmt;
-
-#if CONFIG_CUDA
if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
- AVHWFramesContext *frames_ctx;
+#if CONFIG_CUDA
+ AVHWFramesContext *frames_ctx;
AVCUDADeviceContext *device_hwctx;
+ int ret;
- if (!avctx->hw_frames_ctx) {
- av_log(avctx, AV_LOG_ERROR, "hw_frames_ctx must be set when using GPU frames as input\n");
+ if (!avctx->hw_frames_ctx)
return AVERROR(EINVAL);
- }
- frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
device_hwctx = frames_ctx->device_ctx->hwctx;
- ctx->cu_context = device_hwctx->cuda_ctx;
- ctx->data_pix_fmt = frames_ctx->sw_format;
- return 0;
- }
-#endif
- if (ctx->gpu >= dl_fn->nvenc_device_count) {
- av_log(avctx, AV_LOG_FATAL, "Requested GPU %d, but only %d GPUs are available!\n", ctx->gpu, dl_fn->nvenc_device_count);
- return AVERROR(EINVAL);
- }
+ ctx->cu_context = device_hwctx->cuda_ctx;
- ctx->cu_context = NULL;
- cu_res = dl_fn->cu_ctx_create(&ctx->cu_context_internal, 4, dl_fn->nvenc_devices[ctx->gpu]); // CU_CTX_SCHED_BLOCKING_SYNC=4, avoid CPU spins
+ ret = nvenc_open_session(avctx);
+ if (ret < 0)
+ return ret;
- if (cu_res != CUDA_SUCCESS) {
- av_log(avctx, AV_LOG_FATAL, "Failed creating CUDA context for NVENC: 0x%x\n", (int)cu_res);
- return AVERROR_EXTERNAL;
- }
+ ret = nvenc_check_capabilities(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_FATAL, "Provided device doesn't support required NVENC features\n");
+ return ret;
+ }
+#else
+ return AVERROR_BUG;
+#endif
+ } else {
+ int i, nb_devices = 0;
- cu_res = dl_fn->cu_ctx_pop_current(&cu_context_curr);
+ if ((dl_fn->cu_init(0)) != CUDA_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Cannot init CUDA\n");
+ return AVERROR_UNKNOWN;
+ }
- if (cu_res != CUDA_SUCCESS) {
- av_log(avctx, AV_LOG_FATAL, "Failed popping CUDA context: 0x%x\n", (int)cu_res);
- return AVERROR_EXTERNAL;
- }
+ if ((dl_fn->cu_device_get_count(&nb_devices)) != CUDA_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Cannot enumerate the CUDA devices\n");
+ return AVERROR_UNKNOWN;
+ }
- ctx->cu_context = ctx->cu_context_internal;
+ if (!nb_devices) {
+ av_log(avctx, AV_LOG_FATAL, "No CUDA capable devices found\n");
+ return AVERROR_EXTERNAL;
+ }
- return 0;
-}
+ av_log(avctx, AV_LOG_VERBOSE, "%d CUDA capable devices found\n", nb_devices);
-static av_cold int nvenc_open_session(AVCodecContext *avctx)
-{
- NvencContext *ctx = avctx->priv_data;
- NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
- NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
+ dl_fn->nvenc_device_count = 0;
+ for (i = 0; i < nb_devices; ++i) {
+ if ((nvenc_check_device(avctx, i)) >= 0 && ctx->device != LIST_DEVICES)
+ return 0;
+ }
- NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS encode_session_params = { 0 };
- NVENCSTATUS nv_status;
+ if (ctx->device == LIST_DEVICES)
+ return AVERROR_EXIT;
- encode_session_params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
- encode_session_params.apiVersion = NVENCAPI_VERSION;
- encode_session_params.device = ctx->cu_context;
- encode_session_params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
+ if (!dl_fn->nvenc_device_count) {
+ av_log(avctx, AV_LOG_FATAL, "No NVENC capable devices found\n");
+ return AVERROR_EXTERNAL;
+ }
- nv_status = p_nvenc->nvEncOpenEncodeSessionEx(&encode_session_params, &ctx->nvencoder);
- if (nv_status != NV_ENC_SUCCESS) {
- ctx->nvencoder = NULL;
- return nvenc_print_error(avctx, nv_status, "OpenEncodeSessionEx failed");
+ av_log(avctx, AV_LOG_FATAL, "Requested GPU %d, but only %d GPUs are available!\n", ctx->device, dl_fn->nvenc_device_count);
+ return AVERROR(EINVAL);
}
return 0;
rc->maxQP.qpIntra = avctx->qmax;
qp_inter_p = (avctx->qmax + 3 * avctx->qmin) / 4; // biased towards Qmin
+ } else if (avctx->qmin >= 0) {
+ rc->enableMinQP = 1;
+
+ rc->minQP.qpInterB = avctx->qmin;
+ rc->minQP.qpInterP = avctx->qmin;
+ rc->minQP.qpIntra = avctx->qmin;
+
+ qp_inter_p = avctx->qmin;
} else {
qp_inter_p = 26; // default to 26
}
set_vbr(avctx);
break;
case NV_ENC_PARAMS_RC_CBR:
- break;
case NV_ENC_PARAMS_RC_2_PASS_QUALITY:
case NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP:
- if (!(ctx->flags & NVENC_LOWLATENCY)) {
- av_log(avctx, AV_LOG_WARNING,
- "The multipass rate-control requires "
- "a low-latency preset.\n");
- return;
- }
+ break;
}
rc->rateControlMode = ctx->rc;
if (ctx->flags & NVENC_LOSSLESS) {
set_lossless(avctx);
- } else if (ctx->rc > 0) {
+ } else if (ctx->rc >= 0) {
nvenc_override_rate_control(avctx);
} else {
ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_VBR;
NV_ENC_PRESET_CONFIG preset_config = { 0 };
NVENCSTATUS nv_status = NV_ENC_SUCCESS;
AVCPBProperties *cpb_props;
- int num_mbs;
int res = 0;
int dw, dh;
- ctx->last_dts = AV_NOPTS_VALUE;
-
ctx->encode_config.version = NV_ENC_CONFIG_VER;
ctx->init_encode_params.version = NV_ENC_INITIALIZE_PARAMS_VER;
ctx->init_encode_params.frameRateNum = avctx->time_base.den;
ctx->init_encode_params.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;
- num_mbs = ((avctx->width + 15) >> 4) * ((avctx->height + 15) >> 4);
- ctx->max_surface_count = (num_mbs >= 8160) ? 32 : 48;
-
- if (ctx->buffer_delay >= ctx->max_surface_count)
- ctx->buffer_delay = ctx->max_surface_count - 1;
-
ctx->init_encode_params.enableEncodeAsync = 0;
ctx->init_encode_params.enablePTD = 1;
ctx->encode_config.gopLength = 1;
}
- /* when there're b frames, set dts offset */
- if (ctx->encode_config.frameIntervalP >= 2)
- ctx->last_dts = -2;
+ ctx->initial_pts[0] = AV_NOPTS_VALUE;
+ ctx->initial_pts[1] = AV_NOPTS_VALUE;
nvenc_setup_rate_control(avctx);
{
NvencContext *ctx = avctx->priv_data;
int i, res;
+ int num_mbs = ((avctx->width + 15) >> 4) * ((avctx->height + 15) >> 4);
+ ctx->nb_surfaces = FFMAX((num_mbs >= 8160) ? 32 : 48,
+ ctx->nb_surfaces);
+ ctx->async_depth = FFMIN(ctx->async_depth, ctx->nb_surfaces - 1);
- ctx->surfaces = av_malloc(ctx->max_surface_count * sizeof(*ctx->surfaces));
- if (!ctx->surfaces) {
+ ctx->surfaces = av_mallocz_array(ctx->nb_surfaces, sizeof(*ctx->surfaces));
+ if (!ctx->surfaces)
return AVERROR(ENOMEM);
- }
- ctx->timestamp_list = av_fifo_alloc(ctx->max_surface_count * sizeof(int64_t));
+ ctx->timestamp_list = av_fifo_alloc(ctx->nb_surfaces * sizeof(int64_t));
if (!ctx->timestamp_list)
return AVERROR(ENOMEM);
- ctx->output_surface_queue = av_fifo_alloc(ctx->max_surface_count * sizeof(NvencSurface*));
+ ctx->output_surface_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
if (!ctx->output_surface_queue)
return AVERROR(ENOMEM);
- ctx->output_surface_ready_queue = av_fifo_alloc(ctx->max_surface_count * sizeof(NvencSurface*));
+ ctx->output_surface_ready_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
if (!ctx->output_surface_ready_queue)
return AVERROR(ENOMEM);
- for (i = 0; i < ctx->max_surface_count; i++) {
+ for (i = 0; i < ctx->nb_surfaces; i++) {
if ((res = nvenc_alloc_surface(avctx, i)) < 0)
return res;
}
av_fifo_freep(&ctx->output_surface_queue);
if (ctx->surfaces && avctx->pix_fmt == AV_PIX_FMT_CUDA) {
- for (i = 0; i < ctx->max_surface_count; ++i) {
+ for (i = 0; i < ctx->nb_surfaces; ++i) {
if (ctx->surfaces[i].input_surface) {
p_nvenc->nvEncUnmapInputResource(ctx->nvencoder, ctx->surfaces[i].in_map.mappedResource);
}
}
if (ctx->surfaces) {
- for (i = 0; i < ctx->max_surface_count; ++i) {
+ for (i = 0; i < ctx->nb_surfaces; ++i) {
if (avctx->pix_fmt != AV_PIX_FMT_CUDA)
p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->surfaces[i].input_surface);
av_frame_free(&ctx->surfaces[i].in_ref);
}
}
av_freep(&ctx->surfaces);
- ctx->max_surface_count = 0;
+ ctx->nb_surfaces = 0;
if (ctx->nvencoder)
p_nvenc->nvEncDestroyEncoder(ctx->nvencoder);
dl_fn->cu_ctx_destroy(ctx->cu_context_internal);
ctx->cu_context = ctx->cu_context_internal = NULL;
- DL_CLOSE_FUNC(dl_fn->nvenc_lib);
- dl_fn->nvenc_lib = NULL;
+ if (dl_fn->nvenc)
+ dlclose(dl_fn->nvenc);
+ dl_fn->nvenc = NULL;
dl_fn->nvenc_device_count = 0;
#if !CONFIG_CUDA
- DL_CLOSE_FUNC(dl_fn->cuda_lib);
- dl_fn->cuda_lib = NULL;
+ if (dl_fn->cuda)
+ dlclose(dl_fn->cuda);
+ dl_fn->cuda = NULL;
#endif
dl_fn->cu_init = NULL;
av_cold int ff_nvenc_encode_init(AVCodecContext *avctx)
{
- int res;
+ NvencContext *ctx = avctx->priv_data;
+ int ret;
- if (!nvenc_dyload_nvenc(avctx))
- return AVERROR_EXTERNAL;
+ if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
+ AVHWFramesContext *frames_ctx;
+ if (!avctx->hw_frames_ctx) {
+ av_log(avctx, AV_LOG_ERROR,
+ "hw_frames_ctx must be set when using GPU frames as input\n");
+ return AVERROR(EINVAL);
+ }
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ ctx->data_pix_fmt = frames_ctx->sw_format;
+ } else {
+ ctx->data_pix_fmt = avctx->pix_fmt;
+ }
- if ((res = nvenc_setup_device(avctx)) < 0)
- return res;
+ if ((ret = nvenc_load_libraries(avctx)) < 0)
+ return ret;
- if ((res = nvenc_open_session(avctx)) < 0)
- return res;
+ if ((ret = nvenc_setup_device(avctx)) < 0)
+ return ret;
- if ((res = nvenc_setup_encoder(avctx)) < 0)
- return res;
+ if ((ret = nvenc_setup_encoder(avctx)) < 0)
+ return ret;
- if ((res = nvenc_setup_surfaces(avctx)) < 0)
- return res;
+ if ((ret = nvenc_setup_surfaces(avctx)) < 0)
+ return ret;
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
- if ((res = nvenc_setup_extradata(avctx)) < 0)
- return res;
+ if ((ret = nvenc_setup_extradata(avctx)) < 0)
+ return ret;
}
return 0;
{
int i;
- for (i = 0; i < ctx->max_surface_count; ++i) {
+ for (i = 0; i < ctx->nb_surfaces; ++i) {
if (!ctx->surfaces[i].lockCount) {
ctx->surfaces[i].lockCount = 1;
return &ctx->surfaces[i];
}
static void nvenc_codec_specific_pic_params(AVCodecContext *avctx,
- NV_ENC_PIC_PARAMS *params)
+ NV_ENC_PIC_PARAMS *params)
{
NvencContext *ctx = avctx->priv_data;
switch (avctx->codec->id) {
case AV_CODEC_ID_H264:
- params->codecPicParams.h264PicParams.sliceMode = ctx->encode_config.encodeCodecConfig.h264Config.sliceMode;
- params->codecPicParams.h264PicParams.sliceModeData = ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
- break;
- case AV_CODEC_ID_H265:
- params->codecPicParams.hevcPicParams.sliceMode = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceMode;
- params->codecPicParams.hevcPicParams.sliceModeData = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
+ params->codecPicParams.h264PicParams.sliceMode =
+ ctx->encode_config.encodeCodecConfig.h264Config.sliceMode;
+ params->codecPicParams.h264PicParams.sliceModeData =
+ ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
break;
+ case AV_CODEC_ID_HEVC:
+ params->codecPicParams.hevcPicParams.sliceMode =
+ ctx->encode_config.encodeCodecConfig.hevcConfig.sliceMode;
+ params->codecPicParams.hevcPicParams.sliceModeData =
+ ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
+ break;
}
}
+static inline void timestamp_queue_enqueue(AVFifoBuffer* queue, int64_t timestamp)
+{
+ av_fifo_generic_write(queue, ×tamp, sizeof(timestamp), NULL);
+}
+
+static inline int64_t timestamp_queue_dequeue(AVFifoBuffer* queue)
+{
+ int64_t timestamp = AV_NOPTS_VALUE;
+ if (av_fifo_size(queue) > 0)
+ av_fifo_generic_read(queue, ×tamp, sizeof(timestamp), NULL);
+
+ return timestamp;
+}
+
+static int nvenc_set_timestamp(AVCodecContext *avctx,
+ NV_ENC_LOCK_BITSTREAM *params,
+ AVPacket *pkt)
+{
+ NvencContext *ctx = avctx->priv_data;
+
+ pkt->pts = params->outputTimeStamp;
+
+ /* generate the first dts by linearly extrapolating the
+ * first two pts values to the past */
+ if (avctx->max_b_frames > 0 && !ctx->first_packet_output &&
+ ctx->initial_pts[1] != AV_NOPTS_VALUE) {
+ int64_t ts0 = ctx->initial_pts[0], ts1 = ctx->initial_pts[1];
+ int64_t delta;
+
+ if ((ts0 < 0 && ts1 > INT64_MAX + ts0) ||
+ (ts0 > 0 && ts1 < INT64_MIN + ts0))
+ return AVERROR(ERANGE);
+ delta = ts1 - ts0;
+
+ if ((delta < 0 && ts0 > INT64_MAX + delta) ||
+ (delta > 0 && ts0 < INT64_MIN + delta))
+ return AVERROR(ERANGE);
+ pkt->dts = ts0 - delta;
+
+ ctx->first_packet_output = 1;
+ return 0;
+ }
+
+ pkt->dts = timestamp_queue_dequeue(ctx->timestamp_list);
+
+ return 0;
+}
+
static int process_output_surface(AVCodecContext *avctx, AVPacket *pkt, NvencSurface *tmpoutsurf)
{
NvencContext *ctx = avctx->priv_data;
slice_offsets = av_mallocz(slice_mode_data * sizeof(*slice_offsets));
if (!slice_offsets)
- return AVERROR(ENOMEM);
+ goto error;
lock_params.version = NV_ENC_LOCK_BITSTREAM_VER;
ff_side_data_set_encoder_stats(pkt,
(lock_params.frameAvgQP - 1) * FF_QP2LAMBDA, NULL, 0, pict_type);
- pkt->pts = lock_params.outputTimeStamp;
- pkt->dts = timestamp_queue_dequeue(ctx->timestamp_list);
-
- /* when there're b frame(s), set dts offset */
- if (ctx->encode_config.frameIntervalP >= 2)
- pkt->dts -= 1;
-
- if (pkt->dts > pkt->pts)
- pkt->dts = pkt->pts;
-
- if (ctx->last_dts != AV_NOPTS_VALUE && pkt->dts <= ctx->last_dts)
- pkt->dts = ctx->last_dts + 1;
-
- ctx->last_dts = pkt->dts;
+ res = nvenc_set_timestamp(avctx, &lock_params, pkt);
+ if (res < 0)
+ goto error2;
av_free(slice_offsets);
return 0;
error:
+ timestamp_queue_dequeue(ctx->timestamp_list);
+error2:
av_free(slice_offsets);
- timestamp_queue_dequeue(ctx->timestamp_list);
return res;
}
-static int output_ready(NvencContext *ctx, int flush)
+static int output_ready(AVCodecContext *avctx, int flush)
{
+ NvencContext *ctx = avctx->priv_data;
int nb_ready, nb_pending;
+ /* when B-frames are enabled, we wait for two initial timestamps to
+ * calculate the first dts */
+ if (!flush && avctx->max_b_frames > 0 &&
+ (ctx->initial_pts[0] == AV_NOPTS_VALUE || ctx->initial_pts[1] == AV_NOPTS_VALUE))
+ return 0;
+
nb_ready = av_fifo_size(ctx->output_surface_ready_queue) / sizeof(NvencSurface*);
nb_pending = av_fifo_size(ctx->output_surface_queue) / sizeof(NvencSurface*);
- return nb_ready > 0 && (flush || nb_ready + nb_pending >= ctx->buffer_delay);
+ if (flush)
+ return nb_ready > 0;
+ return (nb_ready > 0) && (nb_ready + nb_pending >= ctx->async_depth);
}
int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
- const AVFrame *frame, int *got_packet)
+ const AVFrame *frame, int *got_packet)
{
NVENCSTATUS nv_status;
NvencSurface *tmpoutsurf, *inSurf;
if (frame) {
inSurf = get_free_frame(ctx);
- av_assert0(inSurf);
+ if (!inSurf) {
+ av_log(avctx, AV_LOG_ERROR, "No free surfaces\n");
+ return AVERROR_BUG;
+ }
res = nvenc_upload_frame(avctx, frame, inSurf);
if (res) {
pic_params.inputWidth = avctx->width;
pic_params.inputHeight = avctx->height;
pic_params.outputBitstream = inSurf->output_surface;
- pic_params.completionEvent = 0;
if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
- if (frame->top_field_first) {
+ if (frame->top_field_first)
pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM;
- } else {
+ else
pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP;
- }
} else {
pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
}
pic_params.encodePicFlags = 0;
pic_params.inputTimeStamp = frame->pts;
- pic_params.inputDuration = 0;
nvenc_codec_specific_pic_params(avctx, &pic_params);
-
- timestamp_queue_enqueue(ctx->timestamp_list, frame->pts);
} else {
pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
}
nv_status = p_nvenc->nvEncEncodePicture(ctx->nvencoder, &pic_params);
+ if (nv_status != NV_ENC_SUCCESS &&
+ nv_status != NV_ENC_ERR_NEED_MORE_INPUT)
+ return nvenc_print_error(avctx, nv_status, "EncodePicture failed!");
- if (frame && nv_status == NV_ENC_ERR_NEED_MORE_INPUT)
+ if (frame) {
av_fifo_generic_write(ctx->output_surface_queue, &inSurf, sizeof(inSurf), NULL);
+ timestamp_queue_enqueue(ctx->timestamp_list, frame->pts);
- if (nv_status != NV_ENC_SUCCESS && nv_status != NV_ENC_ERR_NEED_MORE_INPUT) {
- return nvenc_print_error(avctx, nv_status, "EncodePicture failed!");
+ if (ctx->initial_pts[0] == AV_NOPTS_VALUE)
+ ctx->initial_pts[0] = frame->pts;
+ else if (ctx->initial_pts[1] == AV_NOPTS_VALUE)
+ ctx->initial_pts[1] = frame->pts;
}
- if (nv_status != NV_ENC_ERR_NEED_MORE_INPUT) {
+ /* all the pending buffers are now ready for output */
+ if (nv_status == NV_ENC_SUCCESS) {
while (av_fifo_size(ctx->output_surface_queue) > 0) {
av_fifo_generic_read(ctx->output_surface_queue, &tmpoutsurf, sizeof(tmpoutsurf), NULL);
av_fifo_generic_write(ctx->output_surface_ready_queue, &tmpoutsurf, sizeof(tmpoutsurf), NULL);
}
-
- if (frame)
- av_fifo_generic_write(ctx->output_surface_ready_queue, &inSurf, sizeof(inSurf), NULL);
}
- if (output_ready(ctx, !frame)) {
+ if (output_ready(avctx, !frame)) {
av_fifo_generic_read(ctx->output_surface_ready_queue, &tmpoutsurf, sizeof(tmpoutsurf), NULL);
res = process_output_surface(avctx, pkt, tmpoutsurf);