#include "libavutil/pixdesc.h"
#include "internal.h"
#include <pthread.h>
+#include "atsc_a53.h"
#include "h264.h"
#include "h264_sei.h"
#include <dlfcn.h>
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif
+#if !HAVE_KCMVIDEOCODECTYPE_HEVCWITHALPHA
+enum { kCMVideoCodecType_HEVCWithAlpha = 'muxa' };
+#endif
+
+#if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
+enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
+enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
+#endif
+
typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
size_t parameterSetIndex,
const uint8_t **parameterSetPointerOut,
CFStringRef kVTProfileLevel_H264_High_5_1;
CFStringRef kVTProfileLevel_H264_High_5_2;
CFStringRef kVTProfileLevel_H264_High_AutoLevel;
+ CFStringRef kVTProfileLevel_H264_Extended_5_0;
+ CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;
CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
CFStringRef kVTCompressionPropertyKey_RealTime;
+ CFStringRef kVTCompressionPropertyKey_TargetQualityForAlpha;
CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
GET_SYM(kVTProfileLevel_H264_High_5_1, "H264_High_5_1");
GET_SYM(kVTProfileLevel_H264_High_5_2, "H264_High_5_2");
GET_SYM(kVTProfileLevel_H264_High_AutoLevel, "H264_High_AutoLevel");
+ GET_SYM(kVTProfileLevel_H264_Extended_5_0, "H264_Extended_5_0");
+ GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");
GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel, "HEVC_Main_AutoLevel");
GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel, "HEVC_Main10_AutoLevel");
GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
+ GET_SYM(kVTCompressionPropertyKey_TargetQualityForAlpha,
+ "TargetQualityForAlpha");
GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
"EnableHardwareAcceleratedVideoEncoder");
H264_PROF_BASELINE,
H264_PROF_MAIN,
H264_PROF_HIGH,
+ H264_PROF_EXTENDED,
H264_PROF_COUNT
} VT_H264Profile;
int64_t frames_after;
int64_t allow_sw;
+ int64_t require_sw;
+ double alpha_quality;
bool flushing;
- bool has_b_frames;
+ int has_b_frames;
bool warned_color_range;
- bool a53_cc;
+
+ /* can't be bool type since AVOption will access it as int */
+ int a53_cc;
} VTEncContext;
static int vtenc_populate_extradata(AVCodecContext *avctx,
return 0;
}
- while (!vtctx->q_head && !vtctx->async_error && wait) {
+ while (!vtctx->q_head && !vtctx->async_error && wait && !vtctx->flushing) {
pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
}
vtctx->q_tail = NULL;
}
+ vtctx->frame_ct_out++;
pthread_mutex_unlock(&vtctx->lock);
*buf = info->cm_buffer;
}
av_free(info);
- vtctx->frame_ct_out++;
return 0;
}
info->next = NULL;
pthread_mutex_lock(&vtctx->lock);
- pthread_cond_signal(&vtctx->cv_sample_sent);
if (!vtctx->q_head) {
vtctx->q_head = info;
vtctx->q_tail = info;
+ pthread_cond_signal(&vtctx->cv_sample_sent);
pthread_mutex_unlock(&vtctx->lock);
}
return 0;
}
+/*
+ * Map an AVCodecID to the CoreMedia codec type that should be requested
+ * from VideoToolbox. For HEVC, BGRA input together with a positive
+ * alpha_quality selects the HEVC-with-alpha encoder ('muxa', see the
+ * kCMVideoCodecType_HEVCWithAlpha fallback enum above).
+ * Returns 0 when the codec has no VideoToolbox mapping.
+ */
-static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
+static CMVideoCodecType get_cm_codec_type(enum AVCodecID id,
+ enum AVPixelFormat fmt,
+ double alpha_quality)
{
switch (id) {
case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
case AV_CODEC_ID_HEVC:
+ if (fmt == AV_PIX_FMT_BGRA && alpha_quality > 0.0) {
+ return kCMVideoCodecType_HEVCWithAlpha;
+ }
+ return kCMVideoCodecType_HEVC;
default: return 0;
}
}
ExtraSEI *sei = sourceFrameCtx;
if (vtctx->async_error) {
- if(sample_buffer) CFRelease(sample_buffer);
return;
}
- if (status || !sample_buffer) {
+ if (status) {
av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
set_async_error(vtctx, AVERROR_EXTERNAL);
return;
}
+ if (!sample_buffer) {
+ return;
+ }
+
if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
int set_status = set_extradata(avctx, sample_buffer);
if (set_status) {
compat_keys.kVTProfileLevel_H264_High_5_2; break;
}
break;
+ case H264_PROF_EXTENDED:
+ switch (vtctx->level) {
+ case 0: *profile_level_val =
+ compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
+ case 50: *profile_level_val =
+ compat_keys.kVTProfileLevel_H264_Extended_5_0; break;
+ }
+ break;
}
if (!*profile_level_val) {
*av_pixel_format = range == AVCOL_RANGE_JPEG ?
kCVPixelFormatType_420YpCbCr8PlanarFullRange :
kCVPixelFormatType_420YpCbCr8Planar;
+ } else if (fmt == AV_PIX_FMT_BGRA) {
+ *av_pixel_format = kCVPixelFormatType_32BGRA;
+ } else if (fmt == AV_PIX_FMT_P010LE) {
+ *av_pixel_format = range == AVCOL_RANGE_JPEG ?
+ kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
+ kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
} else {
return AVERROR(EINVAL);
}
*primaries = NULL;
break;
+ case AVCOL_PRI_BT470BG:
+ *primaries = kCVImageBufferColorPrimaries_EBU_3213;
+ break;
+
+ case AVCOL_PRI_SMPTE170M:
+ *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
+ break;
+
case AVCOL_PRI_BT709:
*primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
break;
*transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
break;
+#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
+ case AVCOL_TRC_SMPTE2084:
+ *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
+ break;
+#endif
+#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
+ case AVCOL_TRC_LINEAR:
+ *transfer_fnc = kCVImageBufferTransferFunction_Linear;
+ break;
+#endif
+#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
+ case AVCOL_TRC_ARIB_STD_B67:
+ *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
+ break;
+#endif
+
case AVCOL_TRC_GAMMA22:
gamma = 2.2;
*transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
break;
default:
+ *transfer_fnc = NULL;
av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
return -1;
}
return 0;
}
+// Whether VideoToolbox supports constant-quality (qscale) rate control.
+// Only available on macOS running on Apple Silicon; on other targets the
+// caller must fall back to bitrate-based rate control.
+static bool vtenc_qscale_enabled(void)
+{
+ return TARGET_OS_OSX && TARGET_CPU_ARM64;
+}
+
static int vtenc_create_encoder(AVCodecContext *avctx,
CMVideoCodecType codec_type,
CFStringRef profile_level,
VTEncContext *vtctx = avctx->priv_data;
SInt32 bit_rate = avctx->bit_rate;
SInt32 max_rate = avctx->rc_max_rate;
+ Float32 quality = avctx->global_quality / FF_QP2LAMBDA;
CFNumberRef bit_rate_num;
+ CFNumberRef quality_num;
CFNumberRef bytes_per_second;
CFNumberRef one_second;
CFArrayRef data_rate_limits;
return AVERROR_EXTERNAL;
}
- bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
- kCFNumberSInt32Type,
- &bit_rate);
- if (!bit_rate_num) return AVERROR(ENOMEM);
+ if (avctx->flags & AV_CODEC_FLAG_QSCALE && !vtenc_qscale_enabled()) {
+ av_log(avctx, AV_LOG_ERROR, "Error: -q:v qscale not available for encoder. Use -b:v bitrate instead.\n");
+ return AVERROR_EXTERNAL;
+ }
- status = VTSessionSetProperty(vtctx->session,
- kVTCompressionPropertyKey_AverageBitRate,
- bit_rate_num);
- CFRelease(bit_rate_num);
+ if (avctx->flags & AV_CODEC_FLAG_QSCALE) {
+ quality = quality >= 100 ? 1.0 : quality / 100;
+ quality_num = CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberFloat32Type,
+ &quality);
+ if (!quality_num) return AVERROR(ENOMEM);
+
+ status = VTSessionSetProperty(vtctx->session,
+ kVTCompressionPropertyKey_Quality,
+ quality_num);
+ CFRelease(quality_num);
+ } else {
+ bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberSInt32Type,
+ &bit_rate);
+ if (!bit_rate_num) return AVERROR(ENOMEM);
+
+ status = VTSessionSetProperty(vtctx->session,
+ kVTCompressionPropertyKey_AverageBitRate,
+ bit_rate_num);
+ CFRelease(bit_rate_num);
+ }
if (status) {
av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
}
}
- if (vtctx->codec_id == AV_CODEC_ID_H264) {
- // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
- if (profile_level) {
+ if (vtctx->codec_id == AV_CODEC_ID_HEVC) {
+ if (avctx->pix_fmt == AV_PIX_FMT_BGRA && vtctx->alpha_quality > 0.0) {
+ CFNumberRef alpha_quality_num = CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberDoubleType,
+ &vtctx->alpha_quality);
+ if (!alpha_quality_num) return AVERROR(ENOMEM);
+
status = VTSessionSetProperty(vtctx->session,
- kVTCompressionPropertyKey_ProfileLevel,
- profile_level);
- if (status) {
- av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
- }
+ compat_keys.kVTCompressionPropertyKey_TargetQualityForAlpha,
+ alpha_quality_num);
+ CFRelease(alpha_quality_num);
+ }
+ }
+
+ if (profile_level) {
+ status = VTSessionSetProperty(vtctx->session,
+ kVTCompressionPropertyKey_ProfileLevel,
+ profile_level);
+ if (status) {
+ av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
}
}
CFNumberRef gamma_level = NULL;
int status;
- codec_type = get_cm_codec_type(avctx->codec_id);
+ codec_type = get_cm_codec_type(avctx->codec_id, avctx->pix_fmt, vtctx->alpha_quality);
if (!codec_type) {
av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
return AVERROR(EINVAL);
}
vtctx->codec_id = avctx->codec_id;
+ avctx->max_b_frames = 16;
if (vtctx->codec_id == AV_CODEC_ID_H264) {
vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
vtctx->has_b_frames = avctx->max_b_frames > 0;
if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
- vtctx->has_b_frames = false;
+ vtctx->has_b_frames = 0;
}
if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
+ // HEVC has b-pyramid
+ vtctx->has_b_frames = avctx->max_b_frames > 0 ? 2 : 0;
}
enc_info = CFDictionaryCreateMutable(
if (!enc_info) return AVERROR(ENOMEM);
#if !TARGET_OS_IPHONE
- if (!vtctx->allow_sw) {
+ if(vtctx->require_sw) {
+ CFDictionarySetValue(enc_info,
+ compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
+ kCFBooleanFalse);
+ } else if (!vtctx->allow_sw) {
CFDictionarySetValue(enc_info,
compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
kCFBooleanTrue);
if (!status && has_b_frames_cfbool) {
//Some devices don't output B-frames for main profile, even if requested.
- vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
+ // HEVC has b-pyramid
+ vtctx->has_b_frames = (CFBooleanGetValue(has_b_frames_cfbool) && avctx->codec_id == AV_CODEC_ID_HEVC) ? 2 : 1;
CFRelease(has_b_frames_cfbool);
}
avctx->has_b_frames = vtctx->has_b_frames;
remaining_dst_size--;
wrote_bytes = write_sei(sei,
- H264_SEI_TYPE_USER_DATA_REGISTERED,
+ SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
dst_data,
remaining_dst_size);
return status;
wrote_bytes = write_sei(sei,
- H264_SEI_TYPE_USER_DATA_REGISTERED,
+ SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
new_sei,
remaining_dst_size - old_sei_length);
if (wrote_bytes < 0)
if (sei) {
size_t msg_size = get_sei_msg_bytes(sei,
- H264_SEI_TYPE_USER_DATA_REGISTERED);
+ SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35);
sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
}
strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
break;
+ case AV_PIX_FMT_BGRA:
+ *plane_count = 1;
+
+ widths [0] = avctx->width;
+ heights[0] = avctx->height;
+ strides[0] = frame ? frame->linesize[0] : avctx->width * 4;
+ break;
+
+ case AV_PIX_FMT_P010LE:
+ *plane_count = 2;
+ widths[0] = avctx->width;
+ heights[0] = avctx->height;
+ strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
+
+ widths[1] = (avctx->width + 1) / 2;
+ heights[1] = (avctx->height + 1) / 2;
+ strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
+ break;
+
default:
av_log(
avctx,
return 0;
}
-#if !TARGET_OS_IPHONE
-//Not used on iOS - frame is always copied.
-static void free_avframe(
- void *release_ctx,
- const void *data,
- size_t size,
- size_t plane_count,
- const void *plane_addresses[])
-{
- AVFrame *frame = release_ctx;
- av_frame_free(&frame);
-}
-#else
//Not used on OSX - frame is never copied.
static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
const AVFrame *frame,
return 0;
}
-#endif //!TARGET_OS_IPHONE
static int create_cv_pixel_buffer(AVCodecContext *avctx,
const AVFrame *frame,
size_t strides[AV_NUM_DATA_POINTERS];
int status;
size_t contiguous_buf_size;
-#if TARGET_OS_IPHONE
CVPixelBufferPoolRef pix_buf_pool;
VTEncContext* vtctx = avctx->priv_data;
-#else
- CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
- kCFAllocatorDefault,
- 10,
- &kCFCopyStringDictionaryKeyCallBacks,
- &kCFTypeDictionaryValueCallBacks);
-
- if (!pix_buf_attachments) return AVERROR(ENOMEM);
-#endif
if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
return AVERROR_EXTERNAL;
}
-#if TARGET_OS_IPHONE
pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
if (!pix_buf_pool) {
/* On iOS, the VT session is invalidated when the APP switches from
*cv_img = NULL;
return status;
}
-#else
- AVFrame *enc_frame = av_frame_alloc();
- if (!enc_frame) return AVERROR(ENOMEM);
-
- status = av_frame_ref(enc_frame, frame);
- if (status) {
- av_frame_free(&enc_frame);
- return status;
- }
-
- status = CVPixelBufferCreateWithPlanarBytes(
- kCFAllocatorDefault,
- enc_frame->width,
- enc_frame->height,
- color,
- NULL,
- contiguous_buf_size,
- plane_count,
- (void **)enc_frame->data,
- widths,
- heights,
- strides,
- free_avframe,
- enc_frame,
- NULL,
- cv_img
- );
-
- add_color_attr(avctx, pix_buf_attachments);
- CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
- CFRelease(pix_buf_attachments);
-
- if (status) {
- av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
- return AVERROR_EXTERNAL;
- }
-#endif
return 0;
}
if (vtctx->frame_ct_in == 0) {
vtctx->first_pts = frame->pts;
- } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
+ } else if(vtctx->frame_ct_in == vtctx->has_b_frames) {
vtctx->dts_delta = frame->pts - vtctx->first_pts;
}
CFDictionaryRef pixel_buffer_info)
{
VTEncContext *vtctx = avctx->priv_data;
- AVFrame *frame = av_frame_alloc();
- int y_size = avctx->width * avctx->height;
- int chroma_size = (avctx->width / 2) * (avctx->height / 2);
- CMSampleBufferRef buf = NULL;
int status;
-
- if (!frame)
- return AVERROR(ENOMEM);
-
- frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);
-
- if(!frame->buf[0]){
- status = AVERROR(ENOMEM);
- goto pe_cleanup;
- }
+ CVPixelBufferPoolRef pool = NULL;
+ CVPixelBufferRef pix_buf = NULL;
+ CMTime time;
+ CMSampleBufferRef buf = NULL;
status = vtenc_create_encoder(avctx,
codec_type,
if (status)
goto pe_cleanup;
- frame->data[0] = frame->buf[0]->data;
- memset(frame->data[0], 0, y_size);
-
- frame->data[1] = frame->buf[0]->data + y_size;
- memset(frame->data[1], 128, chroma_size);
-
-
- if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
- frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
- memset(frame->data[2], 128, chroma_size);
+ pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
+ if(!pool){
+ av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
+ goto pe_cleanup;
}
- frame->linesize[0] = avctx->width;
+ status = CVPixelBufferPoolCreatePixelBuffer(NULL,
+ pool,
+ &pix_buf);
- if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
- frame->linesize[1] =
- frame->linesize[2] = (avctx->width + 1) / 2;
- } else {
- frame->linesize[1] = (avctx->width + 1) / 2;
+ if(status != kCVReturnSuccess){
+ av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
+ goto pe_cleanup;
}
- frame->format = avctx->pix_fmt;
- frame->width = avctx->width;
- frame->height = avctx->height;
- frame->colorspace = avctx->colorspace;
- frame->color_range = avctx->color_range;
- frame->color_trc = avctx->color_trc;
- frame->color_primaries = avctx->color_primaries;
+ time = CMTimeMake(0, avctx->time_base.den);
+ status = VTCompressionSessionEncodeFrame(vtctx->session,
+ pix_buf,
+ time,
+ kCMTimeInvalid,
+ NULL,
+ NULL,
+ NULL);
- frame->pts = 0;
- status = vtenc_send_frame(avctx, vtctx, frame);
if (status) {
- av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
+ av_log(avctx,
+ AV_LOG_ERROR,
+ "Error sending frame for extradata: %d\n",
+ status);
+
goto pe_cleanup;
}
vtctx->session = NULL;
vtctx->frame_ct_out = 0;
- av_frame_unref(frame);
- av_frame_free(&frame);
-
av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
return status;
{
VTEncContext *vtctx = avctx->priv_data;
- pthread_cond_destroy(&vtctx->cv_sample_sent);
- pthread_mutex_destroy(&vtctx->lock);
-
- if(!vtctx->session) return 0;
+ if(!vtctx->session) {
+ pthread_cond_destroy(&vtctx->cv_sample_sent);
+ pthread_mutex_destroy(&vtctx->lock);
+ return 0;
+ }
VTCompressionSessionCompleteFrames(vtctx->session,
kCMTimeIndefinite);
clear_frame_queue(vtctx);
+ pthread_cond_destroy(&vtctx->cv_sample_sent);
+ pthread_mutex_destroy(&vtctx->lock);
CFRelease(vtctx->session);
vtctx->session = NULL;
return 0;
}
+/* Input pixel formats accepted by the H.264 (videotoolbox) encoder. */
-static const enum AVPixelFormat pix_fmts[] = {
+static const enum AVPixelFormat avc_pix_fmts[] = {
+ AV_PIX_FMT_VIDEOTOOLBOX,
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE
+};
+
+/* Input pixel formats accepted by the HEVC encoder. In addition to the
+ * H.264 set it allows BGRA (routed to HEVC-with-alpha when alpha_quality
+ * is set) and P010LE (10-bit, for the Main10 profile). */
+static const enum AVPixelFormat hevc_pix_fmts[] = {
AV_PIX_FMT_VIDEOTOOLBOX,
AV_PIX_FMT_NV12,
AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_P010LE,
AV_PIX_FMT_NONE
};
#define COMMON_OPTIONS \
{ "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
{ .i64 = 0 }, 0, 1, VE }, \
+ { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
+ { .i64 = 0 }, 0, 1, VE }, \
{ "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
{ "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
{ "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
{ "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
{ "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },
+ { "extended", "Extend Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
{ "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
{ "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_H264,
.priv_data_size = sizeof(VTEncContext),
- .pix_fmts = pix_fmts,
+ .pix_fmts = avc_pix_fmts,
.init = vtenc_init,
.encode2 = vtenc_frame,
.close = vtenc_close,
{ "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
{ "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
+ { "alpha_quality", "Compression quality for the alpha channel", OFFSET(alpha_quality), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0.0, 1.0, VE },
+
COMMON_OPTIONS
{ NULL },
};
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_HEVC,
.priv_data_size = sizeof(VTEncContext),
- .pix_fmts = pix_fmts,
+ .pix_fmts = hevc_pix_fmts,
.init = vtenc_init,
.encode2 = vtenc_frame,
.close = vtenc_close,