/*
 * OpenH264 video encoder
 * Copyright (C) 2014 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <wels/codec_api.h>
#include <wels/codec_ver.h>

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"

#include "avcodec.h"
#include "internal.h"
#include "libopenh264.h"
36 #if !OPENH264_VER_AT_LEAST(1, 6)
37 #define SM_SIZELIMITED_SLICE SM_DYN_SLICE
40 #define TARGET_BITRATE_DEFAULT 2*1000*1000
42 typedef struct SVCContext {
43 const AVClass *av_class;
51 #if FF_API_OPENH264_CABAC
52 int cabac; // deprecated
60 #define OFFSET(x) offsetof(SVCContext, x)
61 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
62 #define DEPRECATED AV_OPT_FLAG_DEPRECATED
63 static const AVOption options[] = {
64 #if FF_API_OPENH264_SLICE_MODE
65 #if OPENH264_VER_AT_LEAST(1, 6)
66 { "slice_mode", "set slice mode, use slices/max_nal_size", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_FIXEDSLCNUM_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE|DEPRECATED, "slice_mode" },
68 { "slice_mode", "set slice mode, use slices/max_nal_size", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_AUTO_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE|DEPRECATED, "slice_mode" },
70 { "fixed", "a fixed number of slices", 0, AV_OPT_TYPE_CONST, { .i64 = SM_FIXEDSLCNUM_SLICE }, 0, 0, VE, "slice_mode" },
71 #if OPENH264_VER_AT_LEAST(1, 6)
72 { "dyn", "Size limited (compatibility name)", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
73 { "sizelimited", "Size limited", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
75 { "rowmb", "one slice per row of macroblocks", 0, AV_OPT_TYPE_CONST, { .i64 = SM_ROWMB_SLICE }, 0, 0, VE, "slice_mode" },
76 { "auto", "automatic number of slices according to number of threads", 0, AV_OPT_TYPE_CONST, { .i64 = SM_AUTO_SLICE }, 0, 0, VE, "slice_mode" },
77 { "dyn", "Dynamic slicing", 0, AV_OPT_TYPE_CONST, { .i64 = SM_DYN_SLICE }, 0, 0, VE, "slice_mode" },
80 { "loopfilter", "enable loop filter", OFFSET(loopfilter), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VE },
81 { "profile", "set profile restrictions", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = FF_PROFILE_UNKNOWN }, FF_PROFILE_UNKNOWN, 0xffff, VE, "profile" },
82 #define PROFILE(name, value) name, NULL, 0, AV_OPT_TYPE_CONST, { .i64 = value }, 0, 0, VE, "profile"
83 { PROFILE("constrained_baseline", FF_PROFILE_H264_CONSTRAINED_BASELINE) },
84 { PROFILE("main", FF_PROFILE_H264_MAIN) },
85 { PROFILE("high", FF_PROFILE_H264_HIGH) },
87 { "max_nal_size", "set maximum NAL size in bytes", OFFSET(max_nal_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
88 { "allow_skip_frames", "allow skipping frames to hit the target bitrate", OFFSET(skip_frames), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
89 #if FF_API_OPENH264_CABAC
90 { "cabac", "Enable cabac(deprecated, use coder)", OFFSET(cabac), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE|DEPRECATED },
92 { "coder", "Coder type", OFFSET(coder), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE, "coder" },
93 { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, VE, "coder" },
94 { "cavlc", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "coder" },
95 { "cabac", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "coder" },
96 { "vlc", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "coder" },
97 { "ac", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "coder" },
99 { "rc_mode", "Select rate control mode", OFFSET(rc_mode), AV_OPT_TYPE_INT, { .i64 = RC_QUALITY_MODE }, RC_OFF_MODE, RC_TIMESTAMP_MODE, VE, "rc_mode" },
100 { "off", "bit rate control off", 0, AV_OPT_TYPE_CONST, { .i64 = RC_OFF_MODE }, 0, 0, VE, "rc_mode" },
101 { "quality", "quality mode", 0, AV_OPT_TYPE_CONST, { .i64 = RC_QUALITY_MODE }, 0, 0, VE, "rc_mode" },
102 { "bitrate", "bitrate mode", 0, AV_OPT_TYPE_CONST, { .i64 = RC_BITRATE_MODE }, 0, 0, VE, "rc_mode" },
103 { "buffer", "using buffer status to adjust the video quality (no bitrate control)", 0, AV_OPT_TYPE_CONST, { .i64 = RC_BUFFERBASED_MODE }, 0, 0, VE, "rc_mode" },
104 #if OPENH264_VER_AT_LEAST(1, 4)
105 { "timestamp", "bit rate control based on timestamp", 0, AV_OPT_TYPE_CONST, { .i64 = RC_TIMESTAMP_MODE }, 0, 0, VE, "rc_mode" },
111 static const AVClass class = {
112 .class_name = "libopenh264enc",
113 .item_name = av_default_item_name,
115 .version = LIBAVUTIL_VERSION_INT,
118 static av_cold int svc_encode_close(AVCodecContext *avctx)
120 SVCContext *s = avctx->priv_data;
123 WelsDestroySVCEncoder(s->encoder);
125 av_log(avctx, AV_LOG_WARNING, "%d frames skipped\n", s->skipped);
129 static av_cold int svc_encode_init(AVCodecContext *avctx)
131 SVCContext *s = avctx->priv_data;
132 SEncParamExt param = { 0 };
135 WelsTraceCallback callback_function;
136 AVCPBProperties *props;
138 if ((err = ff_libopenh264_check_version(avctx)) < 0)
141 if (WelsCreateSVCEncoder(&s->encoder)) {
142 av_log(avctx, AV_LOG_ERROR, "Unable to create encoder\n");
143 return AVERROR_UNKNOWN;
146 // Pass all libopenh264 messages to our callback, to allow ourselves to filter them.
147 log_level = WELS_LOG_DETAIL;
148 (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_LEVEL, &log_level);
150 // Set the logging callback function to one that uses av_log() (see implementation above).
151 callback_function = (WelsTraceCallback) ff_libopenh264_trace_callback;
152 (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK, &callback_function);
154 // Set the AVCodecContext as the libopenh264 callback context so that it can be passed to av_log().
155 (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK_CONTEXT, &avctx);
157 (*s->encoder)->GetDefaultParams(s->encoder, ¶m);
159 if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
160 param.fMaxFrameRate = av_q2d(avctx->framerate);
162 if (avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
163 av_log(avctx, AV_LOG_ERROR,
164 "Could not set framerate for libopenh264enc: integer overflow\n");
165 return AVERROR(EINVAL);
167 param.fMaxFrameRate = 1.0 / av_q2d(avctx->time_base) / FFMAX(avctx->ticks_per_frame, 1);
169 param.iPicWidth = avctx->width;
170 param.iPicHeight = avctx->height;
171 param.iTargetBitrate = avctx->bit_rate > 0 ? avctx->bit_rate : TARGET_BITRATE_DEFAULT;
172 param.iMaxBitrate = FFMAX(avctx->rc_max_rate, avctx->bit_rate);
173 param.iRCMode = s->rc_mode;
174 if (avctx->qmax >= 0)
175 param.iMaxQp = av_clip(avctx->qmax, 1, 51);
176 if (avctx->qmin >= 0)
177 param.iMinQp = av_clip(avctx->qmin, 1, param.iMaxQp);
178 param.iTemporalLayerNum = 1;
179 param.iSpatialLayerNum = 1;
180 param.bEnableDenoise = 0;
181 param.bEnableBackgroundDetection = 1;
182 param.bEnableAdaptiveQuant = 1;
183 param.bEnableFrameSkip = s->skip_frames;
184 param.bEnableLongTermReference = 0;
185 param.iLtrMarkPeriod = 30;
186 if (avctx->gop_size >= 0)
187 param.uiIntraPeriod = avctx->gop_size;
188 #if OPENH264_VER_AT_LEAST(1, 4)
189 param.eSpsPpsIdStrategy = CONSTANT_ID;
191 param.bEnableSpsPpsIdAddition = 0;
193 param.bPrefixNalAddingCtrl = 0;
194 param.iLoopFilterDisableIdc = !s->loopfilter;
195 param.iEntropyCodingModeFlag = 0;
196 param.iMultipleThreadIdc = avctx->thread_count;
198 /* Allow specifying the libopenh264 profile through AVCodecContext. */
199 if (FF_PROFILE_UNKNOWN == s->profile &&
200 FF_PROFILE_UNKNOWN != avctx->profile)
201 switch (avctx->profile) {
202 case FF_PROFILE_H264_HIGH:
203 case FF_PROFILE_H264_MAIN:
204 case FF_PROFILE_H264_CONSTRAINED_BASELINE:
205 s->profile = avctx->profile;
208 av_log(avctx, AV_LOG_WARNING,
209 "Unsupported avctx->profile: %d.\n", avctx->profile);
213 if (s->profile == FF_PROFILE_UNKNOWN && s->coder >= 0)
214 s->profile = s->coder == 0 ? FF_PROFILE_H264_CONSTRAINED_BASELINE :
215 #if OPENH264_VER_AT_LEAST(1, 8)
216 FF_PROFILE_H264_HIGH;
218 FF_PROFILE_H264_MAIN;
221 switch (s->profile) {
222 #if OPENH264_VER_AT_LEAST(1, 8)
223 case FF_PROFILE_H264_HIGH:
224 param.iEntropyCodingModeFlag = 1;
225 av_log(avctx, AV_LOG_VERBOSE, "Using CABAC, "
226 "select EProfileIdc PRO_HIGH in libopenh264.\n");
229 case FF_PROFILE_H264_MAIN:
230 param.iEntropyCodingModeFlag = 1;
231 av_log(avctx, AV_LOG_VERBOSE, "Using CABAC, "
232 "select EProfileIdc PRO_MAIN in libopenh264.\n");
235 case FF_PROFILE_H264_CONSTRAINED_BASELINE:
236 case FF_PROFILE_UNKNOWN:
237 param.iEntropyCodingModeFlag = 0;
238 av_log(avctx, AV_LOG_VERBOSE, "Using CAVLC, "
239 "select EProfileIdc PRO_BASELINE in libopenh264.\n");
242 param.iEntropyCodingModeFlag = 0;
243 av_log(avctx, AV_LOG_WARNING, "Unsupported profile, "
244 "select EProfileIdc PRO_BASELINE in libopenh264.\n");
248 param.sSpatialLayers[0].iVideoWidth = param.iPicWidth;
249 param.sSpatialLayers[0].iVideoHeight = param.iPicHeight;
250 param.sSpatialLayers[0].fFrameRate = param.fMaxFrameRate;
251 param.sSpatialLayers[0].iSpatialBitrate = param.iTargetBitrate;
252 param.sSpatialLayers[0].iMaxSpatialBitrate = param.iMaxBitrate;
254 #if OPENH264_VER_AT_LEAST(1, 7)
255 if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den) {
257 static const AVRational sar_idc[] = {
258 { 0, 0 }, // Unspecified (never written here).
259 { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 },
260 { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 },
261 { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 },
262 { 160, 99 }, // Last 3 are unknown to openh264: { 4, 3 }, { 3, 2 }, { 2, 1 },
264 static const ESampleAspectRatio asp_idc[] = {
266 ASP_1x1, ASP_12x11, ASP_10x11, ASP_16x11,
267 ASP_40x33, ASP_24x11, ASP_20x11, ASP_32x11,
268 ASP_80x33, ASP_18x11, ASP_15x11, ASP_64x33,
273 av_reduce(&num, &den, avctx->sample_aspect_ratio.num,
274 avctx->sample_aspect_ratio.den, 65535);
276 for (i = 1; i < FF_ARRAY_ELEMS(sar_idc); i++) {
277 if (num == sar_idc[i].num &&
278 den == sar_idc[i].den)
281 if (i == FF_ARRAY_ELEMS(sar_idc)) {
282 param.sSpatialLayers[0].eAspectRatio = ASP_EXT_SAR;
283 param.sSpatialLayers[0].sAspectRatioExtWidth = num;
284 param.sSpatialLayers[0].sAspectRatioExtHeight = den;
286 param.sSpatialLayers[0].eAspectRatio = asp_idc[i];
288 param.sSpatialLayers[0].bAspectRatioPresent = true;
290 param.sSpatialLayers[0].bAspectRatioPresent = false;
294 if ((avctx->slices > 1) && (s->max_nal_size)) {
295 av_log(avctx, AV_LOG_ERROR,
296 "Invalid combination -slices %d and -max_nal_size %d.\n",
297 avctx->slices, s->max_nal_size);
298 return AVERROR(EINVAL);
301 if (avctx->slices > 1)
302 s->slice_mode = SM_FIXEDSLCNUM_SLICE;
305 s->slice_mode = SM_SIZELIMITED_SLICE;
307 #if OPENH264_VER_AT_LEAST(1, 6)
308 param.sSpatialLayers[0].sSliceArgument.uiSliceMode = s->slice_mode;
309 param.sSpatialLayers[0].sSliceArgument.uiSliceNum = avctx->slices;
311 param.sSpatialLayers[0].sSliceCfg.uiSliceMode = s->slice_mode;
312 param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = avctx->slices;
314 if (avctx->slices == 0 && s->slice_mode == SM_FIXEDSLCNUM_SLICE)
315 av_log(avctx, AV_LOG_WARNING, "Slice count will be set automatically\n");
317 if (s->slice_mode == SM_SIZELIMITED_SLICE) {
318 if (s->max_nal_size) {
319 param.uiMaxNalSize = s->max_nal_size;
320 #if OPENH264_VER_AT_LEAST(1, 6)
321 param.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
323 param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
326 av_log(avctx, AV_LOG_ERROR, "Invalid -max_nal_size, "
327 "specify a valid max_nal_size to use -slice_mode dyn\n");
328 return AVERROR(EINVAL);
332 if ((*s->encoder)->InitializeExt(s->encoder, ¶m) != cmResultSuccess) {
333 av_log(avctx, AV_LOG_ERROR, "Initialize failed\n");
334 return AVERROR_UNKNOWN;
337 if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
338 SFrameBSInfo fbi = { 0 };
340 (*s->encoder)->EncodeParameterSets(s->encoder, &fbi);
341 for (i = 0; i < fbi.sLayerInfo[0].iNalCount; i++)
342 size += fbi.sLayerInfo[0].pNalLengthInByte[i];
343 avctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
344 if (!avctx->extradata)
345 return AVERROR(ENOMEM);
346 avctx->extradata_size = size;
347 memcpy(avctx->extradata, fbi.sLayerInfo[0].pBsBuf, size);
350 props = ff_add_cpb_side_data(avctx);
352 return AVERROR(ENOMEM);
353 props->max_bitrate = param.iMaxBitrate;
354 props->avg_bitrate = param.iTargetBitrate;
359 static int svc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
360 const AVFrame *frame, int *got_packet)
362 SVCContext *s = avctx->priv_data;
363 SFrameBSInfo fbi = { 0 };
366 SSourcePicture sp = { 0 };
367 int size = 0, layer, first_layer = 0;
368 int layer_size[MAX_LAYER_NUM_OF_FRAME] = { 0 };
370 sp.iColorFormat = videoFormatI420;
371 for (i = 0; i < 3; i++) {
372 sp.iStride[i] = frame->linesize[i];
373 sp.pData[i] = frame->data[i];
375 sp.iPicWidth = avctx->width;
376 sp.iPicHeight = avctx->height;
378 if (frame->pict_type == AV_PICTURE_TYPE_I) {
379 (*s->encoder)->ForceIntraFrame(s->encoder, true);
382 encoded = (*s->encoder)->EncodeFrame(s->encoder, &sp, &fbi);
383 if (encoded != cmResultSuccess) {
384 av_log(avctx, AV_LOG_ERROR, "EncodeFrame failed\n");
385 return AVERROR_UNKNOWN;
387 if (fbi.eFrameType == videoFrameTypeSkip) {
389 av_log(avctx, AV_LOG_DEBUG, "frame skipped\n");
393 // Normal frames are returned with one single layer, while IDR
394 // frames have two layers, where the first layer contains the SPS/PPS.
395 // If using global headers, don't include the SPS/PPS in the returned
396 // packet - thus, only return one layer.
397 if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
398 first_layer = fbi.iLayerNum - 1;
400 for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
401 for (i = 0; i < fbi.sLayerInfo[layer].iNalCount; i++)
402 layer_size[layer] += fbi.sLayerInfo[layer].pNalLengthInByte[i];
403 size += layer_size[layer];
405 av_log(avctx, AV_LOG_DEBUG, "%d slices\n", fbi.sLayerInfo[fbi.iLayerNum - 1].iNalCount);
407 if ((ret = ff_alloc_packet2(avctx, avpkt, size, size))) {
408 av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
412 for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
413 memcpy(avpkt->data + size, fbi.sLayerInfo[layer].pBsBuf, layer_size[layer]);
414 size += layer_size[layer];
416 avpkt->pts = frame->pts;
417 if (fbi.eFrameType == videoFrameTypeIDR)
418 avpkt->flags |= AV_PKT_FLAG_KEY;
423 static const AVCodecDefault svc_enc_defaults[] = {
431 AVCodec ff_libopenh264_encoder = {
432 .name = "libopenh264",
433 .long_name = NULL_IF_CONFIG_SMALL("OpenH264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
434 .type = AVMEDIA_TYPE_VIDEO,
435 .id = AV_CODEC_ID_H264,
436 .priv_data_size = sizeof(SVCContext),
437 .init = svc_encode_init,
438 .encode2 = svc_encode_frame,
439 .close = svc_encode_close,
440 .capabilities = AV_CODEC_CAP_OTHER_THREADS,
441 .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
442 FF_CODEC_CAP_AUTO_THREADS,
443 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
445 .defaults = svc_enc_defaults,
446 .priv_class = &class,
447 .wrapper_name = "libopenh264",