X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Flibvpxenc.c;h=0ecc2f913bbc49618e080a69fa39edc46ff5434a;hb=187105ff8a02bafc9c58d9d8363bb3f55a415635;hp=2c04866d5c7559453af86d4e42d43fa577ccf127;hpb=00ce2cbed55d2bfb67ed7c2abb734f452a7b9267;p=ffmpeg

diff --git a/libavcodec/libvpxenc.c b/libavcodec/libvpxenc.c
index 2c04866d5c7..0ecc2f913bb 100644
--- a/libavcodec/libvpxenc.c
+++ b/libavcodec/libvpxenc.c
@@ -1,20 +1,20 @@
 /*
  * Copyright (c) 2010, Google, Inc.
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
@@ -29,33 +29,46 @@
 #include <vpx/vp8cx.h>
 
 #include "avcodec.h"
+#include "internal.h"
 #include "libavutil/base64.h"
+#include "libavutil/common.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
 
 /**
  * Portion of struct vpx_codec_cx_pkt from vpx_encoder.h.
  * One encoded frame returned from the library.
  */
 struct FrameListData {
-    void *buf;                       /**< compressed data buffer */
-    size_t sz;                       /**< length of compressed data */
-    int64_t pts;                     /**< time stamp to show frame
-                                          (in timebase units) */
-    unsigned long duration;          /**< duration to show frame
-                                          (in timebase units) */
-    uint32_t flags;                  /**< flags for this frame */
+    void *buf;                       /**< compressed data buffer */
+    size_t sz;                       /**< length of compressed data */
+    int64_t pts;                     /**< time stamp to show frame
+                                          (in timebase units) */
+    unsigned long duration;          /**< duration to show frame
+                                          (in timebase units) */
+    uint32_t flags;                  /**< flags for this frame */
     struct FrameListData *next;
 };
 
 typedef struct VP8EncoderContext {
+    AVClass *class;
     struct vpx_codec_ctx encoder;
     struct vpx_image rawimg;
     struct vpx_fixed_buf twopass_stats;
    unsigned long deadline; //i.e., RT/GOOD/BEST
    struct FrameListData *coded_frame_list;
+    int cpu_used;
+    int auto_alt_ref;
+    int arnr_max_frames;
+    int arnr_strength;
+    int arnr_type;
+    int lag_in_frames;
+    int error_resilient;
+    int crf;
 } VP8Context;
 
 /** String mappings for enum vp8e_enc_control_id */
-static const char *ctlidstr[] = {
+static const char *const ctlidstr[] = {
     [VP8E_UPD_ENTROPY]           = "VP8E_UPD_ENTROPY",
     [VP8E_UPD_REFERENCE]         = "VP8E_UPD_REFERENCE",
     [VP8E_USE_REFERENCE]         = "VP8E_USE_REFERENCE",
@@ -72,6 +85,7 @@ static const char *ctlidstr[] = {
     [VP8E_SET_ARNR_MAXFRAMES]    = "VP8E_SET_ARNR_MAXFRAMES",
     [VP8E_SET_ARNR_STRENGTH]     = "VP8E_SET_ARNR_STRENGTH",
     [VP8E_SET_ARNR_TYPE]         = "VP8E_SET_ARNR_TYPE",
+    [VP8E_SET_CQ_LEVEL]          = "VP8E_SET_CQ_LEVEL",
 };
 
 static av_cold void log_encoder_error(AVCodecContext *avctx, const char *desc)
@@ -200,11 +214,10 @@ static av_cold int vp8_free(AVCodecContext *avctx)
     return 0;
 }
 
-static av_cold int vp8_init(AVCodecContext *avctx)
+static av_cold int vpx_init(AVCodecContext *avctx,
+                            const struct vpx_codec_iface *iface)
 {
     VP8Context *ctx = avctx->priv_data;
-    const struct vpx_codec_iface *iface = &vpx_codec_vp8_cx_algo;
-    int cpuused = 3;
     struct vpx_codec_enc_cfg enccfg;
     int res;
 
@@ -224,6 +237,9 @@ static av_cold int vp8_init(AVCodecContext *avctx)
     enccfg.g_timebase.den = avctx->time_base.den;
     enccfg.g_threads      = avctx->thread_count;
 
+    if (ctx->lag_in_frames >= 0)
+        enccfg.g_lag_in_frames = ctx->lag_in_frames;
+
     if (avctx->flags & CODEC_FLAG_PASS1)
         enccfg.g_pass = VPX_RC_FIRST_PASS;
     else if (avctx->flags & CODEC_FLAG_PASS2)
@@ -231,21 +247,45 @@
     else
         enccfg.g_pass = VPX_RC_ONE_PASS;
 
-    if (avctx->rc_min_rate == avctx->rc_max_rate &&
-        avctx->rc_min_rate == avctx->bit_rate)
-        enccfg.rc_end_usage = VPX_CBR;
-    enccfg.rc_target_bitrate = av_rescale_rnd(avctx->bit_rate, 1, 1000,
+    if (!avctx->bit_rate)
+        avctx->bit_rate = enccfg.rc_target_bitrate * 1000;
+    else
+        enccfg.rc_target_bitrate = av_rescale_rnd(avctx->bit_rate, 1, 1000,
                                                   AV_ROUND_NEAR_INF);
 
-    //convert [1,51] -> [0,63]
-    enccfg.rc_min_quantizer = ((avctx->qmin * 5 + 1) >> 2) - 1;
-    enccfg.rc_max_quantizer = ((avctx->qmax * 5 + 1) >> 2) - 1;
+    if (ctx->crf)
+        enccfg.rc_end_usage = VPX_CQ;
+    else if (avctx->rc_min_rate == avctx->rc_max_rate &&
+             avctx->rc_min_rate == avctx->bit_rate)
+        enccfg.rc_end_usage = VPX_CBR;
+
+    if (avctx->qmin > 0)
+        enccfg.rc_min_quantizer = avctx->qmin;
+    if (avctx->qmax > 0)
+        enccfg.rc_max_quantizer = avctx->qmax;
     enccfg.rc_dropframe_thresh = avctx->frame_skip_threshold;
 
+    //0-100 (0 => CBR, 100 => VBR)
+    enccfg.rc_2pass_vbr_bias_pct       = round(avctx->qcompress * 100);
+    enccfg.rc_2pass_vbr_minsection_pct =
+        avctx->rc_min_rate * 100LL / avctx->bit_rate;
+    if (avctx->rc_max_rate)
+        enccfg.rc_2pass_vbr_maxsection_pct =
+            avctx->rc_max_rate * 100LL / avctx->bit_rate;
+
+    if (avctx->rc_buffer_size)
+        enccfg.rc_buf_sz =
+            avctx->rc_buffer_size * 1000LL / avctx->bit_rate;
+    if (avctx->rc_initial_buffer_occupancy)
+        enccfg.rc_buf_initial_sz =
+            avctx->rc_initial_buffer_occupancy * 1000LL / avctx->bit_rate;
+    enccfg.rc_buf_optimal_sz = enccfg.rc_buf_sz * 5 / 6;
+
     //_enc_init() will balk if kf_min_dist differs from max w/VPX_KF_AUTO
-    if (avctx->keyint_min == avctx->gop_size)
+    if (avctx->keyint_min >= 0 && avctx->keyint_min == avctx->gop_size)
         enccfg.kf_min_dist = avctx->keyint_min;
-    enccfg.kf_max_dist = avctx->gop_size;
+    if (avctx->gop_size >= 0)
+        enccfg.kf_max_dist = avctx->gop_size;
 
     if (enccfg.g_pass == VPX_RC_FIRST_PASS)
         enccfg.g_lag_in_frames = 0;
@@ -276,7 +316,13 @@ static av_cold int vp8_init(AVCodecContext *avctx)
         enccfg.rc_twopass_stats_in = ctx->twopass_stats;
     }
 
-    ctx->deadline = VPX_DL_GOOD_QUALITY;
+    /* 0-3: For non-zero values the encoder increasingly optimizes for reduced
+       complexity playback on low powered devices at the expense of encode
+       quality. */
+    if (avctx->profile != FF_PROFILE_UNKNOWN)
+        enccfg.g_profile = avctx->profile;
+
+    enccfg.g_error_resilient = ctx->error_resilient;
 
     dump_enc_cfg(avctx, &enccfg);
     /* Construct Encoder Context */
@@ -288,8 +334,20 @@ static av_cold int vp8_init(AVCodecContext *avctx)
 
     //codec control failures are currently treated only as warnings
     av_log(avctx, AV_LOG_DEBUG, "vpx_codec_control\n");
-    codecctl_int(avctx, VP8E_SET_CPUUSED,           cpuused);
+    if (ctx->cpu_used != INT_MIN)
+        codecctl_int(avctx, VP8E_SET_CPUUSED,          ctx->cpu_used);
+    if (ctx->auto_alt_ref >= 0)
+        codecctl_int(avctx, VP8E_SET_ENABLEAUTOALTREF, ctx->auto_alt_ref);
+    if (ctx->arnr_max_frames >= 0)
+        codecctl_int(avctx, VP8E_SET_ARNR_MAXFRAMES,   ctx->arnr_max_frames);
+    if (ctx->arnr_strength >= 0)
+        codecctl_int(avctx, VP8E_SET_ARNR_STRENGTH,    ctx->arnr_strength);
+    if (ctx->arnr_type >= 0)
+        codecctl_int(avctx, VP8E_SET_ARNR_TYPE,        ctx->arnr_type);
     codecctl_int(avctx, VP8E_SET_NOISE_SENSITIVITY, avctx->noise_reduction);
+    codecctl_int(avctx, VP8E_SET_TOKEN_PARTITIONS,  av_log2(avctx->slices));
+    codecctl_int(avctx, VP8E_SET_STATIC_THRESHOLD,  avctx->mb_threshold);
+    codecctl_int(avctx, VP8E_SET_CQ_LEVEL,          ctx->crf);
 
     //provide dummy value to initialize wrapper, values will be updated each _encode()
     vpx_img_wrap(&ctx->rawimg, VPX_IMG_FMT_I420, avctx->width, avctx->height, 1,
@@ -315,33 +373,33 @@ static inline void cx_pktcpy(struct FrameListData *dst,
 }
 
 /**
- * Store coded frame information in format suitable for return from encode().
+ * Store coded frame information in format suitable for return from encode2().
  *
- * Write buffer information from @a cx_frame to @a buf & @a buf_size.
- * Timing/frame details to @a coded_frame.
- * @return Frame size written to @a buf on success
- * @return AVERROR(EINVAL) on error
+ * Write information from @a cx_frame to @a pkt
+ * @return packet data size on success
+ * @return a negative AVERROR on error
  */
 static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame,
-                      uint8_t *buf, int buf_size, AVFrame *coded_frame)
+                      AVPacket *pkt, AVFrame *coded_frame)
 {
-    if ((int) cx_frame->sz <= buf_size) {
-        buf_size = cx_frame->sz;
-        memcpy(buf, cx_frame->buf, buf_size);
+    int ret = ff_alloc_packet(pkt, cx_frame->sz);
+    if (ret >= 0) {
+        memcpy(pkt->data, cx_frame->buf, pkt->size);
+        pkt->pts = pkt->dts    = cx_frame->pts;
         coded_frame->pts       = cx_frame->pts;
         coded_frame->key_frame = !!(cx_frame->flags & VPX_FRAME_IS_KEY);
 
-        if (coded_frame->key_frame)
-            coded_frame->pict_type = FF_I_TYPE;
-        else
-            coded_frame->pict_type = FF_P_TYPE;
+        if (coded_frame->key_frame) {
+            coded_frame->pict_type = AV_PICTURE_TYPE_I;
+            pkt->flags            |= AV_PKT_FLAG_KEY;
+        } else
+            coded_frame->pict_type = AV_PICTURE_TYPE_P;
     } else {
         av_log(avctx, AV_LOG_ERROR,
-               "Compressed frame larger than storage provided! (%zu/%d)\n",
-               cx_frame->sz, buf_size);
-        return AVERROR(EINVAL);
+               "Error getting output packet of size %zu.\n", cx_frame->sz);
+        return ret;
     }
-    return buf_size;
+    return pkt->size;
 }
 
 /**
@@ -352,7 +410,7 @@ static int storeframe(AVCodecContext *avctx, struct FrameListData *cx_frame,
  * @return AVERROR(EINVAL) on output size error
  * @return AVERROR(ENOMEM) on coded frame queue data allocation error
  */
-static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+static int queue_frames(AVCodecContext *avctx, AVPacket *pkt_out,
                         AVFrame *coded_frame)
 {
     VP8Context *ctx = avctx->priv_data;
@@ -363,9 +421,9 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
     if (ctx->coded_frame_list) {
         struct FrameListData *cx_frame = ctx->coded_frame_list;
         /* return the leading frame if we've already begun queueing */
-        size = storeframe(avctx, cx_frame, buf, buf_size, coded_frame);
+        size = storeframe(avctx, cx_frame, pkt_out, coded_frame);
         if (size < 0)
-            return AVERROR(EINVAL);
+            return size;
         ctx->coded_frame_list = cx_frame->next;
         free_coded_frame(cx_frame);
     }
@@ -382,9 +440,9 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                    provided a frame for output */
                 assert(!ctx->coded_frame_list);
                 cx_pktcpy(&cx_frame, pkt);
-                size = storeframe(avctx, &cx_frame, buf, buf_size, coded_frame);
+                size = storeframe(avctx, &cx_frame, pkt_out, coded_frame);
                 if (size < 0)
-                    return AVERROR(EINVAL);
+                    return size;
             } else {
                 struct FrameListData *cx_frame =
                     av_malloc(sizeof(struct FrameListData));
@@ -430,14 +488,14 @@ static int queue_frames(AVCodecContext *avctx, uint8_t *buf, int buf_size,
     return size;
 }
 
-static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size,
-                      void *data)
+static int vp8_encode(AVCodecContext *avctx, AVPacket *pkt,
+                      const AVFrame *frame, int *got_packet)
 {
     VP8Context *ctx = avctx->priv_data;
-    AVFrame *frame = data;
     struct vpx_image *rawimg = NULL;
     int64_t timestamp = 0;
     int res, coded_size;
+    vpx_enc_frame_flags_t flags = 0;
 
     if (frame) {
         rawimg = &ctx->rawimg;
@@ -448,15 +506,17 @@ static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size,
         rawimg->stride[VPX_PLANE_U] = frame->linesize[1];
         rawimg->stride[VPX_PLANE_V] = frame->linesize[2];
         timestamp                   = frame->pts;
+        if (frame->pict_type == AV_PICTURE_TYPE_I)
+            flags |= VPX_EFLAG_FORCE_KF;
     }
 
     res = vpx_codec_encode(&ctx->encoder, rawimg, timestamp,
-                           avctx->ticks_per_frame, 0, ctx->deadline);
+                           avctx->ticks_per_frame, flags, ctx->deadline);
     if (res != VPX_CODEC_OK) {
         log_encoder_error(avctx, "Error encoding frame");
         return AVERROR_INVALIDDATA;
     }
-    coded_size = queue_frames(avctx, buf, buf_size, avctx->coded_frame);
+    coded_size = queue_frames(avctx, pkt, avctx->coded_frame);
 
     if (!frame && avctx->flags & CODEC_FLAG_PASS1) {
         unsigned int b64_size = AV_BASE64_SIZE(ctx->twopass_stats.sz);
@@ -470,19 +530,103 @@ static int vp8_encode(AVCodecContext *avctx, uint8_t *buf, int buf_size,
         av_base64_encode(avctx->stats_out, b64_size, ctx->twopass_stats.buf,
                          ctx->twopass_stats.sz);
     }
-    return coded_size;
+
+    *got_packet = !!coded_size;
+    return 0;
+}
+
+#define OFFSET(x) offsetof(VP8Context, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+    { "cpu-used",        "Quality/Speed ratio modifier",                OFFSET(cpu_used),        AV_OPT_TYPE_INT,   {.i64 = INT_MIN}, INT_MIN, INT_MAX, VE},
+    { "auto-alt-ref",    "Enable use of alternate reference "
+                         "frames (2-pass only)",                        OFFSET(auto_alt_ref),    AV_OPT_TYPE_INT,   {.i64 = -1},      -1,      1,       VE},
+    { "lag-in-frames",   "Number of frames to look ahead for "
+                         "alternate reference frame selection",         OFFSET(lag_in_frames),   AV_OPT_TYPE_INT,   {.i64 = -1},      -1,      INT_MAX, VE},
+    { "arnr-maxframes",  "altref noise reduction max frame count",      OFFSET(arnr_max_frames), AV_OPT_TYPE_INT,   {.i64 = -1},      -1,      INT_MAX, VE},
+    { "arnr-strength",   "altref noise reduction filter strength",      OFFSET(arnr_strength),   AV_OPT_TYPE_INT,   {.i64 = -1},      -1,      INT_MAX, VE},
+    { "arnr-type",       "altref noise reduction filter type",          OFFSET(arnr_type),       AV_OPT_TYPE_INT,   {.i64 = -1},      -1,      INT_MAX, VE, "arnr_type"},
+    { "backward",        NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, VE, "arnr_type" },
+    { "forward",         NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2}, 0, 0, VE, "arnr_type" },
+    { "centered",        NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 3}, 0, 0, VE, "arnr_type" },
+    { "deadline",        "Time to spend encoding, in microseconds.",    OFFSET(deadline),        AV_OPT_TYPE_INT,   {.i64 = VPX_DL_GOOD_QUALITY}, INT_MIN, INT_MAX, VE, "quality"},
+    { "best",            NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VPX_DL_BEST_QUALITY}, 0, 0, VE, "quality"},
+    { "good",            NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VPX_DL_GOOD_QUALITY}, 0, 0, VE, "quality"},
+    { "realtime",        NULL, 0, AV_OPT_TYPE_CONST, {.i64 = VPX_DL_REALTIME},     0, 0, VE, "quality"},
+    { "error-resilient", "Error resilience configuration",              OFFSET(error_resilient), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, VE, "er"},
+#ifdef VPX_ERROR_RESILIENT_DEFAULT
+    { "default",         "Improve resiliency against losses of whole frames", 0, AV_OPT_TYPE_CONST, {.i64 = VPX_ERROR_RESILIENT_DEFAULT}, 0, 0, VE, "er"},
+    { "partitions",      "The frame partitions are independently decodable "
+                         "by the bool decoder, meaning that partitions can be decoded even "
+                         "though earlier partitions have been lost. Note that intra predicition"
+                         " is still done over the partition boundary.",       0, AV_OPT_TYPE_CONST, {.i64 = VPX_ERROR_RESILIENT_PARTITIONS}, 0, 0, VE, "er"},
+#endif
+    { "crf",             "Select the quality for constant quality mode", offsetof(VP8Context, crf), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, VE },
+    { NULL }
+};
+
+static const AVCodecDefault defaults[] = {
+    { "qmin",       "-1" },
+    { "qmax",       "-1" },
+    { "g",          "-1" },
+    { "keyint_min", "-1" },
+    { NULL },
+};
+
+#if CONFIG_LIBVPX_VP8_ENCODER
+static av_cold int vp8_init(AVCodecContext *avctx)
+{
+    return vpx_init(avctx, &vpx_codec_vp8_cx_algo);
+}
+
+static const AVClass class_vp8 = {
+    .class_name = "libvpx encoder",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_libvpx_vp8_encoder = {
+    .name           = "libvpx",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_VP8,
+    .priv_data_size = sizeof(VP8Context),
+    .init           = vp8_init,
+    .encode2        = vp8_encode,
+    .close          = vp8_free,
+    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
+    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
+    .long_name      = NULL_IF_CONFIG_SMALL("libvpx VP8"),
+    .priv_class     = &class_vp8,
+    .defaults       = defaults,
+};
+#endif /* CONFIG_LIBVPX_VP8_ENCODER */
+
+#if CONFIG_LIBVPX_VP9_ENCODER
+static av_cold int vp9_init(AVCodecContext *avctx)
+{
+    return vpx_init(avctx, &vpx_codec_vp9_cx_algo);
 }
 
-AVCodec libvpx_encoder = {
-    "libvpx",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_VP8,
-    sizeof(VP8Context),
-    vp8_init,
-    vp8_encode,
-    vp8_free,
-    NULL,
-    CODEC_CAP_DELAY,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-    .long_name = NULL_IF_CONFIG_SMALL("libvpx VP8"),
+static const AVClass class_vp9 = {
+    .class_name = "libvpx encoder",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_libvpx_vp9_encoder = {
+    .name           = "libvpx-vp9",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_VP9,
+    .priv_data_size = sizeof(VP8Context),
+    .init           = vp9_init,
+    .encode2        = vp8_encode,
+    .close          = vp8_free,
+    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS | CODEC_CAP_EXPERIMENTAL,
+    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
+    .long_name      = NULL_IF_CONFIG_SMALL("libvpx VP9"),
+    .priv_class     = &class_vp9,
+    .defaults       = defaults,
 };
+#endif /* CONFIG_LIBVPX_VP9_ENCODER */
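Usage note (not part of the patch above): the diff replaces the hard-coded cpuused/deadline behaviour with private AVOptions and moves the wrapper to the encode2()/got_packet API. The following C sketch shows how those options might be passed to the wrapper through the ordinary AVOptions/AVDictionary mechanism of that libavcodec era and how a caller would drive the encoder; the option values, variable names, and helper functions are illustrative assumptions, not anything defined by this change.

/* Illustrative sketch only: open the "libvpx" encoder with some of the
 * private options introduced in the AVOption table above, then run one
 * encode step via the public encode2()-style wrapper of the same era. */
#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"

static AVCodecContext *open_libvpx_encoder(int width, int height,
                                           AVRational time_base)
{
    AVCodec *codec = avcodec_find_encoder_by_name("libvpx");
    AVCodecContext *enc;
    AVDictionary *opts = NULL;

    if (!codec)
        return NULL;

    enc            = avcodec_alloc_context3(codec);
    enc->width     = width;
    enc->height    = height;
    enc->time_base = time_base;
    enc->pix_fmt   = AV_PIX_FMT_YUV420P;

    /* Option names come from the AVOption table in the diff; values are examples. */
    av_dict_set(&opts, "crf",           "10",   0); /* constant quality (VPX_CQ / VP8E_SET_CQ_LEVEL) */
    av_dict_set(&opts, "cpu-used",      "2",    0); /* VP8E_SET_CPUUSED                              */
    av_dict_set(&opts, "deadline",      "good", 0); /* VPX_DL_GOOD_QUALITY                           */
    av_dict_set(&opts, "auto-alt-ref",  "1",    0); /* alternate reference frames, 2-pass only       */
    av_dict_set(&opts, "lag-in-frames", "16",   0); /* maps to enccfg.g_lag_in_frames                */

    if (avcodec_open2(enc, codec, &opts) < 0) {
        av_dict_free(&opts);
        av_free(enc);
        return NULL;
    }
    av_dict_free(&opts);
    return enc;
}

/* One step of the encode loop; passing frame == NULL at end of input flushes
 * the packets delayed by CODEC_CAP_DELAY and, in pass 1, makes the base64
 * two-pass stats appear in enc->stats_out. */
static int encode_one(AVCodecContext *enc, const AVFrame *frame)
{
    AVPacket pkt;
    int got_packet = 0, ret;

    av_init_packet(&pkt);
    pkt.data = NULL; /* let the encoder allocate the payload */
    pkt.size = 0;

    ret = avcodec_encode_video2(enc, &pkt, frame, &got_packet);
    if (ret < 0)
        return ret;
    if (got_packet) {
        /* consume pkt.data / pkt.size; key frames carry AV_PKT_FLAG_KEY */
        av_free_packet(&pkt);
    }
    return 0;
}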