API changes, most recent first:
-2017-xx-xx - xxxxxxx - lavc 58.5.0 - avcodec.h
++2017-xx-xx - xxxxxxx - lavc 58.3.100 - avcodec.h
+ Add avcodec_get_hw_frames_parameters().
+
-2017-xx-xx - xxxxxxx - lavu 56.6.0 - pixdesc.h
+-------- 8< --------- FFmpeg 3.4 was cut here -------- 8< ---------
+
+2017-09-28 - b6cf66ae1c - lavc 57.106.104 - avcodec.h
+ Add AV_PKT_DATA_A53_CC packet side data, to export closed captions.
+
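(Editor's sketch, not part of this patch: a consumer could read the exported captions off a packet roughly as below; pkt and handle_cc() are assumed placeholders.)

    int cc_size = 0;
    uint8_t *cc = av_packet_get_side_data(pkt, AV_PKT_DATA_A53_CC, &cc_size);
    if (cc)
        handle_cc(cc, cc_size);   /* hypothetical consumer of the CC bytes */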
+2017-09-27 - 7aa6b8a68f - lavu 55.77.101 / lavu 55.31.1 - frame.h
+ Allow passing the value of 0 (meaning "automatic") as the required alignment
+ to av_frame_get_buffer().
+
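(Editor's sketch of the new behaviour; the frame setup is illustrative and not taken from this patch.)

    AVFrame *frame = av_frame_alloc();
    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 1280;
    frame->height = 720;
    /* 0 now means: choose a suitable alignment for the current CPU automatically */
    if (av_frame_get_buffer(frame, 0) < 0) {
        /* allocation failed */
    }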
+2017-09-27 - 522f877086 - lavu 55.77.100 / lavu 55.31.0 - cpu.h
+ Add av_cpu_max_align() for querying maximum required data alignment.
+
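(Editor's sketch; buf_size is a placeholder.)

    /* query the largest alignment any enabled SIMD code path may require */
    size_t align = av_cpu_max_align();
    uint8_t *buf  = av_malloc(FFALIGN(buf_size, align));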
+2017-09-26 - b1cf151c4d - lavc 57.106.102 - avcodec.h
+ Deprecate AVCodecContext.refcounted_frames. This was useful for deprecated
+ API only (avcodec_decode_video2/avcodec_decode_audio4). The new decode APIs
+ (avcodec_send_packet/avcodec_receive_frame) always work with reference
+ counted frames.
+
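(Editor's sketch of the replacement decode loop; dec_ctx, pkt, frame and process() are placeholders, not from this patch.)

    if (avcodec_send_packet(dec_ctx, pkt) < 0)
        return;
    while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
        process(frame);        /* frame is always reference counted here */
        av_frame_unref(frame);
    }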
+2017-09-21 - 6f15f1cdc8 - lavu 55.76.100 / 56.6.0 - pixdesc.h
Add av_color_range_from_name(), av_color_primaries_from_name(),
av_color_transfer_from_name(), av_color_space_from_name(), and
av_chroma_location_from_name().
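(Editor's sketch: each helper maps a name to the corresponding enum value and returns a negative error code if the name is unknown.)

    int csp = av_color_space_from_name("bt709");
    if (csp >= 0)
        avctx->colorspace = csp;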
*/
int caps_internal;
+ /**
+ * Fill the given hw_frames context with current codec parameters. Called
+ * from get_format. Refer to avcodec_get_hw_frames_parameters() for
+ * details.
+ *
+ * This CAN be called before AVHWAccel.init is called, and you must assume
+ * that avctx->hwaccel_priv_data is invalid.
+ */
+ int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx);
++
+ /**
+ * Some hwaccels are ambiguous if only the id and pix_fmt fields are used.
+ * If non-NULL, the associated AVCodec must have
+ * FF_CODEC_CAP_HWACCEL_REQUIRE_CLASS set.
+ */
+ const AVClass *decoder_class;
} AVHWAccel;
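(Editor's illustration of the new frame_params callback, not a hwaccel from this patch: such an implementation typically only describes the surface pool the codec will need; my_hwaccel_frame_params() and the VAAPI format choice are assumptions.)

static int my_hwaccel_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
{
    AVHWFramesContext *frames_ctx = (AVHWFramesContext *)hw_frames_ctx->data;

    frames_ctx->format            = AV_PIX_FMT_VAAPI;   /* hw surface format    */
    frames_ctx->sw_format         = avctx->sw_pix_fmt;  /* underlying sw layout */
    frames_ctx->width             = avctx->coded_width;
    frames_ctx->height            = avctx->coded_height;
    frames_ctx->initial_pool_size = 1;                   /* the absolute minimum */

    return 0;
}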
+/**
+ * HWAccel is experimental and is thus avoided in favor of non-experimental
+ * codecs.
+ */
+#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200
+
/**
* Hardware acceleration should be used for decoding even if the codec level
* used is unknown or higher than the maximum supported level reported by the
return NULL;
}
- avctx->hw_frames_ctx);
- if (ret < 0) {
- av_buffer_unref(&avctx->hw_frames_ctx);
+ int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
+ enum AVHWDeviceType dev_type)
+ {
+ AVHWDeviceContext *device_ctx;
+ AVHWFramesContext *frames_ctx;
+ int ret;
+
+ if (!avctx->hwaccel)
+ return AVERROR(ENOSYS);
+
+ if (avctx->hw_frames_ctx)
+ return 0;
+ if (!avctx->hw_device_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
+ "required for hardware accelerated decoding.\n");
+ return AVERROR(EINVAL);
+ }
+
+ device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
+ if (device_ctx->type != dev_type) {
+ av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
+ "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
+ av_hwdevice_get_type_name(device_ctx->type));
+ return AVERROR(EINVAL);
+ }
+
+ ret = avcodec_get_hw_frames_parameters(avctx,
+ avctx->hw_device_ctx,
+ avctx->hwaccel->pix_fmt,
- }
++ &avctx->hw_frames_ctx);
++ if (ret < 0)
+ return ret;
- AVHWAccel *hwa = find_hwaccel(avctx->codec_id, hw_pix_fmt);
+
+ frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+
+ if (frames_ctx->initial_pool_size) {
+ // We guarantee 4 base work surfaces. The function above guarantees 1
+ // (the absolute minimum), so add the missing count.
+ frames_ctx->initial_pool_size += 3;
+
+ // Add an additional surface per thread if frame threading is enabled.
+ if (avctx->active_thread_type & FF_THREAD_FRAME)
+ frames_ctx->initial_pool_size += avctx->thread_count;
+ }
+
+ ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+ if (ret < 0) {
+ av_buffer_unref(&avctx->hw_frames_ctx);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
+ AVBufferRef *device_ref,
+ enum AVPixelFormat hw_pix_fmt,
+ AVBufferRef **out_frames_ref)
+ {
+ AVBufferRef *frames_ref = NULL;
++ AVHWAccel *hwa = find_hwaccel(avctx, hw_pix_fmt);
+ int ret;
+
+ if (!hwa || !hwa->frame_params)
+ return AVERROR(ENOENT);
+
+ frames_ref = av_hwframe_ctx_alloc(device_ref);
+ if (!frames_ref)
+ return AVERROR(ENOMEM);
+
+ ret = hwa->frame_params(avctx, frames_ref);
+ if (ret >= 0) {
+ *out_frames_ref = frames_ref;
+ } else {
+ av_buffer_unref(&frames_ref);
+ }
+ return ret;
+ }
+
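(Editor's sketch of the intended use from an application's get_format callback; variable names and the pixel format are assumptions, not part of this patch.)

    AVBufferRef *frames_ref = NULL;
    int ret = avcodec_get_hw_frames_parameters(avctx, avctx->hw_device_ctx,
                                               AV_PIX_FMT_VAAPI, &frames_ref);
    if (ret >= 0) {
        AVHWFramesContext *fc = (AVHWFramesContext *)frames_ref->data;
        fc->initial_pool_size += 8;   /* extra surfaces for the app's own queue */
        ret = av_hwframe_ctx_init(frames_ref);
    }
    if (ret < 0)
        av_buffer_unref(&frames_ref);
    else
        avctx->hw_frames_ctx = frames_ref;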
static int setup_hwaccel(AVCodecContext *avctx,
const enum AVPixelFormat fmt,
const char *name)
#ifndef AVCODEC_DECODE_H
#define AVCODEC_DECODE_H
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+#include "libavutil/hwcontext.h"
+
#include "avcodec.h"
/**
void ff_decode_bsfs_uninit(AVCodecContext *avctx);
+ /**
+ * Make sure avctx.hw_frames_ctx is set. If it's not set, the function will
+ * try to allocate it from hw_device_ctx. If that is not possible, an error
+ * message is printed, and an error code is returned.
+ */
+ int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
+ enum AVHWDeviceType dev_type);
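(Editor's sketch of a typical internal call site; the device type is an assumption, not taken from this patch.)

    ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_VAAPI);
    if (ret < 0)
        return ret;
    /* avctx->hw_frames_ctx is now set and initialized */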
+
+int ff_attach_decode_data(AVFrame *frame);
+
#endif /* AVCODEC_DECODE_H */
--- /dev/null
+/*
+ * DXVA2 VP9 HW acceleration.
+ *
+ * copyright (c) 2015 Hendrik Leppkes
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+
+#include "vp9shared.h"
+
+// The headers above may include w32threads.h, which uses the original
+// _WIN32_WINNT define, while dxva2_internal.h redefines it to target a
+// potentially newer version.
+#include "dxva2_internal.h"
+
+struct vp9_dxva2_picture_context {
+ DXVA_PicParams_VP9 pp;
+ DXVA_Slice_VPx_Short slice;
+ const uint8_t *bitstream;
+ unsigned bitstream_size;
+};
+
+static void fill_picture_entry(DXVA_PicEntry_VPx *pic,
+ unsigned index, unsigned flag)
+{
+ av_assert0((index & 0x7f) == index && (flag & 0x01) == flag);
+ pic->bPicEntry = index | (flag << 7);
+}
+
+static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const VP9SharedContext *h,
+ DXVA_PicParams_VP9 *pp)
+{
+ int i;
+ const AVPixFmtDescriptor * pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
+
+ if (!pixdesc)
+ return -1;
+
+ memset(pp, 0, sizeof(*pp));
+
+ fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, h->frames[CUR_FRAME].tf.f), 0);
+
+ pp->profile = h->h.profile;
+ pp->wFormatAndPictureInfoFlags = ((h->h.keyframe == 0) << 0) |
+ ((h->h.invisible == 0) << 1) |
+ (h->h.errorres << 2) |
+ (pixdesc->log2_chroma_w << 3) | /* subsampling_x */
+ (pixdesc->log2_chroma_h << 4) | /* subsampling_y */
+ (0 << 5) | /* extra_plane */
+ (h->h.refreshctx << 6) |
+ (h->h.parallelmode << 7) |
+ (h->h.intraonly << 8) |
+ (h->h.framectxid << 9) |
+ (h->h.resetctx << 11) |
+ ((h->h.keyframe ? 0 : h->h.highprecisionmvs) << 13) |
+ (0 << 14); /* ReservedFormatInfo2Bits */
+
+ pp->width = avctx->width;
+ pp->height = avctx->height;
+ pp->BitDepthMinus8Luma = pixdesc->comp[0].depth - 8;
+ pp->BitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;
+ /* swap 0/1 to match the reference */
+ pp->interp_filter = h->h.filtermode ^ (h->h.filtermode <= 1);
+ pp->Reserved8Bits = 0;
+
+ for (i = 0; i < 8; i++) {
+ if (h->refs[i].f->buf[0]) {
+ fill_picture_entry(&pp->ref_frame_map[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[i].f), 0);
+ pp->ref_frame_coded_width[i] = h->refs[i].f->width;
+ pp->ref_frame_coded_height[i] = h->refs[i].f->height;
+ } else
+ pp->ref_frame_map[i].bPicEntry = 0xFF;
+ }
+
+ for (i = 0; i < 3; i++) {
+ uint8_t refidx = h->h.refidx[i];
+ if (h->refs[refidx].f->buf[0])
+ fill_picture_entry(&pp->frame_refs[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[refidx].f), 0);
+ else
+ pp->frame_refs[i].bPicEntry = 0xFF;
+
+ pp->ref_frame_sign_bias[i + 1] = h->h.signbias[i];
+ }
+
+ pp->filter_level = h->h.filter.level;
+ pp->sharpness_level = h->h.filter.sharpness;
+
+ pp->wControlInfoFlags = (h->h.lf_delta.enabled << 0) |
+ (h->h.lf_delta.updated << 1) |
+ (h->h.use_last_frame_mvs << 2) |
+ (0 << 3); /* ReservedControlInfo5Bits */
+
+ for (i = 0; i < 4; i++)
+ pp->ref_deltas[i] = h->h.lf_delta.ref[i];
+
+ for (i = 0; i < 2; i++)
+ pp->mode_deltas[i] = h->h.lf_delta.mode[i];
+
+ pp->base_qindex = h->h.yac_qi;
+ pp->y_dc_delta_q = h->h.ydc_qdelta;
+ pp->uv_dc_delta_q = h->h.uvdc_qdelta;
+ pp->uv_ac_delta_q = h->h.uvac_qdelta;
+
+ /* segmentation data */
+ pp->stVP9Segments.wSegmentInfoFlags = (h->h.segmentation.enabled << 0) |
+ (h->h.segmentation.update_map << 1) |
+ (h->h.segmentation.temporal << 2) |
+ (h->h.segmentation.absolute_vals << 3) |
+ (0 << 4); /* ReservedSegmentFlags4Bits */
+
+ for (i = 0; i < 7; i++)
+ pp->stVP9Segments.tree_probs[i] = h->h.segmentation.prob[i];
+
+ if (h->h.segmentation.temporal)
+ for (i = 0; i < 3; i++)
+ pp->stVP9Segments.pred_probs[i] = h->h.segmentation.pred_prob[i];
+ else
+ memset(pp->stVP9Segments.pred_probs, 255, sizeof(pp->stVP9Segments.pred_probs));
+
+ for (i = 0; i < 8; i++) {
+ pp->stVP9Segments.feature_mask[i] = (h->h.segmentation.feat[i].q_enabled << 0) |
+ (h->h.segmentation.feat[i].lf_enabled << 1) |
+ (h->h.segmentation.feat[i].ref_enabled << 2) |
+ (h->h.segmentation.feat[i].skip_enabled << 3);
+
+ pp->stVP9Segments.feature_data[i][0] = h->h.segmentation.feat[i].q_val;
+ pp->stVP9Segments.feature_data[i][1] = h->h.segmentation.feat[i].lf_val;
+ pp->stVP9Segments.feature_data[i][2] = h->h.segmentation.feat[i].ref_val;
+ pp->stVP9Segments.feature_data[i][3] = 0; /* no data for skip */
+ }
+
+ pp->log2_tile_cols = h->h.tiling.log2_tile_cols;
+ pp->log2_tile_rows = h->h.tiling.log2_tile_rows;
+
+ pp->uncompressed_header_size_byte_aligned = h->h.uncompressed_header_size;
+ pp->first_partition_size = h->h.compressed_header_size;
+
+ pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
+ return 0;
+}
+
+static void fill_slice_short(DXVA_Slice_VPx_Short *slice,
+ unsigned position, unsigned size)
+{
+ memset(slice, 0, sizeof(*slice));
+ slice->BSNALunitDataLocation = position;
+ slice->SliceBytesInBuffer = size;
+ slice->wBadSliceChopping = 0;
+}
+
+static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
+ DECODER_BUFFER_DESC *bs,
+ DECODER_BUFFER_DESC *sc)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
+ struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ void *dxva_data_ptr;
+ uint8_t *dxva_data;
+ unsigned dxva_size;
+ unsigned padding;
+ unsigned type;
+
+#if CONFIG_D3D11VA
+ if (ff_dxva2_is_d3d11(avctx)) {
+ type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
+ if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
+ D3D11VA_CONTEXT(ctx)->decoder,
+ type,
+ &dxva_size, &dxva_data_ptr)))
+ return -1;
+ }
+#endif
+#if CONFIG_DXVA2
+ if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
+ type = DXVA2_BitStreamDateBufferType;
+ if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
+ type,
+ &dxva_data_ptr, &dxva_size)))
+ return -1;
+ }
+#endif
+
+ dxva_data = dxva_data_ptr;
+
+ if (ctx_pic->slice.SliceBytesInBuffer > dxva_size) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream\n");
+ return -1;
+ }
+
+ memcpy(dxva_data, ctx_pic->bitstream, ctx_pic->slice.SliceBytesInBuffer);
+
+ padding = FFMIN(128 - ((ctx_pic->slice.SliceBytesInBuffer) & 127), dxva_size - ctx_pic->slice.SliceBytesInBuffer);
+ if (padding > 0) {
+ memset(dxva_data + ctx_pic->slice.SliceBytesInBuffer, 0, padding);
+ ctx_pic->slice.SliceBytesInBuffer += padding;
+ }
+
+#if CONFIG_D3D11VA
+ if (ff_dxva2_is_d3d11(avctx))
+ if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
+ return -1;
+#endif
+#if CONFIG_DXVA2
+ if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
+ if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
+ return -1;
+#endif
+
+#if CONFIG_D3D11VA
+ if (ff_dxva2_is_d3d11(avctx)) {
+ D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
+ memset(dsc11, 0, sizeof(*dsc11));
+ dsc11->BufferType = type;
+ dsc11->DataSize = ctx_pic->slice.SliceBytesInBuffer;
+ dsc11->NumMBsInBuffer = 0;
+
+ type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
+ }
+#endif
+#if CONFIG_DXVA2
+ if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
+ DXVA2_DecodeBufferDesc *dsc2 = bs;
+ memset(dsc2, 0, sizeof(*dsc2));
+ dsc2->CompressedBufferType = type;
+ dsc2->DataSize = ctx_pic->slice.SliceBytesInBuffer;
+ dsc2->NumMBsInBuffer = 0;
+
+ type = DXVA2_SliceControlBufferType;
+ }
+#endif
+
+ return ff_dxva2_commit_buffer(avctx, ctx, sc,
+ type,
+ &ctx_pic->slice, sizeof(ctx_pic->slice), 0);
+}
+
+
+static int dxva2_vp9_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
+ struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+
+ if (!DXVA_CONTEXT_VALID(avctx, ctx))
+ return -1;
+ av_assert0(ctx_pic);
+
+ /* Fill up DXVA_PicParams_VP9 */
+ if (fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp) < 0)
+ return -1;
+
+ ctx_pic->bitstream_size = 0;
+ ctx_pic->bitstream = NULL;
+ return 0;
+}
+
+static int dxva2_vp9_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ unsigned position;
+
+ if (!ctx_pic->bitstream)
+ ctx_pic->bitstream = buffer;
+ ctx_pic->bitstream_size += size;
+
+ position = buffer - ctx_pic->bitstream;
+ fill_slice_short(&ctx_pic->slice, position, size);
+
+ return 0;
+}
+
+static int dxva2_vp9_end_frame(AVCodecContext *avctx)
+{
+ VP9SharedContext *h = avctx->priv_data;
+ struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ int ret;
+
+ if (ctx_pic->bitstream_size <= 0)
+ return -1;
+
+ ret = ff_dxva2_common_end_frame(avctx, h->frames[CUR_FRAME].tf.f,
+ &ctx_pic->pp, sizeof(ctx_pic->pp),
+ NULL, 0,
+ commit_bitstream_and_slice_buffer);
+ return ret;
+}
+
+#if CONFIG_VP9_DXVA2_HWACCEL
+AVHWAccel ff_vp9_dxva2_hwaccel = {
+ .name = "vp9_dxva2",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP9,
+ .pix_fmt = AV_PIX_FMT_DXVA2_VLD,
+ .init = ff_dxva2_decode_init,
+ .uninit = ff_dxva2_decode_uninit,
+ .start_frame = dxva2_vp9_start_frame,
+ .decode_slice = dxva2_vp9_decode_slice,
+ .end_frame = dxva2_vp9_end_frame,
++ .frame_params = ff_dxva2_common_frame_params,
+ .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
+ .priv_data_size = sizeof(FFDXVASharedContext),
+};
+#endif
+
+#if CONFIG_VP9_D3D11VA_HWACCEL
+AVHWAccel ff_vp9_d3d11va_hwaccel = {
+ .name = "vp9_d3d11va",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP9,
+ .pix_fmt = AV_PIX_FMT_D3D11VA_VLD,
+ .init = ff_dxva2_decode_init,
+ .uninit = ff_dxva2_decode_uninit,
+ .start_frame = dxva2_vp9_start_frame,
+ .decode_slice = dxva2_vp9_decode_slice,
+ .end_frame = dxva2_vp9_end_frame,
++ .frame_params = ff_dxva2_common_frame_params,
+ .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
+ .priv_data_size = sizeof(FFDXVASharedContext),
+};
+#endif
+
+#if CONFIG_VP9_D3D11VA2_HWACCEL
+AVHWAccel ff_vp9_d3d11va2_hwaccel = {
+ .name = "vp9_d3d11va2",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP9,
+ .pix_fmt = AV_PIX_FMT_D3D11,
+ .init = ff_dxva2_decode_init,
+ .uninit = ff_dxva2_decode_uninit,
+ .start_frame = dxva2_vp9_start_frame,
+ .decode_slice = dxva2_vp9_decode_slice,
+ .end_frame = dxva2_vp9_end_frame,
++ .frame_params = ff_dxva2_common_frame_params,
+ .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
+ .priv_data_size = sizeof(FFDXVASharedContext),
+};
+#endif
ctx->hwctx->driver_quirks =
AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS;
- } else
- #endif
- if (avctx->hw_frames_ctx) {
- // This structure has a shorter lifetime than the enclosing
- // AVCodecContext, so we inherit the references from there
- // and do not need to make separate ones.
-
- ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
- ctx->hwfc = ctx->frames->hwctx;
- ctx->device = ctx->frames->device_ctx;
- ctx->hwctx = ctx->device->hwctx;
-
- } else if (avctx->hw_device_ctx) {
- ctx->device = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
- ctx->hwctx = ctx->device->hwctx;
-
- if (ctx->device->type != AV_HWDEVICE_TYPE_VAAPI) {
- av_log(avctx, AV_LOG_ERROR, "Device supplied for VAAPI "
- "decoding must be a VAAPI device (not %d).\n",
- ctx->device->type);
- err = AVERROR(EINVAL);
- goto fail;
- }
-
- } else {
- av_log(avctx, AV_LOG_ERROR, "A hardware device or frames context "
- "is required for VAAPI decoding.\n");
- err = AVERROR(EINVAL);
- goto fail;
}
+ #endif
-#if FF_API_VAAPI_CONTEXT
+#if FF_API_STRUCT_VAAPI_CONTEXT
if (ctx->have_old_context) {
ctx->va_config = ctx->old_context->config_id;
ctx->va_context = ctx->old_context->context_id;
--- /dev/null
+/*
+ * VP9 HW decode acceleration through VA API
+ *
+ * Copyright (C) 2015 Timo Rothenpieler <timo@rothenpieler.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/pixdesc.h"
+
+#include "hwaccel.h"
+#include "vaapi_decode.h"
+#include "vp9shared.h"
+
+static VASurfaceID vaapi_vp9_surface_id(const VP9Frame *vf)
+{
+ if (vf)
+ return ff_vaapi_get_surface_id(vf->tf.f);
+ else
+ return VA_INVALID_SURFACE;
+}
+
+static int vaapi_vp9_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ VADecPictureParameterBufferVP9 pic_param;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
+ int err, i;
+
+ pic->output_surface = vaapi_vp9_surface_id(&h->frames[CUR_FRAME]);
+
+ pic_param = (VADecPictureParameterBufferVP9) {
+ .frame_width = avctx->width,
+ .frame_height = avctx->height,
+
+ .pic_fields.bits = {
+ .subsampling_x = pixdesc->log2_chroma_w,
+ .subsampling_y = pixdesc->log2_chroma_h,
+ .frame_type = !h->h.keyframe,
+ .show_frame = !h->h.invisible,
+ .error_resilient_mode = h->h.errorres,
+ .intra_only = h->h.intraonly,
+ .allow_high_precision_mv = h->h.keyframe ? 0 : h->h.highprecisionmvs,
+ .mcomp_filter_type = h->h.filtermode ^ (h->h.filtermode <= 1),
+ .frame_parallel_decoding_mode = h->h.parallelmode,
+ .reset_frame_context = h->h.resetctx,
+ .refresh_frame_context = h->h.refreshctx,
+ .frame_context_idx = h->h.framectxid,
+
+ .segmentation_enabled = h->h.segmentation.enabled,
+ .segmentation_temporal_update = h->h.segmentation.temporal,
+ .segmentation_update_map = h->h.segmentation.update_map,
+
+ .last_ref_frame = h->h.refidx[0],
+ .last_ref_frame_sign_bias = h->h.signbias[0],
+ .golden_ref_frame = h->h.refidx[1],
+ .golden_ref_frame_sign_bias = h->h.signbias[1],
+ .alt_ref_frame = h->h.refidx[2],
+ .alt_ref_frame_sign_bias = h->h.signbias[2],
+ .lossless_flag = h->h.lossless,
+ },
+
+ .filter_level = h->h.filter.level,
+ .sharpness_level = h->h.filter.sharpness,
+ .log2_tile_rows = h->h.tiling.log2_tile_rows,
+ .log2_tile_columns = h->h.tiling.log2_tile_cols,
+
+ .frame_header_length_in_bytes = h->h.uncompressed_header_size,
+ .first_partition_size = h->h.compressed_header_size,
+
+ .profile = h->h.profile,
+ .bit_depth = h->h.bpp,
+ };
+
+ for (i = 0; i < 7; i++)
+ pic_param.mb_segment_tree_probs[i] = h->h.segmentation.prob[i];
+
+ if (h->h.segmentation.temporal) {
+ for (i = 0; i < 3; i++)
+ pic_param.segment_pred_probs[i] = h->h.segmentation.pred_prob[i];
+ } else {
+ memset(pic_param.segment_pred_probs, 255, sizeof(pic_param.segment_pred_probs));
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (h->refs[i].f->buf[0])
+ pic_param.reference_frames[i] = ff_vaapi_get_surface_id(h->refs[i].f);
+ else
+ pic_param.reference_frames[i] = VA_INVALID_ID;
+ }
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAPictureParameterBufferType,
+ &pic_param, sizeof(pic_param));
+ if (err < 0) {
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+ }
+
+ return 0;
+}
+
+static int vaapi_vp9_end_frame(AVCodecContext *avctx)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+
+ return ff_vaapi_decode_issue(avctx, pic);
+}
+
+static int vaapi_vp9_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ VASliceParameterBufferVP9 slice_param;
+ int err, i;
+
+ slice_param = (VASliceParameterBufferVP9) {
+ .slice_data_size = size,
+ .slice_data_offset = 0,
+ .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
+ };
+
+ for (i = 0; i < 8; i++) {
+ slice_param.seg_param[i] = (VASegmentParameterVP9) {
+ .segment_flags.fields = {
+ .segment_reference_enabled = h->h.segmentation.feat[i].ref_enabled,
+ .segment_reference = h->h.segmentation.feat[i].ref_val,
+ .segment_reference_skipped = h->h.segmentation.feat[i].skip_enabled,
+ },
+
+ .luma_dc_quant_scale = h->h.segmentation.feat[i].qmul[0][0],
+ .luma_ac_quant_scale = h->h.segmentation.feat[i].qmul[0][1],
+ .chroma_dc_quant_scale = h->h.segmentation.feat[i].qmul[1][0],
+ .chroma_ac_quant_scale = h->h.segmentation.feat[i].qmul[1][1],
+ };
+
+ memcpy(slice_param.seg_param[i].filter_level, h->h.segmentation.feat[i].lflvl, sizeof(slice_param.seg_param[i].filter_level));
+ }
+
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
+ &slice_param, sizeof(slice_param),
+ buffer, size);
+ if (err) {
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+ }
+
+ return 0;
+}
+
+AVHWAccel ff_vp9_vaapi_hwaccel = {
+ .name = "vp9_vaapi",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP9,
+ .pix_fmt = AV_PIX_FMT_VAAPI,
+ .start_frame = vaapi_vp9_start_frame,
+ .end_frame = vaapi_vp9_end_frame,
+ .decode_slice = vaapi_vp9_decode_slice,
+ .frame_priv_data_size = sizeof(VAAPIDecodePicture),
+ .init = ff_vaapi_decode_init,
+ .uninit = ff_vaapi_decode_uninit,
++ .frame_params = ff_vaapi_common_frame_params,
+ .priv_data_size = sizeof(VAAPIDecodeContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+};
#include "libavutil/version.h"
-#define LIBAVCODEC_VERSION_MAJOR 58
-#define LIBAVCODEC_VERSION_MINOR 5
-#define LIBAVCODEC_VERSION_MICRO 0
+#define LIBAVCODEC_VERSION_MAJOR 58
- #define LIBAVCODEC_VERSION_MINOR 2
++#define LIBAVCODEC_VERSION_MINOR 3
+#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \