#include "avcodec.h"
#include "get_bits.h"
-#include "hwaccel.h"
+#include "hwconfig.h"
#include "internal.h"
#include "profiles.h"
#include "thread.h"
#include "vp9dec.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/video_enc_params.h"
#define VP9_SYNCCODE 0x498342
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
#endif
+/* Release all per-tile heap buffers: the block metadata array (b_base),
+ * the coefficient/eob scratch storage (block_base), and the exported
+ * block-structure table used for video-enc-params side data.
+ * av_freep() also NULLs the pointers, so this is safe to call on a
+ * zero-initialized or already-freed VP9TileData. */
+static void vp9_tile_data_free(VP9TileData *td)
+{
+ av_freep(&td->b_base);
+ av_freep(&td->block_base);
+ av_freep(&td->block_structure);
+}
+
+
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
{
ff_thread_release_buffer(avctx, &f->tf);
return ret;
sz = 64 * s->sb_cols * s->sb_rows;
- f->extradata = av_buffer_allocz(sz * (1 + sizeof(VP9mvrefPair)));
+ if (sz != s->frame_extradata_pool_size) {
+ av_buffer_pool_uninit(&s->frame_extradata_pool);
+ s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
+ if (!s->frame_extradata_pool) {
+ s->frame_extradata_pool_size = 0;
+ goto fail;
+ }
+ s->frame_extradata_pool_size = sz;
+ }
+ f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
if (!f->extradata) {
goto fail;
}
+ memset(f->extradata->data, 0, f->extradata->size);
f->segmentation_map = f->extradata->data;
f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
CONFIG_VP9_NVDEC_HWACCEL + \
- CONFIG_VP9_VAAPI_HWACCEL)
+ CONFIG_VP9_VAAPI_HWACCEL + \
+ CONFIG_VP9_VDPAU_HWACCEL)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
VP9Context *s = avctx->priv_data;
uint8_t *p;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
*fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
break;
case AV_PIX_FMT_YUV420P12:
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
*fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
break;
}
#undef assign
if (s->td) {
- for (i = 0; i < s->active_tile_cols; i++) {
- av_freep(&s->td[i].b_base);
- av_freep(&s->td[i].block_base);
- }
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
}
if (s->s.h.bpp != s->last_bpp) {
if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
return 0;
- av_free(td->b_base);
- av_free(td->block_base);
+ vp9_tile_data_free(td);
chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
if (s->s.frames[CUR_FRAME].uses_2pass) {
td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
- } else {
- for (i = 1; i < s->active_tile_cols; i++) {
- if (s->td[i].b_base && s->td[i].block_base) {
- av_free(s->td[i].b_base);
- av_free(s->td[i].block_base);
- }
+
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
+ if (!td->block_structure)
+ return AVERROR(ENOMEM);
}
+ } else {
+ for (i = 1; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
+
for (i = 0; i < s->active_tile_cols; i++) {
s->td[i].b_base = av_malloc(sizeof(VP9Block));
s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
+
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
+ if (!s->td[i].block_structure)
+ return AVERROR(ENOMEM);
+ }
}
}
s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
// differential forward probability updates
static int update_prob(VP56RangeCoder *c, int p)
{
- static const int inv_map_table[255] = {
+ static const uint8_t inv_map_table[255] = {
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
if (s->s.h.keyframe) {
- if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
return AVERROR_INVALIDDATA;
}
s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
if (s->s.h.intraonly) {
- if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
return AVERROR_INVALIDDATA;
}
VP56RangeCoder *rc;
if (s->td) {
- for (i = 0; i < s->active_tile_cols; i++) {
- av_free(s->td[i].b_base);
- av_free(s->td[i].block_base);
- }
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
av_free(s->td);
}
/* check reference frames */
if (!s->s.h.keyframe && !s->s.h.intraonly) {
+ int valid_ref_frame = 0;
for (i = 0; i < 3; i++) {
AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
int refw = ref->width, refh = ref->height;
} else if (refw == w && refh == h) {
s->mvscale[i][0] = s->mvscale[i][1] = 0;
} else {
+ /* Check to make sure at least one of frames that */
+ /* this frame references has valid dimensions */
if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
- av_log(avctx, AV_LOG_ERROR,
+ av_log(avctx, AV_LOG_WARNING,
"Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
refw, refh, w, h);
- return AVERROR_INVALIDDATA;
+ s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
+ continue;
}
s->mvscale[i][0] = (refw << 14) / w;
s->mvscale[i][1] = (refh << 14) / h;
s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
}
+ valid_ref_frame++;
+ }
+ if (!valid_ref_frame) {
+ av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
+ return AVERROR_INVALIDDATA;
}
}
} else {
memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
}
+ s->td[i].nb_block_structure = 0;
}
/* FIXME is it faster to not copy here, but do it down in the fw updates
int i;
av_freep(&s->intra_pred_data[0]);
- for (i = 0; i < s->active_tile_cols; i++) {
- av_freep(&s->td[i].b_base);
- av_freep(&s->td[i].block_base);
- }
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
}
static av_cold int vp9_decode_free(AVCodecContext *avctx)
int i;
for (i = 0; i < 3; i++) {
- if (s->s.frames[i].tf.f->buf[0])
- vp9_frame_unref(avctx, &s->s.frames[i]);
+ vp9_frame_unref(avctx, &s->s.frames[i]);
av_frame_free(&s->s.frames[i].tf.f);
}
+ av_buffer_pool_uninit(&s->frame_extradata_pool);
for (i = 0; i < 8; i++) {
- if (s->s.refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->s.refs[i]);
+ ff_thread_release_buffer(avctx, &s->s.refs[i]);
av_frame_free(&s->s.refs[i].f);
- if (s->next_refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->next_refs[i]);
+ ff_thread_release_buffer(avctx, &s->next_refs[i]);
av_frame_free(&s->next_refs[i].f);
}
decode_sb_mem(td, row, col, lflvl_ptr,
yoff2, uvoff2, BL_64X64);
} else {
+ if (vpX_rac_is_end(td->c)) {
+ return AVERROR_INVALIDDATA;
+ }
decode_sb(td, row, col, lflvl_ptr,
yoff2, uvoff2, BL_64X64);
}
}
#endif
+/* Attach AV_VIDEO_ENC_PARAMS_VP9 side data to the current frame.
+ *
+ * Frame-level values are filled from the parsed header: qp comes from
+ * yac_qi and the five delta_qp slots from the y/uv dc/ac qindex deltas.
+ * Per-block entries are emitted only when segmentation is enabled, in
+ * which case each tile's recorded block_structure array supplies the
+ * block positions and sizes gathered during decoding.
+ *
+ * Returns 0 on success or AVERROR(ENOMEM) if the side data cannot be
+ * allocated. */
+static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
+{
+ AVVideoEncParams *par;
+ unsigned int tile, nb_blocks = 0;
+
+ /* Without segmentation there are no per-block QP overrides to export,
+ * so only the frame-level parameters are written (nb_blocks stays 0). */
+ if (s->s.h.segmentation.enabled) {
+ for (tile = 0; tile < s->active_tile_cols; tile++)
+ nb_blocks += s->td[tile].nb_block_structure;
+ }
+
+ par = av_video_enc_params_create_side_data(frame->tf.f,
+ AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
+ if (!par)
+ return AVERROR(ENOMEM);
+
+ par->qp = s->s.h.yac_qi;
+ par->delta_qp[0][0] = s->s.h.ydc_qdelta;
+ par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
+ par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
+ par->delta_qp[1][1] = s->s.h.uvac_qdelta;
+ par->delta_qp[2][1] = s->s.h.uvac_qdelta;
+
+ if (nb_blocks) {
+ unsigned int block = 0;
+ unsigned int tile, block_tile;
+
+ for (tile = 0; tile < s->active_tile_cols; tile++) {
+ VP9TileData *td = &s->td[tile];
+
+ for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
+ AVVideoBlockParams *b = av_video_enc_params_block(par, block++);
+ unsigned int row = td->block_structure[block_tile].row;
+ unsigned int col = td->block_structure[block_tile].col;
+ /* row/col are in 8x8-block units; the segmentation map is
+ * indexed with a stride of 8 * sb_cols (one entry per 8x8
+ * block across the superblock row) — NOTE(review): stride
+ * assumption inferred from the 8x factors, confirm against
+ * the map allocation in the frame-alloc path. */
+ uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
+
+ b->src_x = col * 8;
+ b->src_y = row * 8;
+ /* block_size_idx_* encode log2 of the block dimension
+ * relative to 8 pixels, hence the 1 << (3 + idx). */
+ b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
+ b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
+
+ if (s->s.h.segmentation.feat[seg_id].q_enabled) {
+ b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
+ /* With absolute segment values, q_val is an absolute
+ * qindex; convert it to a delta against the frame qp. */
+ if (s->s.h.segmentation.absolute_vals)
+ b->delta_qp -= par->qp;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+
static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
int *got_frame, AVPacket *pkt)
{
if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
return ret;
((AVFrame *)frame)->pts = pkt->pts;
-#if FF_API_PKT_PTS
-FF_DISABLE_DEPRECATION_WARNINGS
- ((AVFrame *)frame)->pkt_pts = pkt->pts;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
((AVFrame *)frame)->pkt_dts = pkt->dts;
for (i = 0; i < 8; i++) {
if (s->next_refs[i].f->buf[0])
s->td[i].eob = s->td[i].eob_base;
s->td[i].uveob[0] = s->td[i].uveob_base[0];
s->td[i].uveob[1] = s->td[i].uveob_base[1];
+ s->td[i].error_info = 0;
}
#if HAVE_THREADS
} while (s->pass++ == 1);
ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
+ if (s->td->error_info < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
+ s->td->error_info = 0;
+ return AVERROR_INVALIDDATA;
+ }
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
+ if (ret < 0)
+ return ret;
+ }
+
finish:
// ref frame setup
for (i = 0; i < 8; i++) {
{
VP9Context *s = avctx->priv_data;
- avctx->internal->allocate_progress = 1;
s->last_bpp = 0;
s->s.h.filter.sharpness = -1;
}
#if HAVE_THREADS
-static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
-{
- return init_frames(avctx);
-}
-
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
int i, ret;
}
#endif
-AVCodec ff_vp9_decoder = {
+const AVCodec ff_vp9_decoder = {
.name = "vp9",
.long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
.type = AVMEDIA_TYPE_VIDEO,
.close = vp9_decode_free,
.decode = vp9_decode_frame,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
- .caps_internal = FF_CODEC_CAP_SLICE_THREAD_HAS_MF,
+ .caps_internal = FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
+ FF_CODEC_CAP_ALLOCATE_PROGRESS,
.flush = vp9_decode_flush,
- .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
.update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
.bsfs = "vp9_superframe_split",
- .hw_configs = (const AVCodecHWConfigInternal*[]) {
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_VP9_DXVA2_HWACCEL
HWACCEL_DXVA2(vp9),
#endif
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
HWACCEL_VAAPI(vp9),
+#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ HWACCEL_VDPAU(vp9),
#endif
NULL
},