enum CompPredMode comppredmode;
// contextual (left/above) cache
- uint8_t left_partition_ctx[8], *above_partition_ctx;
- uint8_t left_mode_ctx[16], *above_mode_ctx;
+ DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
+ DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
+ DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
+ DECLARE_ALIGNED(8, uint8_t, left_uv_nnz_ctx)[2][8];
+ DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_txfm_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_segpred_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_intra_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_comp_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_ref_ctx)[8];
+ DECLARE_ALIGNED(8, uint8_t, left_filter_ctx)[8];
+ uint8_t *above_partition_ctx;
+ uint8_t *above_mode_ctx;
// FIXME maybe merge some of the below in a flags field?
- uint8_t left_y_nnz_ctx[16], *above_y_nnz_ctx;
- uint8_t left_uv_nnz_ctx[2][8], *above_uv_nnz_ctx[2];
- uint8_t left_skip_ctx[8], *above_skip_ctx; // 1bit
- uint8_t left_txfm_ctx[8], *above_txfm_ctx; // 2bit
- uint8_t left_segpred_ctx[8], *above_segpred_ctx; // 1bit
- uint8_t left_intra_ctx[8], *above_intra_ctx; // 1bit
- uint8_t left_comp_ctx[8], *above_comp_ctx; // 1bit
- uint8_t left_ref_ctx[8], *above_ref_ctx; // 2bit
- uint8_t left_filter_ctx[8], *above_filter_ctx;
- VP56mv left_mv_ctx[16][2], (*above_mv_ctx)[2];
+ uint8_t *above_y_nnz_ctx;
+ uint8_t *above_uv_nnz_ctx[2];
+ uint8_t *above_skip_ctx; // 1bit
+ uint8_t *above_txfm_ctx; // 2bit
+ uint8_t *above_segpred_ctx; // 1bit
+ uint8_t *above_intra_ctx; // 1bit
+ uint8_t *above_comp_ctx; // 1bit
+ uint8_t *above_ref_ctx; // 2bit
+ uint8_t *above_filter_ctx;
+ VP56mv (*above_mv_ctx)[2];
// whole-frame cache
uint8_t *intra_pred_data[3];
DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[71*80];
// block reconstruction intermediates
+ int block_alloc_using_2pass;
int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
struct { int x, y; } min_mv, max_mv;
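
The left_* caches above each cover one 64x64-superblock edge (8 entries for 8x8-unit contexts, 16 for 4x4-unit ones); making them fixed-size and DECLARE_ALIGNED is what lets the SET_CTXS/SPLAT_CTX code further down refresh them with single aligned wide stores instead of byte loops. A minimal standalone sketch of the idea, in plain C11 rather than the libavutil macros:

    #include <stdalign.h>
    #include <stdint.h>
    #include <string.h>

    /* Eight one-byte skip contexts per superblock edge; 8-byte alignment
     * guarantees a single 64-bit store can refresh the whole row. */
    static alignas(8) uint8_t left_skip_ctx[8];

    static void splat_skip(uint8_t val, int n)   /* n = 1, 2, 4 or 8 */
    {
        /* multiplying by 0x01..01 replicates the byte across the word */
        uint64_t v64 = val * 0x0101010101010101ULL;
        memcpy(left_skip_ctx, &v64, n);  /* one aligned store after inlining */
    }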
// retain segmentation map if it doesn't update
if (s->segmentation.enabled && !s->segmentation.update_map &&
- !s->keyframe && !s->intraonly) {
+ !s->intraonly && !s->keyframe) {
memcpy(f->segmentation_map, s->frames[LAST_FRAME].segmentation_map, sz);
}
av_assert0(w > 0 && h > 0);
- if (s->above_partition_ctx && w == ctx->width && h == ctx->height)
+ if (s->intra_pred_data[0] && w == ctx->width && h == ctx->height)
return 0;
ctx->width = w;
s->rows = (h + 7) >> 3;
#define assign(var, type, n) var = (type) p; p += s->sb_cols * n * sizeof(*var)
- av_freep(&s->above_partition_ctx);
+ av_freep(&s->intra_pred_data[0]);
p = av_malloc(s->sb_cols * (240 + sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
if (!p)
return AVERROR(ENOMEM);
+ assign(s->intra_pred_data[0], uint8_t *, 64);
+ assign(s->intra_pred_data[1], uint8_t *, 32);
+ assign(s->intra_pred_data[2], uint8_t *, 32);
+ assign(s->above_y_nnz_ctx, uint8_t *, 16);
+ assign(s->above_mode_ctx, uint8_t *, 16);
+ assign(s->above_mv_ctx, VP56mv(*)[2], 16);
assign(s->above_partition_ctx, uint8_t *, 8);
assign(s->above_skip_ctx, uint8_t *, 8);
assign(s->above_txfm_ctx, uint8_t *, 8);
- assign(s->above_mode_ctx, uint8_t *, 16);
- assign(s->above_y_nnz_ctx, uint8_t *, 16);
assign(s->above_uv_nnz_ctx[0], uint8_t *, 8);
assign(s->above_uv_nnz_ctx[1], uint8_t *, 8);
- assign(s->intra_pred_data[0], uint8_t *, 64);
- assign(s->intra_pred_data[1], uint8_t *, 32);
- assign(s->intra_pred_data[2], uint8_t *, 32);
assign(s->above_segpred_ctx, uint8_t *, 8);
assign(s->above_intra_ctx, uint8_t *, 8);
assign(s->above_comp_ctx, uint8_t *, 8);
assign(s->above_ref_ctx, uint8_t *, 8);
assign(s->above_filter_ctx, uint8_t *, 8);
assign(s->lflvl, struct VP9Filter *, 1);
- assign(s->above_mv_ctx, VP56mv(*)[2], 16);
#undef assign
+ // these will be re-allocated a little later
+ av_freep(&s->b_base);
+ av_freep(&s->block_base);
+
+ return 0;
+}
+
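The single av_malloc() above is sized from a per-sb_cols byte budget; for reference, the 240 figure breaks down over the assign() lines as:

    /* per-sb_cols byte budget of the av_malloc() above:
     *   intra_pred_data[0..2]:          64 + 32 + 32 = 128
     *   above_y_nnz_ctx, above_mode_ctx:     16 + 16 =  32
     *   ten 8-entry above_* contexts:         10 * 8 =  80
     *                                          total = 240
     * plus sizeof(*s->lflvl) and 16 * sizeof(*s->above_mv_ctx)
     * (16 VP56mv pairs), which are added separately in the call. */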
+static int update_block_buffers(AVCodecContext *ctx)
+{
+ VP9Context *s = ctx->priv_data;
+
+ if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->uses_2pass)
+ return 0;
+
av_free(s->b_base);
av_free(s->block_base);
- if (ctx->active_thread_type == FF_THREAD_FRAME && s->refreshctx && !s->parallelmode) {
+ if (s->uses_2pass) {
int sbs = s->sb_cols * s->sb_rows;
s->b_base = av_malloc(sizeof(VP9Block) * s->cols * s->rows);
s->uveob_base[0] = s->eob_base + 256;
s->uveob_base[1] = s->uveob_base[0] + 64;
}
+ s->block_alloc_using_2pass = s->uses_2pass;
return 0;
}
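
update_block_buffers() follows a lazy-reallocation pattern: it is meant to run once per frame (see the call added near the s->uses_2pass assignment further below) and is a no-op unless the 2-pass flag flipped or the buffers were freed by a size change. A generic sketch of the guard, with illustrative names rather than FFmpeg API:

    #include <stdlib.h>

    struct lazy_buf {
        void *data;
        int   alloc_key;   /* state the buffer was last sized for */
    };

    static int lazy_update(struct lazy_buf *b, int key, size_t size)
    {
        if (b->data && b->alloc_key == key)
            return 0;              /* fast path: nothing changed */
        free(b->data);
        b->data = malloc(size);
        if (!b->data)
            return -1;             /* AVERROR(ENOMEM) in the decoder */
        b->alloc_key = key;
        return 0;
    }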
w = get_bits(&s->gb, 16) + 1;
h = get_bits(&s->gb, 16) + 1;
}
- s->use_last_frame_mvs &= s->frames[LAST_FRAME].tf.f->width == w &&
- s->frames[LAST_FRAME].tf.f->height == h;
+ // Note that at this point in the code, "CUR_FRAME" has not yet
+ // been (re)allocated for the frame being parsed, and thus still
+ // represents the _last_ decoded frame
+ s->use_last_frame_mvs &= s->frames[CUR_FRAME].tf.f->width == w &&
+ s->frames[CUR_FRAME].tf.f->height == h;
if (get_bits1(&s->gb)) // display size
skip_bits(&s->gb, 32);
s->highprecisionmvs = get_bits1(&s->gb);
for (i = 0; i < 7; i++)
s->prob.seg[i] = get_bits1(&s->gb) ?
get_bits(&s->gb, 8) : 255;
- if ((s->segmentation.temporal = get_bits1(&s->gb)))
+ if ((s->segmentation.temporal = get_bits1(&s->gb))) {
for (i = 0; i < 3; i++)
s->prob.segpred[i] = get_bits1(&s->gb) ?
get_bits(&s->gb, 8) : 255;
+ }
+ }
+ if ((!s->segmentation.update_map || s->segmentation.temporal) &&
+ (w != s->frames[CUR_FRAME].tf.f->width ||
+ h != s->frames[CUR_FRAME].tf.f->height)) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Reference segmap (temp=%d,update=%d) enabled on size-change!\n",
+ s->segmentation.temporal, s->segmentation.update_map);
+ return AVERROR_INVALIDDATA;
}
if (get_bits1(&s->gb)) {
VP9Block *b = s->b;
if (mode == ZEROMV) {
- memset(mv, 0, sizeof(*mv) * 2);
+ AV_ZERO64(mv);
} else {
int hp;
}
}
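
The AV_ZERO64() replacement is size-exact: a VP56mv (per vp56.h) is two int16_t, so the mv[2] pair is 8 bytes and one 64-bit store clears both vectors. A portable restatement of what the macro boils down to here:

    #include <stdint.h>
    #include <string.h>

    typedef struct VP56mv { int16_t x, y; } VP56mv;   /* 4 bytes */

    static void zero_mv_pair(VP56mv mv[2])            /* 8 bytes total */
    {
        uint64_t zero = 0;
        memcpy(mv, &zero, sizeof(zero));  /* single store after inlining */
    }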
+static av_always_inline void setctx_2d(uint8_t *ptr, int w, int h,
+ ptrdiff_t stride, int v)
+{
+ switch (w) {
+ case 1:
+ do {
+ *ptr = v;
+ ptr += stride;
+ } while (--h);
+ break;
+ case 2: {
+ int v16 = v * 0x0101;
+ do {
+ AV_WN16A(ptr, v16);
+ ptr += stride;
+ } while (--h);
+ break;
+ }
+ case 4: {
+ uint32_t v32 = v * 0x01010101;
+ do {
+ AV_WN32A(ptr, v32);
+ ptr += stride;
+ } while (--h);
+ break;
+ }
+ case 8: {
+#if HAVE_FAST_64BIT
+ uint64_t v64 = v * 0x0101010101010101ULL;
+ do {
+ AV_WN64A(ptr, v64);
+ ptr += stride;
+ } while (--h);
+#else
+ uint32_t v32 = v * 0x01010101;
+ do {
+ AV_WN32A(ptr, v32);
+ AV_WN32A(ptr + 4, v32);
+ ptr += stride;
+ } while (--h);
+#endif
+ break;
+ }
+ }
+}
+
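setctx_2d() fills a w-by-h rectangle of byte contexts with one value, replacing per-row memset() loops with width-specialized aligned stores (w is always 1, 2, 4 or 8 in 8x8-pixel units here). A hypothetical self-check against the pattern it replaces, assuming the function above plus standard headers:

    #include <assert.h>
    #include <stdalign.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void setctx_2d_ref(uint8_t *ptr, int w, int h,
                              ptrdiff_t stride, int v)
    {
        do {
            memset(ptr, v, w);
            ptr += stride;
        } while (--h);
    }

    static void test_setctx_2d(void)
    {
        /* 8-byte alignment so the AV_WN*A stores in setctx_2d() are legal */
        alignas(8) uint8_t a[8 * 8], b[8 * 8];

        for (int w = 1; w <= 8; w *= 2) {
            memset(a, 0, sizeof(a));
            memset(b, 0, sizeof(b));
            setctx_2d(a, w, 4, 8, 0x5A);
            setctx_2d_ref(b, w, 4, 8, 0x5A);
            assert(!memcmp(a, b, sizeof(a)));
        }
    }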
static void decode_mode(AVCodecContext *ctx)
{
static const uint8_t left_ctx[N_BS_SIZES] = {
int w4 = FFMIN(s->cols - col, bwh_tab[1][b->bs][0]);
int h4 = FFMIN(s->rows - row, bwh_tab[1][b->bs][1]), y;
int have_a = row > 0, have_l = col > s->tiling.tile_col_start;
+ int vref, filter_id;
if (!s->segmentation.enabled) {
b->seg_id = 0;
} else if (s->keyframe || s->intraonly) {
- b->seg_id = s->segmentation.update_map ?
- vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg) : 0;
+ b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg);
} else if (!s->segmentation.update_map ||
(s->segmentation.temporal &&
vp56_rac_get_prob_branchy(&s->c,
memset(&s->above_segpred_ctx[col], 0, w4);
memset(&s->left_segpred_ctx[row7], 0, h4);
}
- if ((s->segmentation.enabled && s->segmentation.update_map) || s->keyframe) {
- uint8_t *segmap = s->frames[CUR_FRAME].segmentation_map;
-
- for (y = 0; y < h4; y++)
- memset(&segmap[(y + row) * 8 * s->sb_cols + col], b->seg_id, w4);
+ if (s->segmentation.enabled &&
+ (s->segmentation.update_map || s->keyframe || s->intraonly)) {
+ setctx_2d(&s->frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col],
+ w4, h4, 8 * s->sb_cols, b->seg_id);
}
b->skip = s->segmentation.enabled &&
c = 3;
}
- b->filter = vp8_rac_get_tree(&s->c, vp9_filter_tree,
+ filter_id = vp8_rac_get_tree(&s->c, vp9_filter_tree,
s->prob.p.filter[c]);
- s->counts.filter[c][b->filter]++;
+ s->counts.filter[c][filter_id]++;
+ b->filter = vp9_filter_lut[filter_id];
} else {
b->filter = s->filtermode;
}
AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
AV_COPY32(&b->mv[3][1], &b->mv[0][1]);
}
+
+ vref = b->ref[b->comp ? s->signbias[s->varcompref[0]] : 0];
+ }
+
+#if HAVE_FAST_64BIT
+#define SPLAT_CTX(var, val, n) \
+ switch (n) { \
+ case 1: var = val; break; \
+ case 2: AV_WN16A(&var, val * 0x0101); break; \
+ case 4: AV_WN32A(&var, val * 0x01010101); break; \
+ case 8: AV_WN64A(&var, val * 0x0101010101010101ULL); break; \
+ case 16: { \
+ uint64_t v64 = val * 0x0101010101010101ULL; \
+ AV_WN64A( &var, v64); \
+ AV_WN64A(&((uint8_t *) &var)[8], v64); \
+ break; \
+ } \
+ }
+#else
+#define SPLAT_CTX(var, val, n) \
+ switch (n) { \
+ case 1: var = val; break; \
+ case 2: AV_WN16A(&var, val * 0x0101); break; \
+ case 4: AV_WN32A(&var, val * 0x01010101); break; \
+ case 8: { \
+ uint32_t v32 = val * 0x01010101; \
+ AV_WN32A( &var, v32); \
+ AV_WN32A(&((uint8_t *) &var)[4], v32); \
+ break; \
+ } \
+ case 16: { \
+ uint32_t v32 = val * 0x01010101; \
+ AV_WN32A( &var, v32); \
+ AV_WN32A(&((uint8_t *) &var)[4], v32); \
+ AV_WN32A(&((uint8_t *) &var)[8], v32); \
+ AV_WN32A(&((uint8_t *) &var)[12], v32); \
+ break; \
+ } \
+ }
+#endif
+
+ switch (bwh_tab[1][b->bs][0]) {
+#define SET_CTXS(dir, off, n) \
+ do { \
+ SPLAT_CTX(s->dir##_skip_ctx[off], b->skip, n); \
+ SPLAT_CTX(s->dir##_txfm_ctx[off], b->tx, n); \
+ SPLAT_CTX(s->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
+ if (!s->keyframe && !s->intraonly) { \
+ SPLAT_CTX(s->dir##_intra_ctx[off], b->intra, n); \
+ SPLAT_CTX(s->dir##_comp_ctx[off], b->comp, n); \
+ SPLAT_CTX(s->dir##_mode_ctx[off], b->mode[3], n); \
+ if (!b->intra) { \
+ SPLAT_CTX(s->dir##_ref_ctx[off], vref, n); \
+ if (s->filtermode == FILTER_SWITCHABLE) { \
+ SPLAT_CTX(s->dir##_filter_ctx[off], filter_id, n); \
+ } \
+ } \
+ } \
+ } while (0)
+ case 1: SET_CTXS(above, col, 1); break;
+ case 2: SET_CTXS(above, col, 2); break;
+ case 4: SET_CTXS(above, col, 4); break;
+ case 8: SET_CTXS(above, col, 8); break;
+ }
+ switch (bwh_tab[1][b->bs][1]) {
+ case 1: SET_CTXS(left, row7, 1); break;
+ case 2: SET_CTXS(left, row7, 2); break;
+ case 4: SET_CTXS(left, row7, 4); break;
+ case 8: SET_CTXS(left, row7, 8); break;
}
+#undef SPLAT_CTX
+#undef SET_CTXS
- // FIXME this can probably be optimized
- memset(&s->above_skip_ctx[col], b->skip, w4);
- memset(&s->left_skip_ctx[row7], b->skip, h4);
- memset(&s->above_txfm_ctx[col], b->tx, w4);
- memset(&s->left_txfm_ctx[row7], b->tx, h4);
- memset(&s->above_partition_ctx[col], above_ctx[b->bs], w4);
- memset(&s->left_partition_ctx[row7], left_ctx[b->bs], h4);
if (!s->keyframe && !s->intraonly) {
- memset(&s->above_intra_ctx[col], b->intra, w4);
- memset(&s->left_intra_ctx[row7], b->intra, h4);
- memset(&s->above_comp_ctx[col], b->comp, w4);
- memset(&s->left_comp_ctx[row7], b->comp, h4);
- memset(&s->above_mode_ctx[col], b->mode[3], w4);
- memset(&s->left_mode_ctx[row7], b->mode[3], h4);
- if (s->filtermode == FILTER_SWITCHABLE && !b->intra ) {
- memset(&s->above_filter_ctx[col], b->filter, w4);
- memset(&s->left_filter_ctx[row7], b->filter, h4);
- b->filter = vp9_filter_lut[b->filter];
- }
if (b->bs > BS_8x8) {
int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);
AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][1], mv1);
}
}
-
- if (!b->intra) { // FIXME write 0xff or -1 if intra, so we can use this
- // as a direct check in above branches
- int vref = b->ref[b->comp ? s->signbias[s->varcompref[0]] : 0];
-
- memset(&s->above_ref_ctx[col], vref, w4);
- memset(&s->left_ref_ctx[row7], vref, h4);
- }
}
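
The SPLAT_CTX stores build on the usual byte-replication identity; the aligned, fixed-size left_*_ctx arrays declared at the top of this patch are what make the AV_WN64A variant safe, and the 16-entry (4x4-unit) caches take two 64-bit stores on fast-64-bit targets or four 32-bit ones otherwise, matching the two macro variants. For example:

    #include <stdint.h>

    static void demo_splat_values(void)
    {
        uint8_t  v   = 0x5A;
        uint16_t v16 = v * 0x0101;                /* 0x5A5A             */
        uint32_t v32 = v * 0x01010101U;           /* 0x5A5A5A5A         */
        uint64_t v64 = v * 0x0101010101010101ULL; /* 0x5A5A5A5A5A5A5A5A */
        (void)v16; (void)v32; (void)v64;
    }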
// FIXME kinda ugly
const int16_t *y_band_counts = band_counts[b->tx];
const int16_t *uv_band_counts = band_counts[b->uvtx];
+#define MERGE(la, end, step, rd) \
+ for (n = 0; n < end; n += step) \
+ la[n] = !!rd(&la[n])
+#define MERGE_CTX(step, rd) \
+ do { \
+ MERGE(l, end_y, step, rd); \
+ MERGE(a, end_x, step, rd); \
+ } while (0)
+
/* y tokens */
- if (b->tx > TX_4X4) { // FIXME slow
- for (y = 0; y < end_y; y += step1d)
- for (x = 1; x < step1d; x++)
- l[y] |= l[y + x];
- for (x = 0; x < end_x; x += step1d)
- for (y = 1; y < step1d; y++)
- a[x] |= a[x + y];
+ switch (b->tx) {
+ case TX_8X8: MERGE_CTX(2, AV_RN16A); break;
+ case TX_16X16: MERGE_CTX(4, AV_RN32A); break;
+ case TX_32X32: MERGE_CTX(8, AV_RN64A); break;
}
for (n = 0, y = 0; y < end_y; y += step1d) {
for (x = 0; x < end_x; x += step1d, n += step) {
}
}
}
- if (b->tx > TX_4X4) { // FIXME slow
- for (y = 0; y < end_y; y += step1d)
- memset(&l[y + 1], l[y], FFMIN(end_y - y - 1, step1d - 1));
- for (x = 0; x < end_x; x += step1d)
- memset(&a[x + 1], a[x], FFMIN(end_x - x - 1, step1d - 1));
+#define SPLAT(la, end, step, cond) \
+ if (step == 2) { \
+ for (n = 1; n < end; n += step) \
+ la[n] = la[n - 1]; \
+ } else if (step == 4) { \
+ if (cond) { \
+ for (n = 0; n < end; n += step) \
+ AV_WN32A(&la[n], la[n] * 0x01010101); \
+ } else { \
+ for (n = 0; n < end; n += step) \
+ memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
+ } \
+ } else /* step == 8 */ { \
+ if (cond) { \
+ if (HAVE_FAST_64BIT) { \
+ for (n = 0; n < end; n += step) \
+ AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
+ } else { \
+ for (n = 0; n < end; n += step) { \
+ uint32_t v32 = la[n] * 0x01010101; \
+ AV_WN32A(&la[n], v32); \
+ AV_WN32A(&la[n + 4], v32); \
+ } \
+ } \
+ } else { \
+ for (n = 0; n < end; n += step) \
+ memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
+ } \
+ }
+#define SPLAT_CTX(step) \
+ do { \
+ SPLAT(a, end_x, step, end_x == w4); \
+ SPLAT(l, end_y, step, end_y == h4); \
+ } while (0)
+ switch (b->tx) {
+ case TX_8X8: SPLAT_CTX(2); break;
+ case TX_16X16: SPLAT_CTX(4); break;
+ case TX_32X32: SPLAT_CTX(8); break;
}
p = s->prob.coef[b->uvtx][1 /* uv */][!b->intra];
for (pl = 0; pl < 2; pl++) {
a = &s->above_uv_nnz_ctx[pl][col];
l = &s->left_uv_nnz_ctx[pl][row & 7];
- if (b->uvtx > TX_4X4) { // FIXME slow
- for (y = 0; y < end_y; y += uvstep1d)
- for (x = 1; x < uvstep1d; x++)
- l[y] |= l[y + x];
- for (x = 0; x < end_x; x += uvstep1d)
- for (y = 1; y < uvstep1d; y++)
- a[x] |= a[x + y];
+ switch (b->uvtx) {
+ case TX_8X8: MERGE_CTX(2, AV_RN16A); break;
+ case TX_16X16: MERGE_CTX(4, AV_RN32A); break;
+ case TX_32X32: MERGE_CTX(8, AV_RN64A); break;
}
for (n = 0, y = 0; y < end_y; y += uvstep1d) {
for (x = 0; x < end_x; x += uvstep1d, n += uvstep) {
}
}
}
- if (b->uvtx > TX_4X4) { // FIXME slow
- for (y = 0; y < end_y; y += uvstep1d)
- memset(&l[y + 1], l[y], FFMIN(end_y - y - 1, uvstep1d - 1));
- for (x = 0; x < end_x; x += uvstep1d)
- memset(&a[x + 1], a[x], FFMIN(end_x - x - 1, uvstep1d - 1));
+ switch (b->uvtx) {
+ case TX_8X8: SPLAT_CTX(2); break;
+ case TX_16X16: SPLAT_CTX(4); break;
+ case TX_32X32: SPLAT_CTX(8); break;
}
}
}
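
To make the MERGE_CTX/SPLAT_CTX pair above concrete: for a TX_16X16 block, four per-4x4 nnz bytes collapse into one flag before token decoding (any non-zero byte in the group sets it), and the updated flag is copied back across the group afterwards. A reference form using unaligned-safe loads; the real macros instead pick a wide aligned store when the block is not clipped by the frame edge (the cond, end_x == w4) and a bounded memset when it is:

    #include <stdint.h>
    #include <string.h>

    /* merge: one context flag per 16x16 transform (four 4x4 units) */
    static void merge4(uint8_t *la, int end)
    {
        for (int n = 0; n < end; n += 4) {
            uint32_t v;
            memcpy(&v, &la[n], 4);   /* AV_RN32A in the decoder */
            la[n] = !!v;
        }
    }

    /* splat: copy the (possibly updated) flag back over its units */
    static void splat4(uint8_t *la, int end)
    {
        for (int n = 0; n < end; n += 4) {
            int len = end - n - 1 < 3 ? end - n - 1 : 3;  /* FFMIN */
            memset(&la[n + 1], la[n], len);
        }
    }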
if (n_px_need <= n_px_have) {
for (i = 0; i < n_px_need; i++)
- l[i] = dst[i * stride - 1];
+ l[n_px_need - 1 - i] = dst[i * stride - 1];
} else {
for (i = 0; i < n_px_have; i++)
- l[i] = dst[i * stride - 1];
- memset(&l[i], l[i - 1], n_px_need - n_px_have);
+ l[n_px_need - 1 - i] = dst[i * stride - 1];
+ memset(l, l[n_px_need - n_px_have], n_px_need - n_px_have);
}
} else {
memset(l, 129, 4 << tx);
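
The left-edge array is now filled bottom-to-top: l[n_px_need - 1] holds the top-most neighbour pixel and l[0] the bottom-most, with the bottom padded by replicating the last available row when the block hangs off the frame. A condensed restatement of the two branches above:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void gather_left(uint8_t *l, const uint8_t *dst, ptrdiff_t stride,
                            int n_px_need, int n_px_have)
    {
        int i, n = n_px_need <= n_px_have ? n_px_need : n_px_have;

        /* e.g. need = 8, have = 5: rows 0..4 land in l[7]..l[3] */
        for (i = 0; i < n; i++)
            l[n_px_need - 1 - i] = dst[i * stride - 1];
        /* ...and l[0..2] are padded with l[3] (row 4) */
        if (n < n_px_need)
            memset(l, l[n_px_need - n], n_px_need - n);
    }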
int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless;
int uvstep1d = 1 << b->uvtx, p;
uint8_t *dst = s->dst[0], *dst_r = s->frames[CUR_FRAME].tf.f->data[0] + y_off;
+ LOCAL_ALIGNED_16(uint8_t, a_buf, [48]);
+ LOCAL_ALIGNED_16(uint8_t, l, [32]);
for (n = 0, y = 0; y < end_y; y += step1d) {
uint8_t *ptr = dst, *ptr_r = dst_r;
ptr_r += 4 * step1d, n += step) {
int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
y * 2 + x : 0];
- LOCAL_ALIGNED_16(uint8_t, a_buf, [48]);
- uint8_t *a = &a_buf[16], l[32];
+ uint8_t *a = &a_buf[16];
enum TxfmType txtp = vp9_intra_txfm_type[mode];
int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d,
ptr_r += 4 * uvstep1d, n += step) {
int mode = b->uvmode;
- LOCAL_ALIGNED_16(uint8_t, a_buf, [48]);
- uint8_t *a = &a_buf[16], l[32];
+ uint8_t *a = &a_buf[16];
int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
mode = check_intra_mode(s, mode, &a, ptr_r,
VP9Context *s = ctx->priv_data;
VP9Block *b = s->b;
enum BlockSize bs = bl * 3 + bp;
- int y, w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl;
+ int w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl;
int emu[2];
AVFrame *f = s->frames[CUR_FRAME].tf.f;
if (!b->skip) {
decode_coeffs(ctx);
} else {
- int pl;
+ int row7 = s->row7;
- memset(&s->above_y_nnz_ctx[col * 2], 0, w4 * 2);
- memset(&s->left_y_nnz_ctx[(row & 7) << 1], 0, h4 * 2);
- for (pl = 0; pl < 2; pl++) {
- memset(&s->above_uv_nnz_ctx[pl][col], 0, w4);
- memset(&s->left_uv_nnz_ctx[pl][row & 7], 0, h4);
+#define SPLAT_ZERO_CTX(v, n) \
+ switch (n) { \
+ case 1: v = 0; break; \
+ case 2: AV_ZERO16(&v); break; \
+ case 4: AV_ZERO32(&v); break; \
+ case 8: AV_ZERO64(&v); break; \
+ case 16: AV_ZERO128(&v); break; \
+ }
+#define SPLAT_ZERO_YUV(dir, var, off, n) \
+ do { \
+ SPLAT_ZERO_CTX(s->dir##_y_##var[off * 2], n * 2); \
+ SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off], n); \
+ SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off], n); \
+ } while (0)
+
+ switch (w4) {
+ case 1: SPLAT_ZERO_YUV(above, nnz_ctx, col, 1); break;
+ case 2: SPLAT_ZERO_YUV(above, nnz_ctx, col, 2); break;
+ case 4: SPLAT_ZERO_YUV(above, nnz_ctx, col, 4); break;
+ case 8: SPLAT_ZERO_YUV(above, nnz_ctx, col, 8); break;
+ }
+ switch (h4) {
+ case 1: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 1); break;
+ case 2: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 2); break;
+ case 4: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 4); break;
+ case 8: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 8); break;
}
}
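
SPLAT_ZERO_YUV performs the same stores as the deleted memset() loop, but with the size a compile-time constant so each collapses to a single AV_ZEROxx. The luma offset and size are doubled because luma nnz contexts are tracked per 4x4 unit, while the 4:2:0-subsampled chroma planes need only one per 8x8 luma unit. For reference, the variable-size equivalent removed above was:

    memset(&s->above_y_nnz_ctx[col * 2],  0, w4 * 2);
    memset(&s->above_uv_nnz_ctx[0][col], 0, w4);
    memset(&s->above_uv_nnz_ctx[1][col], 0, w4);
    /* ...and likewise for the left_* arrays with row7/h4 */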
if (s->pass == 1) {
(lvl = s->segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1]
[b->mode[3] != ZEROMV]) > 0) {
int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4);
- int skip_inter = !b->intra && b->skip;
+ int skip_inter = !b->intra && b->skip, col7 = s->col7, row7 = s->row7;
- for (y = 0; y < h4; y++)
- memset(&lflvl->level[((row & 7) + y) * 8 + (col & 7)], lvl, w4);
- mask_edges(lflvl, 0, row & 7, col & 7, x_end, y_end, 0, 0, b->tx, skip_inter);
- mask_edges(lflvl, 1, row & 7, col & 7, x_end, y_end,
+ setctx_2d(&lflvl->level[row7 * 8 + col7], w4, h4, 8, lvl);
+ mask_edges(lflvl, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
+ mask_edges(lflvl, 1, row7, col7, x_end, y_end,
s->cols & 1 && col + w4 >= s->cols ? s->cols & 7 : 0,
s->rows & 1 && row + h4 >= s->rows ? s->rows & 7 : 0,
b->uvtx, skip_inter);
static void free_buffers(VP9Context *s)
{
- av_freep(&s->above_partition_ctx);
+ av_freep(&s->intra_pred_data[0]);
av_freep(&s->b_base);
av_freep(&s->block_base);
}
memset(s->above_segpred_ctx, 0, s->cols);
s->pass = s->uses_2pass =
ctx->active_thread_type == FF_THREAD_FRAME && s->refreshctx && !s->parallelmode;
+ if ((res = update_block_buffers(ctx)) < 0) {
+ av_log(ctx, AV_LOG_ERROR,
+ "Failed to allocate block buffers\n");
+ return res;
+ }
if (s->refreshctx && s->parallelmode) {
int j, k, l, m;
VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
// detect size changes in other threads
- if (s->above_partition_ctx &&
- (!ssrc->above_partition_ctx || s->cols != ssrc->cols || s->rows != ssrc->rows)) {
+ if (s->intra_pred_data[0] &&
+ (!ssrc->intra_pred_data[0] || s->cols != ssrc->cols || s->rows != ssrc->rows)) {
free_buffers(s);
}