 * VP8 compatible video decoder
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 * Copyright (C) 2010 Jason Garrett-Glaser
 * This file is part of FFmpeg.
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

#include "rectangle.h"

// todo: make it possible to check for at least (i4x4 or split_mv)
// in one op. are others needed?
    AVCodecContext *avctx;
    vp8_mc_func put_pixels_tab[3][3][3];
    uint8_t *edge_emu_buffer;
    VP56RangeCoder c;   ///< header context, includes mb modes and motion vectors
    int mb_width;   /* number of horizontal MB */
    int mb_height;  /* number of vertical MB */
    int update_last;    ///< update VP56_FRAME_PREVIOUS with the current one
    int update_golden;  ///< VP56_FRAME_NONE if not updated, or which frame to copy if so

     * If this flag is not set, all the probability updates
     * are discarded after this frame is decoded.
    int update_probabilities;

     * All coefficients are contained in separate arith coding contexts.
     * There can be 1, 2, 4, or 8 of these after the header context.
    int num_coeff_partitions;
    VP56RangeCoder coeff_partition[8];

    VP8Macroblock *macroblocks;
    VP8Macroblock *macroblocks_base;
    VP8FilterStrength *filter_strength;

    uint8_t *intra4x4_pred_mode_top;
    uint8_t intra4x4_pred_mode_left[4];
    uint8_t *segmentation_map;

     * Cache of the top row needed for intra prediction
     * 16 for luma, 8 for each chroma plane
    uint8_t (*top_border)[16+8+8];

     * For coeff decode, we need to know whether the above block had non-zero
     * coefficients. This means for each macroblock, we need data for 4 luma
     * blocks, 2 u blocks, 2 v blocks, and the luma dc block, for a total of 9
     * per macroblock. We keep the last row in top_nnz.
    uint8_t (*top_nnz)[9];
    DECLARE_ALIGNED(8, uint8_t, left_nnz)[9];

     * This is the index plus one of the last non-zero coeff
     * for each of the blocks in the current macroblock.
     *     1  -> dc-only (special transform)
     *     2+ -> full transform
    DECLARE_ALIGNED(16, uint8_t, non_zero_count_cache)[6][4];
    DECLARE_ALIGNED(16, DCTELEM, block)[6][4][16];
    DECLARE_ALIGNED(16, DCTELEM, block_dc)[16];
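    /* Layout used by the decode loops below: block[0..3][0..3] are the 16 luma
     * 4x4 sub-blocks indexed [y][x]; block[4] and block[5] hold the four U and
     * four V sub-blocks indexed [(y<<1)+x]; block_dc is the second-order luma
     * DC (WHT) block decoded separately for non-I4x4, non-split macroblocks. */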
    uint8_t intra4x4_pred_mode_mb[16];

    int chroma_pred_mode;   ///< 8x8c pred mode of the current macroblock
    int segment;            ///< segment of the current macroblock
    int sign_bias[4];       ///< one state [0, 1] per ref frame type

     * Base parameters for segmentation, i.e. per-macroblock parameters.
     * These must be kept unchanged even if segmentation is not used for
     * a frame, since the values persist between interframes.
    int8_t base_quant[4];
    int8_t filter_level[4];     ///< base loop filter level

     * Macroblocks can have one of 4 different quants in a frame when
     * segmentation is enabled.
     * If segmentation is disabled, only the first segment's values are used.
    // [0] - DC qmul [1] - AC qmul
    int16_t luma_qmul[2];
    int16_t luma_dc_qmul[2];    ///< luma dc-only block quant
    int16_t chroma_qmul[2];

    int enabled;    ///< whether each mb can have a different strength based on mode/ref

     * filter strength adjustment for the following macroblock modes:
     * [2] - inter modes except for zero or split mv
     * i16x16 modes never have any adjustment

     * filter strength adjustment for macroblocks that reference:
     * [0] - intra / VP56_FRAME_CURRENT
     * [1] - VP56_FRAME_PREVIOUS
     * [2] - VP56_FRAME_GOLDEN
     * [3] - altref / VP56_FRAME_GOLDEN2
     * These are all of the updatable probabilities for binary decisions.
     * They are only implicitly reset on keyframes, making it quite likely
     * for an interframe to desync if a prior frame's header was corrupt
     * or missing outright!
    uint8_t segmentid[3];
    uint8_t pred16x16[4];
    /* Padded to allow overreads */
    uint8_t token[4][17][3][NUM_DCT_TOKENS-1];

static void vp8_decode_flush(AVCodecContext *avctx)
    VP8Context *s = avctx->priv_data;

    for (i = 0; i < 4; i++)
        if (s->frames[i].data[0])
            avctx->release_buffer(avctx, &s->frames[i]);
    memset(s->framep, 0, sizeof(s->framep));

    av_freep(&s->macroblocks_base);
    av_freep(&s->filter_strength);
    av_freep(&s->intra4x4_pred_mode_top);
    av_freep(&s->top_nnz);
    av_freep(&s->edge_emu_buffer);
    av_freep(&s->top_border);
    av_freep(&s->segmentation_map);

    s->macroblocks = NULL;

static int update_dimensions(VP8Context *s, int width, int height)
    if (avcodec_check_dimensions(s->avctx, width, height))
        return AVERROR_INVALIDDATA;

    vp8_decode_flush(s->avctx);

    avcodec_set_dimensions(s->avctx, width, height);

    s->mb_width  = (s->avctx->coded_width +15) / 16;
    s->mb_height = (s->avctx->coded_height+15) / 16;

    // we allocate a border around the top/left of intra4x4 modes
    // this is 4 blocks for intra4x4 to keep 4-byte alignment for fill_rectangle
    s->mb_stride = s->mb_width+1;
    s->b4_stride = 4*s->mb_stride;

    s->macroblocks_base = av_mallocz((s->mb_stride+s->mb_height*2+2)*sizeof(*s->macroblocks));
    s->filter_strength = av_mallocz(s->mb_stride*sizeof(*s->filter_strength));
    s->intra4x4_pred_mode_top = av_mallocz(s->b4_stride*4);
    s->top_nnz = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
    s->top_border = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));
    s->segmentation_map = av_mallocz(s->mb_stride*s->mb_height);

    if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_top ||
        !s->top_nnz || !s->top_border || !s->segmentation_map)
        return AVERROR(ENOMEM);

    s->macroblocks = s->macroblocks_base + 1;

static void parse_segment_info(VP8Context *s)
    VP56RangeCoder *c = &s->c;

    s->segmentation.update_map = vp8_rac_get(c);

    if (vp8_rac_get(c)) { // update segment feature data
        s->segmentation.absolute_vals = vp8_rac_get(c);

        for (i = 0; i < 4; i++)
            s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);

        for (i = 0; i < 4; i++)
            s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);

    if (s->segmentation.update_map)
        for (i = 0; i < 3; i++)
            s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;

static void update_lf_deltas(VP8Context *s)
    VP56RangeCoder *c = &s->c;

    for (i = 0; i < 4; i++)
        s->lf_delta.ref[i] = vp8_rac_get_sint(c, 6);

    for (i = 0; i < 4; i++)
        s->lf_delta.mode[i] = vp8_rac_get_sint(c, 6);

static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
    const uint8_t *sizes = buf;

    s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);

    buf += 3*(s->num_coeff_partitions-1);
    buf_size -= 3*(s->num_coeff_partitions-1);

    for (i = 0; i < s->num_coeff_partitions-1; i++) {
        int size = AV_RL24(sizes + 3*i);
        if (buf_size - size < 0)

        vp56_init_range_decoder(&s->coeff_partition[i], buf, size);

    vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);

static void get_quants(VP8Context *s)
    VP56RangeCoder *c = &s->c;

    int yac_qi     = vp8_rac_get_uint(c, 7);
    int ydc_delta  = vp8_rac_get_sint(c, 4);
    int y2dc_delta = vp8_rac_get_sint(c, 4);
    int y2ac_delta = vp8_rac_get_sint(c, 4);
    int uvdc_delta = vp8_rac_get_sint(c, 4);
    int uvac_delta = vp8_rac_get_sint(c, 4);

    for (i = 0; i < 4; i++) {
        if (s->segmentation.enabled) {
            base_qi = s->segmentation.base_quant[i];
            if (!s->segmentation.absolute_vals)

        s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + ydc_delta , 0, 127)];
        s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi , 0, 127)];
        s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip(base_qi + y2dc_delta, 0, 127)];
        s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip(base_qi + y2ac_delta, 0, 127)] / 100;
        s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + uvdc_delta, 0, 127)];
        s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi + uvac_delta, 0, 127)];

        s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
        s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
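        /* Sketch of how these factors are applied in decode_block_coeffs(): a
         * decoded coefficient is multiplied by qmul[0] for index 0 (DC) or by
         * qmul[1] for indices 1..15 (AC). The second-order luma DC (WHT) block
         * uses its own scaled pair, hence the 2x DC and 155/100 AC factors and
         * the clamps above (a minimum of 8 and a chroma DC maximum of 132). */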
 * Determine which buffers golden and altref should be updated with after this frame.
 * The spec isn't clear here, so I'm going by my understanding of what libvpx does
 * Intra frames update all 3 references
 * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
 * If the update (golden|altref) flag is set, it's updated with the current frame
 *     if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
 * If the flag is not set, the number read means:
 *     1: VP56_FRAME_PREVIOUS
 *     2: update golden with altref, or update altref with golden
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
    VP56RangeCoder *c = &s->c;

        return VP56_FRAME_CURRENT;

    switch (vp8_rac_get_uint(c, 2)) {
        return VP56_FRAME_PREVIOUS;
        return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
    return VP56_FRAME_NONE;

static void update_refs(VP8Context *s)
    VP56RangeCoder *c = &s->c;

    int update_golden = vp8_rac_get(c);
    int update_altref = vp8_rac_get(c);

    s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
    s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);

static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
    VP56RangeCoder *c = &s->c;
    int header_size, hscale, vscale, i, j, k, l, m, ret;
    int width = s->avctx->width;
    int height = s->avctx->height;

    s->keyframe = !(buf[0] & 1);
    s->profile = (buf[0]>>1) & 7;
    s->invisible = !(buf[0] & 0x10);
    header_size = AV_RL24(buf) >> 5;

        av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);

        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
    else // profile 1-3 use bilinear, 4+ aren't defined so whatever
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));

    if (header_size > buf_size - 7*s->keyframe) {
        av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
        return AVERROR_INVALIDDATA;

        if (AV_RL24(buf) != 0x2a019d) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
            return AVERROR_INVALIDDATA;
        width = AV_RL16(buf+3) & 0x3fff;
        height = AV_RL16(buf+5) & 0x3fff;
        hscale = buf[4] >> 6;
        vscale = buf[6] >> 6;

        if (hscale || vscale)
            av_log_missing_feature(s->avctx, "Upscaling", 1);

        s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
        for (i = 0; i < 4; i++)
            for (j = 0; j < 16; j++)
                memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
                       sizeof(s->prob->token[i][j]));
        memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
        memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
        memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
        memset(&s->segmentation, 0, sizeof(s->segmentation));

    if (!s->macroblocks_base || /* first frame */
        width != s->avctx->width || height != s->avctx->height) {
        if ((ret = update_dimensions(s, width, height)) < 0)
    vp56_init_range_decoder(c, buf, header_size);
    buf_size -= header_size;

            av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
        vp8_rac_get(c); // whether we can skip clamping in dsp functions

    if ((s->segmentation.enabled = vp8_rac_get(c)))
        parse_segment_info(s);
        s->segmentation.update_map = 0; // FIXME: move this to some init function?

    s->filter.simple = vp8_rac_get(c);
    s->filter.level = vp8_rac_get_uint(c, 6);
    s->filter.sharpness = vp8_rac_get_uint(c, 3);

    if ((s->lf_delta.enabled = vp8_rac_get(c)))

    if (setup_partitions(s, buf, buf_size)) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
        return AVERROR_INVALIDDATA;

        s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
        s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);

    // if we aren't saving this frame's probabilities for future frames,
    // make a copy of the current probabilities
    if (!(s->update_probabilities = vp8_rac_get(c)))
        s->prob[1] = s->prob[0];

    s->update_last = s->keyframe || vp8_rac_get(c);

    for (i = 0; i < 4; i++)
        for (j = 0; j < 8; j++)
            for (k = 0; k < 3; k++)
                for (l = 0; l < NUM_DCT_TOKENS-1; l++)
                    if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
                        int prob = vp8_rac_get_uint(c, 8);
                        for (m = 0; m < 16; m++)
                            if (vp8_coeff_band[m] == j)
                                s->prob->token[i][m][k][l] = prob;

    if ((s->mbskip_enabled = vp8_rac_get(c)))
        s->prob->mbskip = vp8_rac_get_uint(c, 8);

        s->prob->intra = vp8_rac_get_uint(c, 8);
        s->prob->last = vp8_rac_get_uint(c, 8);
        s->prob->golden = vp8_rac_get_uint(c, 8);

        for (i = 0; i < 4; i++)
            s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);

        for (i = 0; i < 3; i++)
            s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);

        // 17.2 MV probability update
        for (i = 0; i < 2; i++)
            for (j = 0; j < 19; j++)
                if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
                    s->prob->mvc[i][j] = vp8_rac_get_nn(c);

static av_always_inline
void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src, int mb_x, int mb_y)
#define MARGIN (16 << 2)
    dst->x = av_clip(src->x, -((mb_x << 6) + MARGIN),
                     ((s->mb_width - 1 - mb_x) << 6) + MARGIN);
    dst->y = av_clip(src->y, -((mb_y << 6) + MARGIN),
                     ((s->mb_height - 1 - mb_y) << 6) + MARGIN);
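    /* Motion vectors are handled in quarter-pel units here (vp8_mc() shifts the
     * luma MV down by 2 for the integer part), so a macroblock position scales
     * by 64 = 16 pixels * 4, and MARGIN (16 << 2) lets a vector point at most
     * 16 pixels outside the frame; vp8_mc() covers the resulting out-of-frame
     * reads with ff_emulated_edge_mc(). */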
static av_always_inline
void find_near_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
                   VP56mv near[2], VP56mv *best, uint8_t cnt[4])
    VP8Macroblock *mb_edge[3] = { mb + 2 /* top */,
                                  mb + 1 /* top-left */ };
    enum { EDGE_TOP, EDGE_LEFT, EDGE_TOPLEFT };
    VP56mv near_mv[4] = {{ 0 }};
    enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    int best_idx = CNT_ZERO;
    int cur_sign_bias = s->sign_bias[mb->ref_frame];
    int *sign_bias = s->sign_bias;

    /* Process MB on top, left and top-left */
#define MV_EDGE_CHECK(n)\
        VP8Macroblock *edge = mb_edge[n];\
        int edge_ref = edge->ref_frame;\
        if (edge_ref != VP56_FRAME_CURRENT) {\
            uint32_t mv = AV_RN32A(&edge->mv);\
            if (cur_sign_bias != sign_bias[edge_ref]) {\
                /* SWAR negate of the values in mv. */\
                mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
            if (!n || mv != AV_RN32A(&near_mv[idx]))\
                AV_WN32A(&near_mv[++idx], mv);\
            cnt[idx] += 1 + (n != 2);\
            cnt[CNT_ZERO] += 1 + (n != 2);\

    /* If we have three distinct MVs, merge first and last if they're the same */
    if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1+EDGE_TOP]) == AV_RN32A(&near_mv[1+EDGE_TOPLEFT]))
        cnt[CNT_NEAREST] += 1;

    cnt[CNT_SPLITMV] = ((mb_edge[EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
                        (mb_edge[EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
                        (mb_edge[EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
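    /* cnt[CNT_SPLITMV] is re-purposed here as a weighted count of neighbouring
     * macroblocks coded with split MVs (left and top weighted 2, top-left 1);
     * decode_mb_mode() uses it as the context for choosing VP8_MVMODE_SPLIT. */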
    /* Swap near and nearest if necessary */
    if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
        FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
        FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);

    /* Choose the best mv out of 0,0 and the nearest mv */
    if (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])
        best_idx = CNT_NEAREST;

    mb->mv = near_mv[best_idx];
    near[0] = near_mv[CNT_NEAREST];
    near[1] = near_mv[CNT_NEAR];
 * Motion vector coding, 17.1.
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
    if (vp56_rac_get_prob_branchy(c, p[0])) {
        for (i = 0; i < 3; i++)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        for (i = 9; i > 3; i--)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
        const uint8_t *ps = p+2;
        bit = vp56_rac_get_prob(c, *ps);
        bit = vp56_rac_get_prob(c, *ps);
        x += vp56_rac_get_prob(c, *ps);

    return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
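    /* The sign is read last and only for a non-zero magnitude (the "x &&"
     * guard above); a zero MV component never carries a sign bit. */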
static av_always_inline
const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
        return vp8_submv_prob[4-!!left];
        return vp8_submv_prob[2];
    return vp8_submv_prob[1-!!left];

 * Split motion vector prediction, 16.4.
 * @returns the number of motion vectors parsed (2, 4 or 16)
static av_always_inline
int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb)
    int part_idx = mb->partitioning =
        vp8_rac_get_tree(c, vp8_mbsplit_tree, vp8_mbsplit_prob);
    int n, num = vp8_mbsplit_count[part_idx];
    VP8Macroblock *top_mb = &mb[2];
    VP8Macroblock *left_mb = &mb[-1];
    const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
                  *mbsplits_top = vp8_mbsplits[top_mb->partitioning],
                  *mbsplits_cur = vp8_mbsplits[part_idx],
                  *firstidx = vp8_mbfirstidx[part_idx];
    VP56mv *top_mv = top_mb->bmv;
    VP56mv *left_mv = left_mb->bmv;
    VP56mv *cur_mv = mb->bmv;

    for (n = 0; n < num; n++) {
        uint32_t left, above;
        const uint8_t *submv_prob;

            left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
            left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
            above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
            above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);

        submv_prob = get_submv_prob(left, above);

        switch (vp8_rac_get_tree(c, vp8_submv_ref_tree, submv_prob)) {
        case VP8_SUBMVMODE_NEW4X4:
            mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
            mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
        case VP8_SUBMVMODE_ZERO4X4:
            AV_ZERO32(&mb->bmv[n]);
        case VP8_SUBMVMODE_LEFT4X4:
            AV_WN32A(&mb->bmv[n], left);
        case VP8_SUBMVMODE_TOP4X4:
            AV_WN32A(&mb->bmv[n], above);

static av_always_inline
void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c,
                           int mb_x, int keyframe)
    uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
        uint8_t* const top = s->intra4x4_pred_mode_top + 4 * mb_x;
        uint8_t* const left = s->intra4x4_pred_mode_left;
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
                *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
                left[y] = top[x] = *intra4x4;
        for (i = 0; i < 16; i++)
            intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);

static av_always_inline
void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment)
    VP56RangeCoder *c = &s->c;

    if (s->segmentation.update_map)
        *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
    s->segment = *segment;

    mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;

        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);

        if (mb->mode == MODE_I4x4) {
            decode_intra4x4_modes(s, c, mb_x, 1);
            const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u;
            AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
            AV_WN32A(s->intra4x4_pred_mode_left, modes);

        s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
        mb->ref_frame = VP56_FRAME_CURRENT;
    } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
        VP56mv near[2], best;
        uint8_t cnt[4] = { 0 };

        if (vp56_rac_get_prob_branchy(c, s->prob->last))
            mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
                VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
            mb->ref_frame = VP56_FRAME_PREVIOUS;
        s->ref_count[mb->ref_frame-1]++;

        // motion vectors, 16.3
        find_near_mvs(s, mb, mb_x, mb_y, near, &best, cnt);
        if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[0]][0])) {
            if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[1]][1])) {
                if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[2]][2])) {
                    if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[3]][3])) {
                        mb->mode = VP8_MVMODE_SPLIT;
                        clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
                        mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1];
                        mb->mode = VP8_MVMODE_NEW;
                        clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
                        mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
                        mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
                    mb->mode = VP8_MVMODE_NEAR;
                    clamp_mv(s, &mb->mv, &near[1], mb_x, mb_y);
                mb->mode = VP8_MVMODE_NEAREST;
                clamp_mv(s, &mb->mv, &near[0], mb_x, mb_y);
            mb->mode = VP8_MVMODE_ZERO;

        if (mb->mode != VP8_MVMODE_SPLIT) {
            mb->partitioning = VP8_SPLITMVMODE_NONE;

        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);

        if (mb->mode == MODE_I4x4)
            decode_intra4x4_modes(s, c, mb_x, 0);

        s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
        mb->ref_frame = VP56_FRAME_CURRENT;
        mb->partitioning = VP8_SPLITMVMODE_NONE;
        AV_ZERO32(&mb->bmv[0]);
 * @param c arithmetic bitstream reader context
 * @param block destination for block coefficients
 * @param probs probabilities to use when reading trees from the bitstream
 * @param i initial coeff index, 0 unless a separate DC block is coded
 * @param zero_nhood the initial prediction context for number of surrounding
 *                   all-zero blocks (only left/top, so 0-2)
 * @param qmul array holding the dc/ac dequant factor at position 0/1
 * @return 0 if no coeffs were decoded
 *         otherwise, the index of the last coeff decoded plus one
static int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16],
                               uint8_t probs[8][3][NUM_DCT_TOKENS-1],
                               int i, int zero_nhood, int16_t qmul[2])
    uint8_t *token_prob = probs[i][zero_nhood];

        if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB

        if (!vp56_rac_get_prob_branchy(c, token_prob[1])) { // DCT_0
                return nonzero; // invalid input; blocks should end with EOB
            token_prob = probs[i][0];

        if (!vp56_rac_get_prob_branchy(c, token_prob[2])) { // DCT_1
            token_prob = probs[i+1][1];
            if (!vp56_rac_get_prob_branchy(c, token_prob[3])) { // DCT 2,3,4
                coeff = vp56_rac_get_prob(c, token_prob[4]);
                    coeff += vp56_rac_get_prob(c, token_prob[5]);
                if (!vp56_rac_get_prob_branchy(c, token_prob[6])) {
                    if (!vp56_rac_get_prob_branchy(c, token_prob[7])) { // DCT_CAT1
                        coeff = 5 + vp56_rac_get_prob(c, vp8_dct_cat1_prob[0]);
                        coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[0]) << 1;
                        coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[1]);
                } else { // DCT_CAT3 and up
                    int a = vp56_rac_get_prob(c, token_prob[8]);
                    int b = vp56_rac_get_prob(c, token_prob[9+a]);
                    int cat = (a<<1) + b;
                    coeff = 3 + (8<<cat);
                    coeff += vp8_rac_get_coeff(c, vp8_dct_cat_prob[cat]);
            token_prob = probs[i+1][2];

        // todo: full [16] qmat? load into register?
        block[zigzag_scan[i]] = (vp8_rac_get(c) ? -coeff : coeff) * qmul[!!i];
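        /* qmul[!!i] picks the DC factor only for coefficient 0 and the AC
         * factor for all later indices; each value lands directly in its
         * zigzag position, already dequantized. */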
static av_always_inline
void decode_mb_coeffs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
                      uint8_t t_nnz[9], uint8_t l_nnz[9])
    int i, x, y, luma_start = 0, luma_ctx = 3;
    int nnz_pred, nnz, nnz_total = 0;
    int segment = s->segment;

    if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
        nnz_pred = t_nnz[8] + l_nnz[8];

        // decode DC values and do hadamard
        nnz = decode_block_coeffs(c, s->block_dc, s->prob->token[1], 0, nnz_pred,
                                  s->qmat[segment].luma_dc_qmul);
        l_nnz[8] = t_nnz[8] = !!nnz;

            s->vp8dsp.vp8_luma_dc_wht_dc(s->block, s->block_dc);
            s->vp8dsp.vp8_luma_dc_wht(s->block, s->block_dc);
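            /* The inverse WHT scatters the second-order DC values back into the
             * DC slot of each of the 16 luma 4x4 blocks; the _dc variant is the
             * shortcut for the case where only the first WHT coefficient is
             * present. */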
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++) {
            nnz_pred = l_nnz[y] + t_nnz[x];
            nnz = decode_block_coeffs(c, s->block[y][x], s->prob->token[luma_ctx], luma_start,
                                      nnz_pred, s->qmat[segment].luma_qmul);
            // nnz+block_dc may be one more than the actual last index, but we don't care
            s->non_zero_count_cache[y][x] = nnz + block_dc;
            t_nnz[x] = l_nnz[y] = !!nnz;

    // TODO: what to do about dimensions? 2nd dim for luma is x,
    // but for chroma it's (y<<1)|x
    for (i = 4; i < 6; i++)
        for (y = 0; y < 2; y++)
            for (x = 0; x < 2; x++) {
                nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
                nnz = decode_block_coeffs(c, s->block[i][(y<<1)+x], s->prob->token[2], 0,
                                          nnz_pred, s->qmat[segment].chroma_qmul);
                s->non_zero_count_cache[i][(y<<1)+x] = nnz;
                t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;

    // if there were no coded coeffs despite the macroblock not being marked skip,
    // we MUST not do the inner loop filter and should not do IDCT
    // Since skip isn't used for bitstream prediction, just manually set it.

static av_always_inline
void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                      int linesize, int uvlinesize, int simple)
    AV_COPY128(top_border, src_y + 15*linesize);
        AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
        AV_COPY64(top_border+24, src_cr + 7*uvlinesize);

static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                    int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
                    int simple, int xchg)
    uint8_t *top_border_m1 = top_border-32; // for TL prediction
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a,b,xchg) do { \
        if (xchg) AV_SWAP64(b,a); \
        else AV_COPY64(b,a); \

    XCHG(top_border_m1+8, src_y-8, xchg);
    XCHG(top_border, src_y, xchg);
    XCHG(top_border+8, src_y+8, 1);
    if (mb_x < mb_width-1)
        XCHG(top_border+32, src_y+16, 1);

    // only copy chroma for normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1+16, src_cb-8, xchg);
        XCHG(top_border_m1+24, src_cr-8, xchg);
        XCHG(top_border+16, src_cb, 1);
        XCHG(top_border+24, src_cr, 1);

static av_always_inline
int check_intra_pred_mode(int mode, int mb_x, int mb_y)
    if (mode == DC_PRED8x8) {
            mode = mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
            mode = LEFT_DC_PRED8x8;

static av_always_inline
void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
    int x, y, mode, nnz, tr;

    // for the first row, we need to run xchg_mb_border to init the top edge to 127
    // otherwise, skip it if we aren't going to deblock
    if (s->deblock_filter || !mb_y)
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 1);

    if (mb->mode < MODE_I4x4) {
        mode = check_intra_pred_mode(mb->mode, mb_x, mb_y);
        s->hpc.pred16x16[mode](dst[0], s->linesize);
        uint8_t *ptr = dst[0];
        uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
        // all blocks on the right edge of the macroblock use the bottom edge of
        // the top macroblock for their topright edge
        uint8_t *tr_right = ptr - s->linesize + 16;

        // if we're on the right edge of the frame, said edge is extended
        // from the top macroblock
        if (mb_x == s->mb_width-1) {
            tr = tr_right[-1]*0x01010101;
            tr_right = (uint8_t *)&tr;

            AV_ZERO128(s->non_zero_count_cache);

        for (y = 0; y < 4; y++) {
            uint8_t *topright = ptr + 4 - s->linesize;
            for (x = 0; x < 4; x++) {
                    topright = tr_right;

                s->hpc.pred4x4[intra4x4[x]](ptr+4*x, topright, s->linesize);

                nnz = s->non_zero_count_cache[y][x];
                        s->vp8dsp.vp8_idct_dc_add(ptr+4*x, s->block[y][x], s->linesize);
                        s->vp8dsp.vp8_idct_add(ptr+4*x, s->block[y][x], s->linesize);

            ptr += 4*s->linesize;

    mode = check_intra_pred_mode(s->chroma_pred_mode, mb_x, mb_y);
    s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
    s->hpc.pred8x8[mode](dst[2], s->uvlinesize);

    if (s->deblock_filter || !mb_y)
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 0);

 * Generic MC function.
 * @param s VP8 decoding context
 * @param luma 1 for luma (Y) planes, 0 for chroma (Cb/Cr) planes
 * @param dst target buffer for block data at block position
 * @param src reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block (16, 8 or 4)
 * @param block_h height of block (always same as block_w)
 * @param width width of src/dst plane data
 * @param height height of src/dst plane data
 * @param linesize size of a single line of plane data, including padding
 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
static av_always_inline
void vp8_mc(VP8Context *s, int luma,
            uint8_t *dst, uint8_t *src, const VP56mv *mv,
            int x_off, int y_off, int block_w, int block_h,
            int width, int height, int linesize,
            vp8_mc_func mc_func[3][3])
        static const uint8_t idx[8] = { 0, 1, 2, 1, 2, 1, 2, 1 };
        int mx = (mv->x << luma)&7, mx_idx = idx[mx];
        int my = (mv->y << luma)&7, my_idx = idx[my];

        x_off += mv->x >> (3 - luma);
        y_off += mv->y >> (3 - luma);

        src += y_off * linesize + x_off;
        if (x_off < 2 || x_off >= width - block_w - 3 ||
            y_off < 2 || y_off >= height - block_h - 3) {
            ff_emulated_edge_mc(s->edge_emu_buffer, src - 2 * linesize - 2, linesize,
                                block_w + 5, block_h + 5,
                                x_off - 2, y_off - 2, width, height);
            src = s->edge_emu_buffer + 2 + linesize * 2;
        mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
        mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);

static av_always_inline
void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
                 AVFrame *ref_frame, int x_off, int y_off,
                 int bx_off, int by_off,
                 int block_w, int block_h,
                 int width, int height, VP56mv *mv)
    vp8_mc(s, 1, dst[0] + by_off * s->linesize + bx_off,
           ref_frame->data[0], mv, x_off + bx_off, y_off + by_off,
           block_w, block_h, width, height, s->linesize,
           s->put_pixels_tab[block_w == 8]);

    if (s->profile == 3) {

    x_off >>= 1; y_off >>= 1;
    bx_off >>= 1; by_off >>= 1;
    width >>= 1; height >>= 1;
    block_w >>= 1; block_h >>= 1;
    vp8_mc(s, 0, dst[1] + by_off * s->uvlinesize + bx_off,
           ref_frame->data[1], &uvmv, x_off + bx_off, y_off + by_off,
           block_w, block_h, width, height, s->uvlinesize,
           s->put_pixels_tab[1 + (block_w == 4)]);
    vp8_mc(s, 0, dst[2] + by_off * s->uvlinesize + bx_off,
           ref_frame->data[2], &uvmv, x_off + bx_off, y_off + by_off,
           block_w, block_h, width, height, s->uvlinesize,
           s->put_pixels_tab[1 + (block_w == 4)]);

/* Fetch pixels for estimated mv 4 macroblocks ahead.
 * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
    /* Don't prefetch refs that haven't been used very often this frame. */
    if (s->ref_count[ref-1] > (mb_xy >> 5)) {
        int x_off = mb_x << 4, y_off = mb_y << 4;
        int mx = mb->mv.x + x_off + 8;
        int my = mb->mv.y + y_off;
        uint8_t **src= s->framep[ref]->data;
        int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
        s->dsp.prefetch(src[0]+off, s->linesize, 4);
        off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
        s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);

 * Apply motion vectors to prediction buffer, chapter 18.
static av_always_inline
void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
    int x_off = mb_x << 4, y_off = mb_y << 4;
    int width = 16*s->mb_width, height = 16*s->mb_height;
    AVFrame *ref = s->framep[mb->ref_frame];
    VP56mv *bmv = mb->bmv;

    if (mb->mode < VP8_MVMODE_SPLIT) {
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 16, width, height, &mb->mv);
    } else switch (mb->partitioning) {
    case VP8_SPLITMVMODE_4x4: {
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                vp8_mc(s, 1, dst[0] + 4*y*s->linesize + x*4,
                       ref->data[0], &bmv[4*y + x],
                       4*x + x_off, 4*y + y_off, 4, 4,
                       width, height, s->linesize,
                       s->put_pixels_tab[2]);

        x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
        for (y = 0; y < 2; y++) {
            for (x = 0; x < 2; x++) {
                uvmv.x = mb->bmv[ 2*y * 4 + 2*x ].x +
                         mb->bmv[ 2*y * 4 + 2*x+1].x +
                         mb->bmv[(2*y+1) * 4 + 2*x ].x +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].x;
                uvmv.y = mb->bmv[ 2*y * 4 + 2*x ].y +
                         mb->bmv[ 2*y * 4 + 2*x+1].y +
                         mb->bmv[(2*y+1) * 4 + 2*x ].y +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].y;
                uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
                uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
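                /* The chroma MV for each 4x4 chroma block is the average of the
                 * four covering luma sub-MVs: sum them, then divide by 4 with
                 * rounding (the sign term keeps the arithmetic shift rounding
                 * symmetric for negative sums). */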
                if (s->profile == 3) {

                vp8_mc(s, 0, dst[1] + 4*y*s->uvlinesize + x*4,
                       ref->data[1], &uvmv,
                       4*x + x_off, 4*y + y_off, 4, 4,
                       width, height, s->uvlinesize,
                       s->put_pixels_tab[2]);
                vp8_mc(s, 0, dst[2] + 4*y*s->uvlinesize + x*4,
                       ref->data[2], &uvmv,
                       4*x + x_off, 4*y + y_off, 4, 4,
                       width, height, s->uvlinesize,
                       s->put_pixels_tab[2]);

    case VP8_SPLITMVMODE_16x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 16, 8, width, height, &bmv[1]);
    case VP8_SPLITMVMODE_8x16:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 16, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 16, width, height, &bmv[1]);
    case VP8_SPLITMVMODE_8x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 8, width, height, &bmv[1]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 8, 8, width, height, &bmv[2]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 8, 8, 8, width, height, &bmv[3]);

static av_always_inline void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
    if (mb->mode != MODE_I4x4) {
        uint8_t *y_dst = dst[0];
        for (y = 0; y < 4; y++) {
            uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[y]);
                if (nnz4&~0x01010101) {
                    for (x = 0; x < 4; x++) {
                        int nnz = s->non_zero_count_cache[y][x];
                                s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, s->block[y][x], s->linesize);
                                s->vp8dsp.vp8_idct_add(y_dst+4*x, s->block[y][x], s->linesize);
                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
            y_dst += 4*s->linesize;

    for (ch = 0; ch < 2; ch++) {
        uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[4+ch]);
            uint8_t *ch_dst = dst[1+ch];
            if (nnz4&~0x01010101) {
                for (y = 0; y < 2; y++) {
                    for (x = 0; x < 2; x++) {
                        int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x];
                                s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                                s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                    ch_dst += 4*s->uvlinesize;
                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);

static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f )
    int interior_limit, filter_level;

    if (s->segmentation.enabled) {
        filter_level = s->segmentation.filter_level[s->segment];
        if (!s->segmentation.absolute_vals)
            filter_level += s->filter.level;
        filter_level = s->filter.level;

    if (s->lf_delta.enabled) {
        filter_level += s->lf_delta.ref[mb->ref_frame];

        if (mb->ref_frame == VP56_FRAME_CURRENT) {
            if (mb->mode == MODE_I4x4)
                filter_level += s->lf_delta.mode[0];
            if (mb->mode == VP8_MVMODE_ZERO)
                filter_level += s->lf_delta.mode[1];
            else if (mb->mode == VP8_MVMODE_SPLIT)
                filter_level += s->lf_delta.mode[3];
                filter_level += s->lf_delta.mode[2];

    filter_level = av_clip(filter_level, 0, 63);

    interior_limit = filter_level;
    if (s->filter.sharpness) {
        interior_limit >>= s->filter.sharpness > 4 ? 2 : 1;
        interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
    interior_limit = FFMAX(interior_limit, 1);

    f->filter_level = filter_level;
    f->inner_limit = interior_limit;
    f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
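    /* inner_filter enables filtering of the 4x4 block edges inside the
     * macroblock; it is dropped only for skipped MBs whose prediction covers
     * the whole 16x16 block (neither I4x4 nor split MV), since a skipped
     * whole-MB prediction adds no residual at those edges. */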
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
    int mbedge_lim, bedge_lim, hev_thresh;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;
    int uvlinesize = s->uvlinesize;

    mbedge_lim = 2*(filter_level+2) + inner_limit;
    bedge_lim = 2* filter_level + inner_limit;

    hev_thresh = filter_level >= 15;
        if (filter_level >= 40)
        if (filter_level >= 40)
        else if (filter_level >= 20)

        s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);

        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);

        s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);

        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
                                             dst[2] + 4 * uvlinesize,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);

static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
    int mbedge_lim, bedge_lim;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;

    mbedge_lim = 2*(filter_level+2) + inner_limit;
    bedge_lim = 2* filter_level + inner_limit;

        s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);

        s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);

static void filter_mb_row(VP8Context *s, int mb_y)
    VP8FilterStrength *f = s->filter_strength;
        s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize,
        s->framep[VP56_FRAME_CURRENT]->data[1] + 8*mb_y*s->uvlinesize,
        s->framep[VP56_FRAME_CURRENT]->data[2] + 8*mb_y*s->uvlinesize

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        filter_mb(s, dst, f++, mb_x, mb_y);

static void filter_mb_row_simple(VP8Context *s, int mb_y)
    VP8FilterStrength *f = s->filter_strength;
    uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst, NULL, NULL, s->linesize, 0, 1);
        filter_mb_simple(s, dst, f++, mb_x, mb_y);

static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
    VP8Context *s = avctx->priv_data;
    int ret, mb_x, mb_y, i, y, referenced;
    enum AVDiscard skip_thresh;
    AVFrame *av_uninit(curframe);

    if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)

    referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
                                || s->update_altref == VP56_FRAME_CURRENT;

    skip_thresh = !referenced ? AVDISCARD_NONREF :
                  !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;

    if (avctx->skip_frame >= skip_thresh) {

    s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;

    for (i = 0; i < 4; i++)
        if (&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
            curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];

    if (curframe->data[0])
        avctx->release_buffer(avctx, curframe);

    curframe->key_frame = s->keyframe;
    curframe->pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
    curframe->reference = referenced ? 3 : 0;
    if ((ret = avctx->get_buffer(avctx, curframe))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");

    // Given that arithmetic probabilities are updated every frame, it's quite likely
    // that the values we have on a random interframe are complete junk if we didn't
    // start decode on a keyframe. So just don't display anything rather than junk.
    if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
                         !s->framep[VP56_FRAME_GOLDEN] ||
                         !s->framep[VP56_FRAME_GOLDEN2])) {
        av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
        return AVERROR_INVALIDDATA;

    s->linesize = curframe->linesize[0];
    s->uvlinesize = curframe->linesize[1];

    if (!s->edge_emu_buffer)
        s->edge_emu_buffer = av_malloc(21*s->linesize);

    memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));

    /* Zero macroblock structures for top/left prediction from outside the frame. */
    memset(s->macroblocks, 0, (s->mb_width + s->mb_height*2)*sizeof(*s->macroblocks));

    // top edge of 127 for intra prediction
    memset(s->top_border, 127, (s->mb_width+1)*sizeof(*s->top_border));
    memset(s->ref_count, 0, sizeof(s->ref_count));

        memset(s->intra4x4_pred_mode_top, DC_PRED, s->b4_stride*4);
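        /* The 4x4 mode row above the frame starts out as DC_PRED, which is how
         * out-of-frame neighbours are treated when this array is used as the
         * prediction context in decode_intra4x4_modes(). */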
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
        VP8Macroblock *mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
        uint8_t *segment_map = s->segmentation_map + mb_y*s->mb_stride;
        int mb_xy = mb_y * s->mb_stride;
            curframe->data[0] + 16*mb_y*s->linesize,
            curframe->data[1] + 8*mb_y*s->uvlinesize,
            curframe->data[2] + 8*mb_y*s->uvlinesize

        memset(s->left_nnz, 0, sizeof(s->left_nnz));
        AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED*0x01010101);

        // left edge of 129 for intra prediction
        if (!(avctx->flags & CODEC_FLAG_EMU_EDGE))
            for (i = 0; i < 3; i++)
                for (y = 0; y < 16>>!!i; y++)
                    dst[i][y*curframe->linesize[i]-1] = 129;
            memset(s->top_border, 129, sizeof(*s->top_border));

        for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
            uint8_t *segment_mb = segment_map+mb_x;

            /* Prefetch the current frame, 4 MBs ahead */
            s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
            s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);

            decode_mb_mode(s, mb, mb_x, mb_y, segment_mb);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);

                decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz);

            if (mb->mode <= MODE_I4x4)
                intra_predict(s, dst, mb, mb_x, mb_y);
                inter_predict(s, dst, mb, mb_x, mb_y);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);

                idct_mb(s, dst, mb);

                AV_ZERO64(s->left_nnz);
                AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
                // Reset the DC block predictors that would exist if the mb had coefficients
                if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
                    s->top_nnz[mb_x][8] = 0;

            if (s->deblock_filter)
                filter_level_for_mb(s, mb, &s->filter_strength[mb_x]);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);

        if (s->deblock_filter) {
            if (s->filter.simple)
                filter_mb_row_simple(s, mb_y);
                filter_mb_row(s, mb_y);

    // if future frames don't use the updated probabilities,
    // reset them to the values we saved
    if (!s->update_probabilities)
        s->prob[0] = s->prob[1];

    // check if golden and altref are swapped
    if (s->update_altref == VP56_FRAME_GOLDEN &&
        s->update_golden == VP56_FRAME_GOLDEN2)
        FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN], s->framep[VP56_FRAME_GOLDEN2]);

    if (s->update_altref != VP56_FRAME_NONE)
        s->framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];

    if (s->update_golden != VP56_FRAME_NONE)
        s->framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];

    if (s->update_last) // move cur->prev
        s->framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_CURRENT];

    // release no longer referenced frames
    for (i = 0; i < 4; i++)
        if (s->frames[i].data[0] &&
            &s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
            avctx->release_buffer(avctx, &s->frames[i]);

    if (!s->invisible) {
        *(AVFrame*)data = *s->framep[VP56_FRAME_CURRENT];
        *data_size = sizeof(AVFrame);

static av_cold int vp8_decode_init(AVCodecContext *avctx)
    VP8Context *s = avctx->priv_data;

    avctx->pix_fmt = PIX_FMT_YUV420P;

    dsputil_init(&s->dsp, avctx);
    ff_h264_pred_init(&s->hpc, CODEC_ID_VP8);
    ff_vp8dsp_init(&s->vp8dsp);

    // intra pred needs edge emulation among other things
    if (avctx->flags&CODEC_FLAG_EMU_EDGE) {
        av_log(avctx, AV_LOG_ERROR, "Edge emulation not supported\n");
        return AVERROR_PATCHWELCOME;

static av_cold int vp8_decode_free(AVCodecContext *avctx)
    vp8_decode_flush(avctx);

AVCodec vp8_decoder = {
    .flush = vp8_decode_flush,
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),