/*
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 * Copyright (C) 2010 Jason Garrett-Glaser
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "vp56.h"
#include "vp8data.h"
#include "vp8dsp.h"
#include "h264pred.h"
#include "rectangle.h"
// todo: make it possible to check for at least (i4x4 or split_mv)
// in one op. are others needed?

typedef struct {
    AVCodecContext *avctx;
    DSPContext dsp;
    VP8DSPContext vp8dsp;
    H264PredContext hpc;
    vp8_mc_func put_pixels_tab[3][3][3];
    AVFrame frames[4];
    AVFrame *framep[4];
    uint8_t *edge_emu_buffer;
    VP56RangeCoder c;   ///< header context, includes mb modes and motion vectors
    int profile;

    int mb_width;   /* number of horizontal MB */
    int mb_height;  /* number of vertical MB */
    int mb_stride;
    int b4_stride;
    int linesize;
    int uvlinesize;

    int keyframe;
    int invisible;
    int update_last;    ///< update VP56_FRAME_PREVIOUS with the current one
    int update_golden;  ///< VP56_FRAME_NONE if not updated, or which frame to copy if so
    int update_altref;
    int deblock_filter;
    int mbskip_enabled;

    /**
     * If this flag is not set, all the probability updates
     * are discarded after this frame is decoded.
     */
    int update_probabilities;

    /**
     * All coefficients are contained in separate arith coding contexts.
     * There can be 1, 2, 4, or 8 of these after the header context.
     */
    int num_coeff_partitions;
    VP56RangeCoder coeff_partition[8];

    VP8Macroblock *macroblocks;
    VP8Macroblock *macroblocks_base;
    VP8FilterStrength *filter_strength;

    uint8_t *intra4x4_pred_mode;
    uint8_t *intra4x4_pred_mode_base;
    uint8_t *segmentation_map;

    /**
     * Cache of the top row needed for intra prediction
     * 16 for luma, 8 for each chroma plane
     */
    uint8_t (*top_border)[16+8+8];

    /**
     * For coeff decode, we need to know whether the above block had non-zero
     * coefficients. This means for each macroblock, we need data for 4 luma
     * blocks, 2 u blocks, 2 v blocks, and the luma dc block, for a total of 9
     * per macroblock. We keep the last row in top_nnz.
     */
    uint8_t (*top_nnz)[9];
    DECLARE_ALIGNED(8, uint8_t, left_nnz)[9];

    /**
     * This is the index plus one of the last non-zero coeff
     * for each of the blocks in the current macroblock.
     * So, 0 -> no coeffs
     *     1 -> dc-only (special transform)
     *     2+-> full transform
     */
    DECLARE_ALIGNED(16, uint8_t, non_zero_count_cache)[6][4];
    DECLARE_ALIGNED(16, DCTELEM, block)[6][4][16];
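    /* Rows 0-3 of these caches cover the 4x4 luma blocks, rows 4 and 5 the
     * 2x2 blocks of each chroma plane; see decode_mb_coeffs() for the exact
     * indexing. */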
    uint8_t intra4x4_pred_mode_mb[16];

    int chroma_pred_mode;    ///< 8x8c pred mode of the current macroblock
    int segment;             ///< segment of the current macroblock

    uint8_t mbskip_proba;
    uint8_t intra_proba;
    uint8_t last_proba;
    uint8_t golden_proba;
    int sign_bias[4]; ///< one state [0, 1] per ref frame type
    int ref_count[3];

    /**
     * Base parameters for segmentation, i.e. per-macroblock parameters.
     * These must be kept unchanged even if segmentation is not used for
     * a frame, since the values persist between interframes.
     */
    struct {
        int enabled;
        int absolute_vals;
        int update_map;
        int8_t base_quant[4];
        int8_t filter_level[4];     ///< base loop filter level
    } segmentation;

    /**
     * Macroblocks can have one of 4 different quants in a frame when
     * segmentation is enabled.
     * If segmentation is disabled, only the first segment's values are used.
     */
    struct {
        // [0] - DC qmul  [1] - AC qmul
        int16_t luma_qmul[2];
        int16_t luma_dc_qmul[2];    ///< luma dc-only block quant
        int16_t chroma_qmul[2];
    } qmat[4];

    struct {
        int simple;
        int level;
        int sharpness;
    } filter;

    struct {
        int enabled;    ///< whether each mb can have a different strength based on mode/ref

        /**
         * filter strength adjustment for the following macroblock modes:
         * [0] - i4x4
         * [1] - zero mv
         * [2] - inter modes except for zero or split mv
         * [3] - split mv
         * i16x16 modes never have any adjustment
         */
        int8_t mode[4];

        /**
         * filter strength adjustment for macroblocks that reference:
         * [0] - intra / VP56_FRAME_CURRENT
         * [1] - VP56_FRAME_PREVIOUS
         * [2] - VP56_FRAME_GOLDEN
         * [3] - altref / VP56_FRAME_GOLDEN2
         */
        int8_t ref[4];
    } lf_delta;

    /**
     * These are all of the updatable probabilities for binary decisions.
     * They are only implicitly reset on keyframes, making it quite likely
     * for an interframe to desync if a prior frame's header was corrupt
     * or missing outright!
     */
    struct {
        uint8_t segmentid[3];
        uint8_t pred16x16[4];
        uint8_t pred8x8c[3];
        uint8_t token[4][8][3][NUM_DCT_TOKENS-1];
        uint8_t mvc[2][19];
    } prob[2];
} VP8Context;

static void vp8_decode_flush(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 4; i++)
        if (s->frames[i].data[0])
            avctx->release_buffer(avctx, &s->frames[i]);
    memset(s->framep, 0, sizeof(s->framep));

    av_freep(&s->macroblocks_base);
    av_freep(&s->filter_strength);
    av_freep(&s->intra4x4_pred_mode_base);
    av_freep(&s->top_nnz);
    av_freep(&s->edge_emu_buffer);
    av_freep(&s->top_border);
    av_freep(&s->segmentation_map);

    s->macroblocks        = NULL;
    s->intra4x4_pred_mode = NULL;
}

static int update_dimensions(VP8Context *s, int width, int height)
{
    int i;

    if (avcodec_check_dimensions(s->avctx, width, height))
        return AVERROR_INVALIDDATA;

    vp8_decode_flush(s->avctx);

    avcodec_set_dimensions(s->avctx, width, height);

    s->mb_width  = (s->avctx->coded_width +15) / 16;
    s->mb_height = (s->avctx->coded_height+15) / 16;

    // we allocate a border around the top/left of intra4x4 modes
    // this is 4 blocks for intra4x4 to keep 4-byte alignment for fill_rectangle
    s->mb_stride = s->mb_width+1;
    s->b4_stride = 4*s->mb_stride;

    s->macroblocks_base        = av_mallocz((s->mb_stride+s->mb_height*2+2)*sizeof(*s->macroblocks));
    s->filter_strength         = av_mallocz(s->mb_stride*sizeof(*s->filter_strength));
    s->intra4x4_pred_mode_base = av_mallocz(s->b4_stride*(4*s->mb_height+1));
    s->top_nnz                 = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
    s->top_border              = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));
    s->segmentation_map        = av_mallocz(s->mb_stride*s->mb_height);

    if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_base ||
        !s->top_nnz || !s->top_border || !s->segmentation_map)
        return AVERROR(ENOMEM);

    s->macroblocks        = s->macroblocks_base        + 1;
    s->intra4x4_pred_mode = s->intra4x4_pred_mode_base + 4 + s->b4_stride;

    memset(s->intra4x4_pred_mode_base, DC_PRED, s->b4_stride);
    for (i = 0; i < 4*s->mb_height; i++)
        s->intra4x4_pred_mode[i*s->b4_stride-1] = DC_PRED;
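
    /* The border row/column seeded above makes reads from outside the frame
     * return DC_PRED, which is the context keyframe i4x4 mode decoding
     * expects for missing above/left neighbors. */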

    return 0;
}

static void parse_segment_info(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    s->segmentation.update_map = vp8_rac_get(c);

    if (vp8_rac_get(c)) { // update segment feature data
        s->segmentation.absolute_vals = vp8_rac_get(c);

        for (i = 0; i < 4; i++)
            s->segmentation.base_quant[i]   = vp8_rac_get_sint(c, 7);

        for (i = 0; i < 4; i++)
            s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
    }
    if (s->segmentation.update_map)
        for (i = 0; i < 3; i++)
            s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
}

static void update_lf_deltas(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    for (i = 0; i < 4; i++)
        s->lf_delta.ref[i]  = vp8_rac_get_sint(c, 6);

    for (i = 0; i < 4; i++)
        s->lf_delta.mode[i] = vp8_rac_get_sint(c, 6);
}

static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
{
    const uint8_t *sizes = buf;
    int i;

    s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);

    buf      += 3*(s->num_coeff_partitions-1);
    buf_size -= 3*(s->num_coeff_partitions-1);
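    /* The sizes of all partitions but the last are stored as 24-bit LE
     * values right after the header partition; the last partition
     * implicitly spans whatever remains of the buffer. */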
    if (buf_size < 0)
        return -1;

    for (i = 0; i < s->num_coeff_partitions-1; i++) {
        int size = AV_RL24(sizes + 3*i);
        if (buf_size - size < 0)
            return -1;

        vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
        buf      += size;
        buf_size -= size;
    }
    vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);

    return 0;
}

static void get_quants(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i, base_qi;

    int yac_qi     = vp8_rac_get_uint(c, 7);
    int ydc_delta  = vp8_rac_get_sint(c, 4);
    int y2dc_delta = vp8_rac_get_sint(c, 4);
    int y2ac_delta = vp8_rac_get_sint(c, 4);
    int uvdc_delta = vp8_rac_get_sint(c, 4);
    int uvac_delta = vp8_rac_get_sint(c, 4);

    for (i = 0; i < 4; i++) {
        if (s->segmentation.enabled) {
            base_qi = s->segmentation.base_quant[i];
            if (!s->segmentation.absolute_vals)
                base_qi += yac_qi;
        } else
            base_qi = yac_qi;

        s->qmat[i].luma_qmul[0]    =       vp8_dc_qlookup[av_clip(base_qi + ydc_delta , 0, 127)];
        s->qmat[i].luma_qmul[1]    =       vp8_ac_qlookup[av_clip(base_qi             , 0, 127)];
        s->qmat[i].luma_dc_qmul[0] =   2 * vp8_dc_qlookup[av_clip(base_qi + y2dc_delta, 0, 127)];
        s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip(base_qi + y2ac_delta, 0, 127)] / 100;
        if (s->qmat[i].luma_dc_qmul[1] < 8)
            s->qmat[i].luma_dc_qmul[1] = 8; // the spec floors the Y2 AC dequant factor at 8
        s->qmat[i].chroma_qmul[0]  =       vp8_dc_qlookup[av_clip(base_qi + uvdc_delta, 0, 117)];
        s->qmat[i].chroma_qmul[1]  =       vp8_ac_qlookup[av_clip(base_qi + uvac_delta, 0, 127)];
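        /* Quantizer indices map through 128-entry DC/AC lookup tables. The
         * secondary-transform (Y2) block gets a doubled DC factor and an AC
         * factor scaled by 155/100, and chroma DC is capped at index 117,
         * per RFC 6386. */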
    }
}

/**
 * Determine which buffers golden and altref should be updated with after this frame.
 * The spec isn't clear here, so I'm going by my understanding of what libvpx does
 *
 * Intra frames update all 3 references
 * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
 * If the update (golden|altref) flag is set, it's updated with the current frame
 *     if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
 * If the flag is not set, the number read means:
 *     0: no update
 *     1: VP56_FRAME_PREVIOUS
 *     2: update golden with altref, or update altref with golden
 */
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
{
    VP56RangeCoder *c = &s->c;

    if (update)
        return VP56_FRAME_CURRENT;

    switch (vp8_rac_get_uint(c, 2)) {
    case 1:
        return VP56_FRAME_PREVIOUS;
    case 2:
        return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
    }
    return VP56_FRAME_NONE;
}

static void update_refs(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;

    int update_golden = vp8_rac_get(c);
    int update_altref = vp8_rac_get(c);

    s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
    s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
}

static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
    VP56RangeCoder *c = &s->c;
    int header_size, hscale, vscale, i, j, k, l, ret;
    int width  = s->avctx->width;
    int height = s->avctx->height;

    s->keyframe  = !(buf[0] & 1);
    s->profile   =  (buf[0]>>1) & 7;
    s->invisible = !(buf[0] & 0x10);
    header_size  = AV_RL24(buf) >> 5;
    buf      += 3;
    buf_size -= 3;

    if (s->profile > 3)
        av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);

    if (!s->profile)
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
    else    // profile 1-3 use bilinear, 4+ aren't defined so whatever
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));

    if (header_size > buf_size - 7*s->keyframe) {
        av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->keyframe) {
        if (AV_RL24(buf) != 0x2a019d) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
            return AVERROR_INVALIDDATA;
        }
        width  = AV_RL16(buf+3) & 0x3fff;
        height = AV_RL16(buf+5) & 0x3fff;
        hscale = buf[4] >> 6;
        vscale = buf[6] >> 6;
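        /* Keyframe header: 3-byte start code, then 16-bit LE width and
         * height whose top two bits carry the horizontal/vertical upscaling
         * factors -- the 7 bytes accounted for above. */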
        buf      += 7;
        buf_size -= 7;

        if (hscale || vscale)
            av_log_missing_feature(s->avctx, "Upscaling", 1);

        s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
        memcpy(s->prob->token    , vp8_token_default_probs , sizeof(s->prob->token));
        memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
        memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
        memcpy(s->prob->mvc      , vp8_mv_default_prob     , sizeof(s->prob->mvc));
        memset(&s->segmentation, 0, sizeof(s->segmentation));
    }

    if (!s->macroblocks_base || /* first frame */
        width != s->avctx->width || height != s->avctx->height) {
        if ((ret = update_dimensions(s, width, height)) < 0)
            return ret;
    }

    vp56_init_range_decoder(c, buf, header_size);
    buf      += header_size;
    buf_size -= header_size;

    if (s->keyframe) {
        if (vp8_rac_get(c))
            av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
        vp8_rac_get(c); // whether we can skip clamping in dsp functions
    }

    if ((s->segmentation.enabled = vp8_rac_get(c)))
        parse_segment_info(s);
    else
        s->segmentation.update_map = 0; // FIXME: move this to some init function?

    s->filter.simple    = vp8_rac_get(c);
    s->filter.level     = vp8_rac_get_uint(c, 6);
    s->filter.sharpness = vp8_rac_get_uint(c, 3);

    if ((s->lf_delta.enabled = vp8_rac_get(c)))
        if (vp8_rac_get(c))
            update_lf_deltas(s);

    if (setup_partitions(s, buf, buf_size)) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
        return AVERROR_INVALIDDATA;
    }

    get_quants(s);

    if (!s->keyframe) {
        update_refs(s);
        s->sign_bias[VP56_FRAME_GOLDEN]               = vp8_rac_get(c);
        s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
    }

    // if we aren't saving this frame's probabilities for future frames,
    // make a copy of the current probabilities
    if (!(s->update_probabilities = vp8_rac_get(c)))
        s->prob[1] = s->prob[0];
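    /* prob[0] always holds the working set; prob[1] is the snapshot that
     * vp8_decode_frame() copies back once the frame is done, when the
     * updates are not meant to persist. */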

    s->update_last = s->keyframe || vp8_rac_get(c);

    for (i = 0; i < 4; i++)
        for (j = 0; j < 8; j++)
            for (k = 0; k < 3; k++)
                for (l = 0; l < NUM_DCT_TOKENS-1; l++)
                    if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l]))
                        s->prob->token[i][j][k][l] = vp8_rac_get_uint(c, 8);

    if ((s->mbskip_enabled = vp8_rac_get(c)))
        s->mbskip_proba = vp8_rac_get_uint(c, 8);

    if (!s->keyframe) {
        s->intra_proba  = vp8_rac_get_uint(c, 8);
        s->last_proba   = vp8_rac_get_uint(c, 8);
        s->golden_proba = vp8_rac_get_uint(c, 8);

        if (vp8_rac_get(c))
            for (i = 0; i < 4; i++)
                s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
        if (vp8_rac_get(c))
            for (i = 0; i < 3; i++)
                s->prob->pred8x8c[i]  = vp8_rac_get_uint(c, 8);

        // 17.2 MV probability update
        for (i = 0; i < 2; i++)
            for (j = 0; j < 19; j++)
                if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
                    s->prob->mvc[i][j] = vp8_rac_get_nn(c);
    }

    return 0;
}

static av_always_inline
void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src, int mb_x, int mb_y)
{
#define MARGIN (16 << 2)
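    /* MVs are in quarter-pel units: a 16-pel macroblock spans 16<<2 = 64
     * units (hence the <<6 below), and clamping allows vectors to point up
     * to one macroblock (MARGIN) outside the frame. */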
    dst->x = av_clip(src->x, -((mb_x << 6) + MARGIN),
                     ((s->mb_width  - 1 - mb_x) << 6) + MARGIN);
    dst->y = av_clip(src->y, -((mb_y << 6) + MARGIN),
                     ((s->mb_height - 1 - mb_y) << 6) + MARGIN);
}

static av_always_inline
void find_near_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
                   VP56mv near[2], VP56mv *best, uint8_t cnt[4])
{
    VP8Macroblock *mb_edge[3] = { mb + 2 /* top */,
                                  mb - 1 /* left */,
                                  mb + 1 /* top-left */ };
    enum { EDGE_TOP, EDGE_LEFT, EDGE_TOPLEFT };
    VP56mv near_mv[4] = {{ 0 }};
    enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    int idx = CNT_ZERO;
    int best_idx = CNT_ZERO;
    int cur_sign_bias = s->sign_bias[mb->ref_frame];
    int *sign_bias = s->sign_bias;

    /* Process MB on top, left and top-left */
#define MV_EDGE_CHECK(n)\
{\
    VP8Macroblock *edge = mb_edge[n];\
    int edge_ref = edge->ref_frame;\
    if (edge_ref != VP56_FRAME_CURRENT) {\
        uint32_t mv = AV_RN32A(&edge->mv);\
        if (mv) {\
            if (cur_sign_bias != sign_bias[edge_ref]) {\
                /* SWAR negate of the values in mv. */\
                mv = ~mv;\
                mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
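                /* Per 16-bit lane this computes ~mv + 1 == -mv: masking to\
                 * 0x7fff7fff keeps the +1 carry from crossing between the\
                 * x and y components, and XORing the sign bits back in\
                 * completes the per-lane addition mod 2^16. */\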
            }\
            if (!n || mv != AV_RN32A(&near_mv[idx]))\
                AV_WN32A(&near_mv[++idx], mv);\
            cnt[idx] += 1 + (n != 2);\
        } else\
            cnt[CNT_ZERO] += 1 + (n != 2);\
    }\
}

    MV_EDGE_CHECK(0)
    MV_EDGE_CHECK(1)
    MV_EDGE_CHECK(2)

    /* If we have three distinct MVs, merge first and last if they're the same */
    if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1+EDGE_TOP]) == AV_RN32A(&near_mv[1+EDGE_TOPLEFT]))
        cnt[CNT_NEAREST] += 1;

    cnt[CNT_SPLITMV] = ((mb_edge[EDGE_LEFT]->mode    == VP8_MVMODE_SPLIT) +
                        (mb_edge[EDGE_TOP]->mode     == VP8_MVMODE_SPLIT)) * 2 +
                        (mb_edge[EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);

    /* Swap near and nearest if necessary */
    if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
        FFSWAP(uint8_t,     cnt[CNT_NEAREST],     cnt[CNT_NEAR]);
        FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
    }

    /* Choose the best mv out of 0,0 and the nearest mv */
    if (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])
        best_idx = CNT_NEAREST;

    mb->mv  = near_mv[best_idx];
    near[0] = near_mv[CNT_NEAREST];
    near[1] = near_mv[CNT_NEAR];
}

/**
 * Motion vector coding, 17.1.
 */
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
{
    int bit, x = 0;

    if (vp56_rac_get_prob_branchy(c, p[0])) {
        int i;

        for (i = 0; i < 3; i++)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        for (i = 9; i > 3; i--)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
            x += 8;
    } else {
        // small_mvtree
        const uint8_t *ps = p+2;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + 3*bit;
        x  += 4*bit;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + bit;
        x  += 2*bit;
        x  += vp56_rac_get_prob(c, *ps);
    }

    return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
}

static av_always_inline
const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
{
    if (left == top)
        return vp8_submv_prob[4-!!left];
    if (!top)
        return vp8_submv_prob[2];
    return vp8_submv_prob[1-!!left];
}
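
/* Context mapping above: both neighbors zero -> [4], both equal and nonzero
 * -> [3], only top zero -> [2], only left zero -> [1], both nonzero but
 * different -> [0]. */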

/**
 * Split motion vector prediction, 16.4.
 * @returns the number of motion vectors parsed (2, 4 or 16)
 */
static av_always_inline
int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb)
{
    int part_idx = mb->partitioning =
        vp8_rac_get_tree(c, vp8_mbsplit_tree, vp8_mbsplit_prob);
    int n, num = vp8_mbsplit_count[part_idx];
    VP8Macroblock *top_mb  = &mb[2];
    VP8Macroblock *left_mb = &mb[-1];
    const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
                  *mbsplits_top  = vp8_mbsplits[top_mb->partitioning],
                  *mbsplits_cur  = vp8_mbsplits[part_idx],
                  *firstidx      = vp8_mbfirstidx[part_idx];
    VP56mv *top_mv  = top_mb->bmv;
    VP56mv *left_mv = left_mb->bmv;
    VP56mv *cur_mv  = mb->bmv;

    for (n = 0; n < num; n++) {
        int k = firstidx[n];
        uint32_t left, above;
        const uint8_t *submv_prob;

        if (!(k & 3))
            left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
        else
            left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
        if (k <= 3)
            above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
        else
            above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);

        submv_prob = get_submv_prob(left, above);

        switch (vp8_rac_get_tree(c, vp8_submv_ref_tree, submv_prob)) {
        case VP8_SUBMVMODE_NEW4X4:
            mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
            mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
            break;
        case VP8_SUBMVMODE_ZERO4X4:
            AV_ZERO32(&mb->bmv[n]);
            break;
        case VP8_SUBMVMODE_LEFT4X4:
            AV_WN32A(&mb->bmv[n], left);
            break;
        case VP8_SUBMVMODE_TOP4X4:
            AV_WN32A(&mb->bmv[n], above);
            break;
        }
    }

    return num;
}

static av_always_inline
void decode_intra4x4_modes(VP56RangeCoder *c, uint8_t *intra4x4,
                           int stride, int keyframe)
{
    int x, y, t, l, i;

    if (keyframe) {
        const uint8_t *ctx;
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                t = intra4x4[x - stride];
                l = intra4x4[x - 1];
                ctx = vp8_pred4x4_prob_intra[t][l];
                intra4x4[x] = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
            }
            intra4x4 += stride;
        }
    } else {
        for (i = 0; i < 16; i++)
            intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
    }
}

static av_always_inline
void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
                    uint8_t *intra4x4, uint8_t *segment)
{
    VP56RangeCoder *c = &s->c;

    if (s->segmentation.update_map)
        *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
    s->segment = *segment;

    mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->mbskip_proba) : 0;

    if (s->keyframe) {
        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);

        if (mb->mode == MODE_I4x4) {
            decode_intra4x4_modes(c, intra4x4, s->b4_stride, 1);
        } else
            fill_rectangle(intra4x4, 4, 4, s->b4_stride, vp8_pred4x4_mode[mb->mode], 1);

        s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
        mb->ref_frame = VP56_FRAME_CURRENT;
    } else if (vp56_rac_get_prob_branchy(c, s->intra_proba)) {
        VP56mv near[2], best;
        uint8_t cnt[4] = { 0 };
        uint8_t p[4];

        // inter MB, 16.2
        if (vp56_rac_get_prob_branchy(c, s->last_proba))
            mb->ref_frame = vp56_rac_get_prob(c, s->golden_proba) ?
                VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
        else
            mb->ref_frame = VP56_FRAME_PREVIOUS;
        s->ref_count[mb->ref_frame-1]++;

        // motion vectors, 16.3
        find_near_mvs(s, mb, mb_x, mb_y, near, &best, cnt);
        p[0] = vp8_mode_contexts[cnt[0]][0];
        p[1] = vp8_mode_contexts[cnt[1]][1];
        p[2] = vp8_mode_contexts[cnt[2]][2];
        p[3] = vp8_mode_contexts[cnt[3]][3];
        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_mvinter, p);
        switch (mb->mode) {
        case VP8_MVMODE_SPLIT:
            clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
            mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1];
            break;
        case VP8_MVMODE_ZERO:
            AV_ZERO32(&mb->mv);
            break;
        case VP8_MVMODE_NEAREST:
            clamp_mv(s, &mb->mv, &near[0], mb_x, mb_y);
            break;
        case VP8_MVMODE_NEAR:
            clamp_mv(s, &mb->mv, &near[1], mb_x, mb_y);
            break;
        case VP8_MVMODE_NEW:
            clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
            mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
            mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
            break;
        }
        if (mb->mode != VP8_MVMODE_SPLIT) {
            mb->partitioning = VP8_SPLITMVMODE_NONE;
            mb->bmv[0] = mb->mv;
        }
    } else {
        // intra MB, 16.1
        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);

        if (mb->mode == MODE_I4x4)
            decode_intra4x4_modes(c, intra4x4, 4, 0);

        s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
        mb->ref_frame    = VP56_FRAME_CURRENT;
        mb->partitioning = VP8_SPLITMVMODE_NONE;
        AV_ZERO32(&mb->bmv[0]);
    }
}

/**
 * @param c          arithmetic bitstream reader context
 * @param block      destination for block coefficients
 * @param probs      probabilities to use when reading trees from the bitstream
 * @param i          initial coeff index, 0 unless a separate DC block is coded
 * @param zero_nhood the initial prediction context for number of surrounding
 *                   all-zero blocks (only left/top, so 0-2)
 * @param qmul       array holding the dc/ac dequant factor at position 0/1
 * @return 0 if no coeffs were decoded
 *         otherwise, the index of the last coeff decoded plus one
 */
static int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16],
                               uint8_t probs[8][3][NUM_DCT_TOKENS-1],
                               int i, int zero_nhood, int16_t qmul[2])
{
    int nonzero = 0;
    int coeff;
    uint8_t *token_prob;

    do {
        token_prob = probs[vp8_coeff_band[i]][zero_nhood];

        if (!vp56_rac_get_prob_branchy(c, token_prob[0]))   // DCT_EOB
            return nonzero;

skip_eob:
        if (!vp56_rac_get_prob_branchy(c, token_prob[1])) { // DCT_0
            zero_nhood = 0;
            token_prob = probs[vp8_coeff_band[++i]][0];
            if (i < 16)
                goto skip_eob;
            return nonzero; // invalid input; blocks should end with EOB
        }

        if (!vp56_rac_get_prob_branchy(c, token_prob[2])) { // DCT_1
            coeff = 1;
            zero_nhood = 1;
        } else {
            zero_nhood = 2;
            if (!vp56_rac_get_prob_branchy(c, token_prob[3])) { // DCT 2,3,4
                coeff = vp56_rac_get_prob(c, token_prob[4]);
                if (coeff)
                    coeff += vp56_rac_get_prob(c, token_prob[5]);
                coeff += 2;
            } else {
                // DCT_CAT*
                if (!vp56_rac_get_prob_branchy(c, token_prob[6])) {
                    if (!vp56_rac_get_prob_branchy(c, token_prob[7])) { // DCT_CAT1
                        coeff = 5 + vp56_rac_get_prob(c, vp8_dct_cat1_prob[0]);
                    } else {                                            // DCT_CAT2
                        coeff  = 7;
                        coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[0]) << 1;
                        coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[1]);
                    }
                } else {    // DCT_CAT3 and up
                    int a = vp56_rac_get_prob(c, token_prob[8]);
                    int b = vp56_rac_get_prob(c, token_prob[9+a]);
                    int cat = (a<<1) + b;
                    coeff = 3 + (8<<cat);
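                    /* cat 0..3 here are DCT_CAT3..DCT_CAT6, whose base
                     * magnitudes are 11, 19, 35 and 67 = 3 + (8<<cat); the
                     * remaining offset bits follow below. */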
                    coeff += vp8_rac_get_coeff(c, vp8_dct_cat_prob[cat]);
                }
            }
        }

        // todo: full [16] qmat? load into register?
        block[zigzag_scan[i]] = (vp8_rac_get(c) ? -coeff : coeff) * qmul[!!i];
        nonzero = ++i;
    } while (i < 16);

    return nonzero;
}

static av_always_inline
void decode_mb_coeffs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
                      uint8_t t_nnz[9], uint8_t l_nnz[9])
{
    LOCAL_ALIGNED_16(DCTELEM, dc,[16]);
    int i, x, y, luma_start = 0, luma_ctx = 3;
    int nnz_pred, nnz, nnz_total = 0;
    int segment = s->segment;

    if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
        AV_ZERO128(dc);
        AV_ZERO128(dc+8);
        nnz_pred = t_nnz[8] + l_nnz[8];

        // decode DC values and do hadamard
        nnz = decode_block_coeffs(c, dc, s->prob->token[1], 0, nnz_pred,
                                  s->qmat[segment].luma_dc_qmul);
        l_nnz[8] = t_nnz[8] = !!nnz;
        nnz_total += nnz;
        s->vp8dsp.vp8_luma_dc_wht(s->block, dc);
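        /* The Y2 block holds the 16 luma DC values; the inverse WHT scatters
         * them back into block[y][x][0] before the per-block IDCTs run. */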
        luma_start = 1;
        luma_ctx = 0;
    }

    // luma blocks
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++) {
            nnz_pred = l_nnz[y] + t_nnz[x];
            nnz = decode_block_coeffs(c, s->block[y][x], s->prob->token[luma_ctx], luma_start,
                                      nnz_pred, s->qmat[segment].luma_qmul);
            // nnz+luma_start may be one more than the actual last index, but we don't care
            s->non_zero_count_cache[y][x] = nnz + luma_start;
            t_nnz[x] = l_nnz[y] = !!nnz;
            nnz_total += nnz;
        }

    // chroma blocks
    // TODO: what to do about dimensions? 2nd dim for luma is x,
    // but for chroma it's (y<<1)|x
    for (i = 4; i < 6; i++)
        for (y = 0; y < 2; y++)
            for (x = 0; x < 2; x++) {
                nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
                nnz = decode_block_coeffs(c, s->block[i][(y<<1)+x], s->prob->token[2], 0,
                                          nnz_pred, s->qmat[segment].chroma_qmul);
                s->non_zero_count_cache[i][(y<<1)+x] = nnz;
                t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
                nnz_total += nnz;
            }

    // if there were no coded coeffs despite the macroblock not being marked skip,
    // we MUST not do the inner loop filter and should not do IDCT
    // Since skip isn't used for bitstream prediction, just manually set it.
    if (!nnz_total)
        mb->skip = 1;
}

static av_always_inline
void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                      int linesize, int uvlinesize, int simple)
{
    AV_COPY128(top_border, src_y + 15*linesize);
    if (!simple) {
        AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
        AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
    }
}

static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                    int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
                    int simple, int xchg)
{
    uint8_t *top_border_m1 = top_border-32;     // for TL prediction
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a,b,xchg) do {                     \
        if (xchg) AV_SWAP64(b,a);               \
        else      AV_COPY64(b,a);               \
    } while (0)

    XCHG(top_border_m1+8, src_y-8, xchg);
    XCHG(top_border,      src_y,   xchg);
    XCHG(top_border+8,    src_y+8, 1);
    if (mb_x < mb_width-1)
        XCHG(top_border+32, src_y+16, 1);

    // only copy chroma for normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1+16, src_cb-8, xchg);
        XCHG(top_border_m1+24, src_cr-8, xchg);
        XCHG(top_border+16, src_cb, 1);
        XCHG(top_border+24, src_cr, 1);
    }
}

static av_always_inline
int check_intra_pred_mode(int mode, int mb_x, int mb_y)
{
    if (mode == DC_PRED8x8) {
        if (!mb_x)
            mode = mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
        else if (!mb_y)
            mode = LEFT_DC_PRED8x8;
    }
    return mode;
}

static av_always_inline
void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
                   uint8_t *intra4x4, int mb_x, int mb_y)
{
    int x, y, mode, nnz, tr;

    // for the first row, we need to run xchg_mb_border to init the top edge to 127
    // otherwise, skip it if we aren't going to deblock
    if (s->deblock_filter || !mb_y)
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 1);

    if (mb->mode < MODE_I4x4) {
        mode = check_intra_pred_mode(mb->mode, mb_x, mb_y);
        s->hpc.pred16x16[mode](dst[0], s->linesize);
    } else {
        uint8_t *ptr = dst[0];
        int stride = s->keyframe ? s->b4_stride : 4;

        // all blocks on the right edge of the macroblock use the bottom edge
        // of the top macroblock for their topright edge
        uint8_t *tr_right = ptr - s->linesize + 16;

        // if we're on the right edge of the frame, said edge is extended
        // from the top macroblock
        if (mb_x == s->mb_width-1) {
            tr = tr_right[-1]*0x01010101;
            tr_right = (uint8_t *)&tr;
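            /* Broadcasting the last valid pixel into a 4-byte scratch word
             * keeps the topright pointer dereferenceable at the frame's
             * right edge. */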
        }

        if (mb->skip)
            AV_ZERO128(s->non_zero_count_cache);

        for (y = 0; y < 4; y++) {
            uint8_t *topright = ptr + 4 - s->linesize;
            for (x = 0; x < 4; x++) {
                if (x == 3)
                    topright = tr_right;

                s->hpc.pred4x4[intra4x4[x]](ptr+4*x, topright, s->linesize);

                nnz = s->non_zero_count_cache[y][x];
                if (nnz) {
                    if (nnz == 1)
                        s->vp8dsp.vp8_idct_dc_add(ptr+4*x, s->block[y][x], s->linesize);
                    else
                        s->vp8dsp.vp8_idct_add(ptr+4*x, s->block[y][x], s->linesize);
                }
            }

            ptr += 4*s->linesize;
            intra4x4 += stride;
        }
    }

    mode = check_intra_pred_mode(s->chroma_pred_mode, mb_x, mb_y);
    s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
    s->hpc.pred8x8[mode](dst[2], s->uvlinesize);

    if (s->deblock_filter || !mb_y)
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 0);
}

/**
 * Generic MC function.
 *
 * @param s VP8 decoding context
 * @param luma 1 for luma (Y) planes, 0 for chroma (Cb/Cr) planes
 * @param dst target buffer for block data at block position
 * @param src reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block (16, 8 or 4)
 * @param block_h height of block (always same as block_w)
 * @param width width of src/dst plane data
 * @param height height of src/dst plane data
 * @param linesize size of a single line of plane data, including padding
 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
 */
static av_always_inline
void vp8_mc(VP8Context *s, int luma,
            uint8_t *dst, uint8_t *src, const VP56mv *mv,
            int x_off, int y_off, int block_w, int block_h,
            int width, int height, int linesize,
            vp8_mc_func mc_func[3][3])
{
    if (AV_RN32A(mv)) {
        static const uint8_t idx[8] = { 0, 1, 2, 1, 2, 1, 2, 1 };
        int mx = (mv->x << luma)&7, mx_idx = idx[mx];
        int my = (mv->y << luma)&7, my_idx = idx[my];

        x_off += mv->x >> (3 - luma);
        y_off += mv->y >> (3 - luma);

        // edge emulation
        src += y_off * linesize + x_off;
        if (x_off < 2 || x_off >= width  - block_w - 3 ||
            y_off < 2 || y_off >= height - block_h - 3) {
            ff_emulated_edge_mc(s->edge_emu_buffer, src - 2 * linesize - 2, linesize,
                                block_w + 5, block_h + 5,
                                x_off - 2, y_off - 2, width, height);
            src = s->edge_emu_buffer + 2 + linesize * 2;
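            /* The sixtap filter reads 2 pixels before and 3 after the block
             * in each direction, hence the (block+5)-sized emulated area and
             * the +2/+2 re-offset into it. */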
        }
        mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
    } else
        mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
}

static av_always_inline
void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
                 AVFrame *ref_frame, int x_off, int y_off,
                 int bx_off, int by_off,
                 int block_w, int block_h,
                 int width, int height, VP56mv *mv)
{
    VP56mv uvmv = *mv;

    /* Y */
    vp8_mc(s, 1, dst[0] + by_off * s->linesize + bx_off,
           ref_frame->data[0], mv, x_off + bx_off, y_off + by_off,
           block_w, block_h, width, height, s->linesize,
           s->put_pixels_tab[block_w == 8]);

    /* U/V */
    if (s->profile == 3) {
        /* this frame uses only full-pel chroma MVs */
        uvmv.x &= ~7;
        uvmv.y &= ~7;
    }
    x_off   >>= 1; y_off   >>= 1;
    bx_off  >>= 1; by_off  >>= 1;
    width   >>= 1; height  >>= 1;
    block_w >>= 1; block_h >>= 1;
    vp8_mc(s, 0, dst[1] + by_off * s->uvlinesize + bx_off,
           ref_frame->data[1], &uvmv, x_off + bx_off, y_off + by_off,
           block_w, block_h, width, height, s->uvlinesize,
           s->put_pixels_tab[1 + (block_w == 4)]);
    vp8_mc(s, 0, dst[2] + by_off * s->uvlinesize + bx_off,
           ref_frame->data[2], &uvmv, x_off + bx_off, y_off + by_off,
           block_w, block_h, width, height, s->uvlinesize,
           s->put_pixels_tab[1 + (block_w == 4)]);
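    /* put_pixels_tab is indexed by block width: [0] 16-wide, [1] 8-wide,
     * [2] 4-wide -- hence [block_w == 8] for luma above and
     * [1 + (block_w == 4)] for the half-size chroma blocks. */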
}

/* Fetch pixels for estimated mv 4 macroblocks ahead.
 * Optimized for 64-byte cache lines.  Inspired by ffh264 prefetch_motion. */
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
{
    /* Don't prefetch refs that haven't been used very often this frame. */
    if (s->ref_count[ref-1] > (mb_xy >> 5)) {
        int x_off = mb_x << 4, y_off = mb_y << 4;
        int mx = mb->mv.x + x_off + 8;
        int my = mb->mv.y + y_off;
        uint8_t **src= s->framep[ref]->data;
        int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
        s->dsp.prefetch(src[0]+off, s->linesize, 4);
        off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
        s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
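        /* Passing the U-to-V plane gap as the "stride" lets a single call
         * prefetch the same offset in both chroma planes. */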
    }
}

/**
 * Apply motion vectors to prediction buffer, chapter 18.
 */
static av_always_inline
void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
                   int mb_x, int mb_y)
{
    int x_off = mb_x << 4, y_off = mb_y << 4;
    int width = 16*s->mb_width, height = 16*s->mb_height;
    AVFrame *ref = s->framep[mb->ref_frame];
    VP56mv *bmv = mb->bmv;

    if (mb->mode < VP8_MVMODE_SPLIT) {
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 16, width, height, &mb->mv);
    } else switch (mb->partitioning) {
    case VP8_SPLITMVMODE_4x4: {
        int x, y;
        VP56mv uvmv;

        /* Y */
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                vp8_mc(s, 1, dst[0] + 4*y*s->linesize + x*4,
                       ref->data[0], &bmv[4*y + x],
                       4*x + x_off, 4*y + y_off, 4, 4,
                       width, height, s->linesize,
                       s->put_pixels_tab[2]);
            }
        }

        /* U/V */
        x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
        for (y = 0; y < 2; y++) {
            for (x = 0; x < 2; x++) {
                uvmv.x = mb->bmv[ 2*y    * 4 + 2*x  ].x +
                         mb->bmv[ 2*y    * 4 + 2*x+1].x +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].x +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].x;
                uvmv.y = mb->bmv[ 2*y    * 4 + 2*x  ].y +
                         mb->bmv[ 2*y    * 4 + 2*x+1].y +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].y +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].y;
                uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
                uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
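                /* Each chroma MV is the average of the four covering luma
                 * MVs; adding the sign bit before the >>2 makes negative
                 * sums round symmetrically with positive ones. */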
                if (s->profile == 3) {
                    /* this frame uses only full-pel chroma MVs */
                    uvmv.x &= ~7;
                    uvmv.y &= ~7;
                }
                vp8_mc(s, 0, dst[1] + 4*y*s->uvlinesize + x*4,
                       ref->data[1], &uvmv,
                       4*x + x_off, 4*y + y_off, 4, 4,
                       width, height, s->uvlinesize,
                       s->put_pixels_tab[2]);
                vp8_mc(s, 0, dst[2] + 4*y*s->uvlinesize + x*4,
                       ref->data[2], &uvmv,
                       4*x + x_off, 4*y + y_off, 4, 4,
                       width, height, s->uvlinesize,
                       s->put_pixels_tab[2]);
            }
        }
        break;
    }
    case VP8_SPLITMVMODE_16x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 16, 8, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x16:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 16, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 16, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 8, width, height, &bmv[1]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 8, 8, width, height, &bmv[2]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 8, 8, 8, width, height, &bmv[3]);
        break;
    }
}

static av_always_inline void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
{
    int x, y, ch;

    if (mb->mode != MODE_I4x4) {
        uint8_t *y_dst = dst[0];
        for (y = 0; y < 4; y++) {
            uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[y]);
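            /* Each cache byte is last-nonzero-index+1, so nnz4 == 0 means
             * the whole row of blocks is empty, and nnz4 & ~0x01010101 means
             * at least one block has more than a DC coefficient. */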
            if (nnz4) {
                if (nnz4&~0x01010101) {
                    for (x = 0; x < 4; x++) {
                        int nnz = s->non_zero_count_cache[y][x];
                        if (nnz) {
                            if (nnz == 1)
                                s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, s->block[y][x], s->linesize);
                            else
                                s->vp8dsp.vp8_idct_add(y_dst+4*x, s->block[y][x], s->linesize);
                        }
                    }
                } else {
                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
                }
            }
            y_dst += 4*s->linesize;
        }
    }

    for (ch = 0; ch < 2; ch++) {
        uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[4+ch]);
        if (nnz4) {
            uint8_t *ch_dst = dst[1+ch];
            if (nnz4&~0x01010101) {
                for (y = 0; y < 2; y++) {
                    for (x = 0; x < 2; x++) {
                        int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x];
                        if (nnz) {
                            if (nnz == 1)
                                s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                            else
                                s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                        }
                    }
                    ch_dst += 4*s->uvlinesize;
                }
            } else {
                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);
            }
        }
    }
}

static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f)
{
    int interior_limit, filter_level;

    if (s->segmentation.enabled) {
        filter_level = s->segmentation.filter_level[s->segment];
        if (!s->segmentation.absolute_vals)
            filter_level += s->filter.level;
    } else
        filter_level = s->filter.level;

    if (s->lf_delta.enabled) {
        filter_level += s->lf_delta.ref[mb->ref_frame];

        if (mb->ref_frame == VP56_FRAME_CURRENT) {
            if (mb->mode == MODE_I4x4)
                filter_level += s->lf_delta.mode[0];
        } else {
            if (mb->mode == VP8_MVMODE_ZERO)
                filter_level += s->lf_delta.mode[1];
            else if (mb->mode == VP8_MVMODE_SPLIT)
                filter_level += s->lf_delta.mode[3];
            else
                filter_level += s->lf_delta.mode[2];
        }
    }
    filter_level = av_clip(filter_level, 0, 63);

    interior_limit = filter_level;
    if (s->filter.sharpness) {
        interior_limit >>= s->filter.sharpness > 4 ? 2 : 1;
        interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
    }
    interior_limit = FFMAX(interior_limit, 1);

    f->filter_level = filter_level;
    f->inner_limit = interior_limit;
    f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
}

static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim, hev_thresh;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;
    int uvlinesize = s->uvlinesize;

    if (!filter_level)
        return;

    mbedge_lim = 2*(filter_level+2) + inner_limit;
    bedge_lim  = 2* filter_level    + inner_limit;
    hev_thresh = filter_level >= 15;
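    /* hev_thresh is the "high edge variance" threshold that selects the
     * stronger macroblock-edge filter; it is raised further below for high
     * filter levels, with inter frames allowed one extra step. */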

    if (s->keyframe) {
        if (filter_level >= 40)
            hev_thresh = 2;
    } else {
        if (filter_level >= 40)
            hev_thresh = 3;
        else if (filter_level >= 20)
            hev_thresh = 2;
    }

    if (mb_x) {
        s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);
    }

    if (mb_y) {
        s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
                                             dst[2] + 4 * uvlinesize,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);
    }
}

static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;

    if (!filter_level)
        return;

    mbedge_lim = 2*(filter_level+2) + inner_limit;
    bedge_lim  = 2* filter_level    + inner_limit;

    if (mb_x)
        s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);
    }

    if (mb_y)
        s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);
    }
}

static void filter_mb_row(VP8Context *s, int mb_y)
{
    VP8FilterStrength *f = s->filter_strength;
    uint8_t *dst[3] = {
        s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize,
        s->framep[VP56_FRAME_CURRENT]->data[1] +  8*mb_y*s->uvlinesize,
        s->framep[VP56_FRAME_CURRENT]->data[2] +  8*mb_y*s->uvlinesize
    };
    int mb_x;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        filter_mb(s, dst, f++, mb_x, mb_y);
        dst[0] += 16;
        dst[1] += 8;
        dst[2] += 8;
    }
}

static void filter_mb_row_simple(VP8Context *s, int mb_y)
{
    VP8FilterStrength *f = s->filter_strength;
    uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize;
    int mb_x;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst, NULL, NULL, s->linesize, 0, 1);
        filter_mb_simple(s, dst, f++, mb_x, mb_y);
        dst += 16;
    }
}

static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt)
{
    VP8Context *s = avctx->priv_data;
    int ret, mb_x, mb_y, i, y, referenced;
    enum AVDiscard skip_thresh;
    AVFrame *av_uninit(curframe);

    if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
        return ret;

    referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
                                || s->update_altref == VP56_FRAME_CURRENT;

    skip_thresh = !referenced ? AVDISCARD_NONREF :
                    !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;

    if (avctx->skip_frame >= skip_thresh) {
        s->invisible = 1;
        goto skip_decode;
    }
    s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;

    for (i = 0; i < 4; i++)
        if (&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
            curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
            break;
        }
    if (curframe->data[0])
        avctx->release_buffer(avctx, curframe);

    curframe->key_frame = s->keyframe;
    curframe->pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
    curframe->reference = referenced ? 3 : 0;
    if ((ret = avctx->get_buffer(avctx, curframe))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
        return ret;
    }

    // Given that arithmetic probabilities are updated every frame, it's quite likely
    // that the values we have on a random interframe are complete junk if we didn't
    // start decode on a keyframe. So just don't display anything rather than junk.
    if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
                         !s->framep[VP56_FRAME_GOLDEN] ||
                         !s->framep[VP56_FRAME_GOLDEN2])) {
        av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
        return AVERROR_INVALIDDATA;
    }

    s->linesize   = curframe->linesize[0];
    s->uvlinesize = curframe->linesize[1];

    if (!s->edge_emu_buffer)
        s->edge_emu_buffer = av_malloc(21*s->linesize);
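    /* 21 lines: a 16-line block plus the 5-line margin the sixtap filter
     * needs (2 above, 3 below); see vp8_mc(). */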

    memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));

    /* Zero macroblock structures for top/left prediction from outside the frame. */
    memset(s->macroblocks, 0, (s->mb_width + s->mb_height*2)*sizeof(*s->macroblocks));

    // top edge of 127 for intra prediction
    memset(s->top_border, 127, (s->mb_width+1)*sizeof(*s->top_border));
    memset(s->ref_count, 0, sizeof(s->ref_count));

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
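        /* num_coeff_partitions is a power of two, so the mask cycles the
         * rows through the coefficient partitions in order. */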
        VP8Macroblock *mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
        uint8_t *intra4x4 = s->intra4x4_pred_mode + 4*mb_y*s->b4_stride;
        uint8_t *segment_map = s->segmentation_map + mb_y*s->mb_stride;
        int mb_xy = mb_y * s->mb_stride;
        uint8_t *dst[3] = {
            curframe->data[0] + 16*mb_y*s->linesize,
            curframe->data[1] +  8*mb_y*s->uvlinesize,
            curframe->data[2] +  8*mb_y*s->uvlinesize
        };

        memset(s->left_nnz, 0, sizeof(s->left_nnz));

        // left edge of 129 for intra prediction
        if (!(avctx->flags & CODEC_FLAG_EMU_EDGE))
            for (i = 0; i < 3; i++)
                for (y = 0; y < 16>>!!i; y++)
                    dst[i][y*curframe->linesize[i]-1] = 129;
        if (mb_y)
            memset(s->top_border, 129, sizeof(*s->top_border));

        for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
            uint8_t *intra4x4_mb = s->keyframe ? intra4x4 + 4*mb_x : s->intra4x4_pred_mode_mb;
            uint8_t *segment_mb = segment_map+mb_x;

            /* Prefetch the current frame, 4 MBs ahead */
            s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
            s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);

            decode_mb_mode(s, mb, mb_x, mb_y, intra4x4_mb, segment_mb);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);

            if (!mb->skip)
                decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz);

            if (mb->mode <= MODE_I4x4)
                intra_predict(s, dst, mb, intra4x4_mb, mb_x, mb_y);
            else
                inter_predict(s, dst, mb, mb_x, mb_y);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);

            if (!mb->skip)
                idct_mb(s, dst, mb);
            else {
                AV_ZERO64(s->left_nnz);
                AV_WN64(s->top_nnz[mb_x], 0);   // array of 9, so unaligned

                // Reset DC block predictors if they would exist if the mb had coefficients
                if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
                    s->left_nnz[8]      = 0;
                    s->top_nnz[mb_x][8] = 0;
                }
            }

            if (s->deblock_filter)
                filter_level_for_mb(s, mb, &s->filter_strength[mb_x]);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);

            dst[0] += 16;
            dst[1] += 8;
            dst[2] += 8;
        }
        if (s->deblock_filter) {
            if (s->filter.simple)
                filter_mb_row_simple(s, mb_y);
            else
                filter_mb_row(s, mb_y);
        }
    }

skip_decode:
    // if future frames don't use the updated probabilities,
    // reset them to the values we saved
    if (!s->update_probabilities)
        s->prob[0] = s->prob[1];

    // check if golden and altref are swapped
    if (s->update_altref == VP56_FRAME_GOLDEN &&
        s->update_golden == VP56_FRAME_GOLDEN2)
        FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN], s->framep[VP56_FRAME_GOLDEN2]);
    else {
        if (s->update_altref != VP56_FRAME_NONE)
            s->framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];

        if (s->update_golden != VP56_FRAME_NONE)
            s->framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
    }

    if (s->update_last) // move cur->prev
        s->framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_CURRENT];

    // release no longer referenced frames
    for (i = 0; i < 4; i++)
        if (s->frames[i].data[0] &&
            &s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
            avctx->release_buffer(avctx, &s->frames[i]);

    if (!s->invisible) {
        *(AVFrame*)data = *s->framep[VP56_FRAME_CURRENT];
        *data_size = sizeof(AVFrame);
    }

    return avpkt->size;
}

static av_cold int vp8_decode_init(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_YUV420P;

    dsputil_init(&s->dsp, avctx);
    ff_h264_pred_init(&s->hpc, CODEC_ID_VP8);
    ff_vp8dsp_init(&s->vp8dsp);

    // intra pred needs edge emulation among other things
    if (avctx->flags&CODEC_FLAG_EMU_EDGE) {
        av_log(avctx, AV_LOG_ERROR, "Edge emulation not supported\n");
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}

static av_cold int vp8_decode_free(AVCodecContext *avctx)
{
    vp8_decode_flush(avctx);
    return 0;
}

AVCodec vp8_decoder = {
    "vp8",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_VP8,
    sizeof(VP8Context),
    vp8_decode_init,
    NULL,
    vp8_decode_free,
    vp8_decode_frame,
    CODEC_CAP_DR1,
    .flush = vp8_decode_flush,
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
};