2 * VP8 compatible video decoder
4 * Copyright (C) 2010 David Conrad
5 * Copyright (C) 2010 Ronald S. Bultje
6 * Copyright (C) 2010 Jason Garrett-Glaser
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
30 #include "rectangle.h"
40 // todo: make it possible to check for at least (i4x4 or split_mv)
41 // in one op. are others needed?
50 AVCodecContext *avctx;
54 vp8_mc_func put_pixels_tab[3][3][3];
57 uint8_t *edge_emu_buffer;
58 VP56RangeCoder c; ///< header context, includes mb modes and motion vectors
61 int mb_width; /* number of horizontal MB */
62 int mb_height; /* number of vertical MB */
68 int update_last; ///< update VP56_FRAME_PREVIOUS with the current one
69 int update_golden; ///< VP56_FRAME_NONE if not updated, or which frame to copy if so
74 * If this flag is not set, all the probability updates
75 * are discarded after this frame is decoded.
77 int update_probabilities;
80 * All coefficients are contained in separate arith coding contexts.
81 * There can be 1, 2, 4, or 8 of these after the header context.
83 int num_coeff_partitions;
84 VP56RangeCoder coeff_partition[8];
86 VP8Macroblock *macroblocks;
87 VP8Macroblock *macroblocks_base;
88 VP8FilterStrength *filter_strength;
91 uint8_t *intra4x4_pred_mode;
92 uint8_t *intra4x4_pred_mode_base;
93 uint8_t *segmentation_map;
97 * Cache of the top row needed for intra prediction
98 * 16 for luma, 8 for each chroma plane
100 uint8_t (*top_border)[16+8+8];
103 * For coeff decode, we need to know whether the above block had non-zero
104 * coefficients. This means for each macroblock, we need data for 4 luma
105 * blocks, 2 u blocks, 2 v blocks, and the luma dc block, for a total of 9
106 * per macroblock. We keep the last row in top_nnz.
108 uint8_t (*top_nnz)[9];
109 DECLARE_ALIGNED(8, uint8_t, left_nnz)[9];
112 * This is the index plus one of the last non-zero coeff
113 * for each of the blocks in the current macroblock.
115 * 1 -> dc-only (special transform)
116 * 2+-> full transform
118 DECLARE_ALIGNED(16, uint8_t, non_zero_count_cache)[6][4];
119 DECLARE_ALIGNED(16, DCTELEM, block)[6][4][16];
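// layout of non_zero_count_cache and block: rows 0-3 hold the macroblock's
// 4x4 luma blocks, row 4 the U blocks and row 5 the V blocks (chroma is
// indexed as (y<<1)|x); see decode_mb_coeffs() and idct_mb()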
120 uint8_t intra4x4_pred_mode_mb[16];
122 int chroma_pred_mode; ///< 8x8c pred mode of the current macroblock
123 int segment; ///< segment of the current macroblock
126 int sign_bias[4]; ///< one state [0, 1] per ref frame type
130 * Base parameters for segmentation, i.e. per-macroblock parameters.
131 * These must be kept unchanged even if segmentation is not used for
132 * a frame, since the values persist between interframes.
138 int8_t base_quant[4];
139 int8_t filter_level[4]; ///< base loop filter level
143 * Macroblocks can have one of 4 different quants in a frame when
144 * segmentation is enabled.
145 * If segmentation is disabled, only the first segment's values are used.
148 // [0] - DC qmul [1] - AC qmul
149 int16_t luma_qmul[2];
150 int16_t luma_dc_qmul[2]; ///< luma dc-only block quant
151 int16_t chroma_qmul[2];
161 int enabled; ///< whether each mb can have a different strength based on mode/ref
164 * filter strength adjustment for the following macroblock modes:
165 * [0] - i4x4
166 * [1] - zero mv
167 * [2] - inter modes except for zero or split mv
168 * [3] - split mv
169 * i16x16 modes never have any adjustment
174 * filter strength adjustment for macroblocks that reference:
175 * [0] - intra / VP56_FRAME_CURRENT
176 * [1] - VP56_FRAME_PREVIOUS
177 * [2] - VP56_FRAME_GOLDEN
178 * [3] - altref / VP56_FRAME_GOLDEN2
184 * These are all of the updatable probabilities for binary decisions.
185 * They are only implicitly reset on keyframes, making it quite likely
186 * for an interframe to desync if a prior frame's header was corrupt
187 * or missing outright!
190 uint8_t segmentid[3];
195 uint8_t pred16x16[4];
197 uint8_t token[4][8][3][NUM_DCT_TOKENS-1];
202 static void vp8_decode_flush(AVCodecContext *avctx)
204 VP8Context *s = avctx->priv_data;
207 for (i = 0; i < 4; i++)
208 if (s->frames[i].data[0])
209 avctx->release_buffer(avctx, &s->frames[i]);
210 memset(s->framep, 0, sizeof(s->framep));
212 av_freep(&s->macroblocks_base);
213 av_freep(&s->intra4x4_pred_mode_base);
214 av_freep(&s->top_nnz);
215 av_freep(&s->edge_emu_buffer);
216 av_freep(&s->top_border);
217 av_freep(&s->segmentation_map);
219 s->macroblocks = NULL;
220 s->intra4x4_pred_mode = NULL;
223 static int update_dimensions(VP8Context *s, int width, int height)
227 if (avcodec_check_dimensions(s->avctx, width, height))
228 return AVERROR_INVALIDDATA;
230 vp8_decode_flush(s->avctx);
232 avcodec_set_dimensions(s->avctx, width, height);
234 s->mb_width = (s->avctx->coded_width +15) / 16;
235 s->mb_height = (s->avctx->coded_height+15) / 16;
237 // we allocate a border around the top/left of intra4x4 modes
238 // this is 4 blocks for intra4x4 to keep 4-byte alignment for fill_rectangle
239 s->mb_stride = s->mb_width+1;
240 s->b4_stride = 4*s->mb_stride;
242 s->macroblocks_base = av_mallocz((s->mb_stride+s->mb_height*2+2)*sizeof(*s->macroblocks));
243 s->filter_strength = av_mallocz(s->mb_stride*sizeof(*s->filter_strength));
244 s->intra4x4_pred_mode_base = av_mallocz(s->b4_stride*(4*s->mb_height+1));
245 s->top_nnz = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
246 s->top_border = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));
247 s->segmentation_map = av_mallocz(s->mb_stride*s->mb_height);
249 if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_base ||
250 !s->top_nnz || !s->top_border || !s->segmentation_map)
251 return AVERROR(ENOMEM);
253 s->macroblocks = s->macroblocks_base + 1;
254 s->intra4x4_pred_mode = s->intra4x4_pred_mode_base + 4 + s->b4_stride;
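// 4x4 blocks outside the frame are treated as DC_PRED when picking the
// keyframe mode contexts, so the top/left border entries are initialized
// to DC_PRED here.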
256 memset(s->intra4x4_pred_mode_base, DC_PRED, s->b4_stride);
257 for (i = 0; i < 4*s->mb_height; i++)
258 s->intra4x4_pred_mode[i*s->b4_stride-1] = DC_PRED;
263 static void parse_segment_info(VP8Context *s)
265 VP56RangeCoder *c = &s->c;
268 s->segmentation.update_map = vp8_rac_get(c);
270 if (vp8_rac_get(c)) { // update segment feature data
271 s->segmentation.absolute_vals = vp8_rac_get(c);
273 for (i = 0; i < 4; i++)
274 s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);
276 for (i = 0; i < 4; i++)
277 s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
279 if (s->segmentation.update_map)
280 for (i = 0; i < 3; i++)
281 s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
284 static void update_lf_deltas(VP8Context *s)
286 VP56RangeCoder *c = &s->c;
289 for (i = 0; i < 4; i++)
290 s->lf_delta.ref[i] = vp8_rac_get_sint(c, 6);
292 for (i = 0; i < 4; i++)
293 s->lf_delta.mode[i] = vp8_rac_get_sint(c, 6);
296 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
298 const uint8_t *sizes = buf;
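// The next 2 bits in the header partition give log2 of the token partition
// count (1, 2, 4 or 8). The sizes of all but the last partition are stored
// as 3-byte little-endian values right after the header; the last partition
// simply uses whatever buffer space remains.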
301 s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
303 buf += 3*(s->num_coeff_partitions-1);
304 buf_size -= 3*(s->num_coeff_partitions-1);
308 for (i = 0; i < s->num_coeff_partitions-1; i++) {
309 int size = AV_RL24(sizes + 3*i);
310 if (buf_size - size < 0)
313 vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
317 vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
322 static void get_quants(VP8Context *s)
324 VP56RangeCoder *c = &s->c;
327 int yac_qi = vp8_rac_get_uint(c, 7);
328 int ydc_delta = vp8_rac_get_sint(c, 4);
329 int y2dc_delta = vp8_rac_get_sint(c, 4);
330 int y2ac_delta = vp8_rac_get_sint(c, 4);
331 int uvdc_delta = vp8_rac_get_sint(c, 4);
332 int uvac_delta = vp8_rac_get_sint(c, 4);
334 for (i = 0; i < 4; i++) {
335 if (s->segmentation.enabled) {
336 base_qi = s->segmentation.base_quant[i];
337 if (!s->segmentation.absolute_vals)
342 s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + ydc_delta , 0, 127)];
343 s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi , 0, 127)];
344 s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip(base_qi + y2dc_delta, 0, 127)];
345 s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip(base_qi + y2ac_delta, 0, 127)] / 100;
346 s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + uvdc_delta, 0, 127)];
347 s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi + uvac_delta, 0, 127)];
349 s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
350 s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
355 * Determine which buffers golden and altref should be updated with after this frame.
356 * The spec isn't clear here, so I'm going by my understanding of what libvpx does.
358 * Intra frames update all 3 references
359 * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
360 * If the update (golden|altref) flag is set, it's updated with the current frame
361 * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
362 * If the flag is not set, the number read means:
363 * 0: no update
364 * 1: VP56_FRAME_PREVIOUS
365 * 2: update golden with altref, or update altref with golden
367 static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
369 VP56RangeCoder *c = &s->c;
372 return VP56_FRAME_CURRENT;
374 switch (vp8_rac_get_uint(c, 2)) {
376 return VP56_FRAME_PREVIOUS;
378 return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
380 return VP56_FRAME_NONE;
383 static void update_refs(VP8Context *s)
385 VP56RangeCoder *c = &s->c;
387 int update_golden = vp8_rac_get(c);
388 int update_altref = vp8_rac_get(c);
390 s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
391 s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
394 static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
396 VP56RangeCoder *c = &s->c;
397 int header_size, hscale, vscale, i, j, k, l, ret;
398 int width = s->avctx->width;
399 int height = s->avctx->height;
401 s->keyframe = !(buf[0] & 1);
402 s->profile = (buf[0]>>1) & 7;
403 s->invisible = !(buf[0] & 0x10);
404 header_size = AV_RL24(buf) >> 5;
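// 3-byte frame tag, little-endian: bit 0 = !keyframe, bits 1-3 = profile,
// bit 4 = show_frame, bits 5-23 = size of the first (header) partition.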
409 av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
412 memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
413 else // profile 1-3 use bilinear, 4+ aren't defined so whatever
414 memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));
416 if (header_size > buf_size - 7*s->keyframe) {
417 av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
418 return AVERROR_INVALIDDATA;
422 if (AV_RL24(buf) != 0x2a019d) {
423 av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
424 return AVERROR_INVALIDDATA;
426 width = AV_RL16(buf+3) & 0x3fff;
427 height = AV_RL16(buf+5) & 0x3fff;
428 hscale = buf[4] >> 6;
429 vscale = buf[6] >> 6;
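// Keyframes add a 7-byte header: the start code 0x9d012a (read above as
// little-endian 0x2a019d) followed by 14-bit width and height, each with
// a 2-bit upscaling factor in the top bits of its second byte.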
433 if (hscale || vscale)
434 av_log_missing_feature(s->avctx, "Upscaling", 1);
436 s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
437 memcpy(s->prob->token , vp8_token_default_probs , sizeof(s->prob->token));
438 memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
439 memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
440 memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
441 memset(&s->segmentation, 0, sizeof(s->segmentation));
444 if (!s->macroblocks_base || /* first frame */
445 width != s->avctx->width || height != s->avctx->height) {
446 if ((ret = update_dimensions(s, width, height)) < 0)
450 vp56_init_range_decoder(c, buf, header_size);
452 buf_size -= header_size;
456 av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
457 vp8_rac_get(c); // whether we can skip clamping in dsp functions
460 if ((s->segmentation.enabled = vp8_rac_get(c)))
461 parse_segment_info(s);
463 s->segmentation.update_map = 0; // FIXME: move this to some init function?
465 s->filter.simple = vp8_rac_get(c);
466 s->filter.level = vp8_rac_get_uint(c, 6);
467 s->filter.sharpness = vp8_rac_get_uint(c, 3);
469 if ((s->lf_delta.enabled = vp8_rac_get(c)))
473 if (setup_partitions(s, buf, buf_size)) {
474 av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
475 return AVERROR_INVALIDDATA;
482 s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
483 s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
486 // if we aren't saving this frame's probabilities for future frames,
487 // make a copy of the current probabilities
488 if (!(s->update_probabilities = vp8_rac_get(c)))
489 s->prob[1] = s->prob[0];
491 s->update_last = s->keyframe || vp8_rac_get(c);
493 for (i = 0; i < 4; i++)
494 for (j = 0; j < 8; j++)
495 for (k = 0; k < 3; k++)
496 for (l = 0; l < NUM_DCT_TOKENS-1; l++)
497 if (vp56_rac_get_prob(c, vp8_token_update_probs[i][j][k][l]))
498 s->prob->token[i][j][k][l] = vp8_rac_get_uint(c, 8);
500 if ((s->mbskip_enabled = vp8_rac_get(c)))
501 s->prob->mbskip = vp8_rac_get_uint(c, 8);
504 s->prob->intra = vp8_rac_get_uint(c, 8);
505 s->prob->last = vp8_rac_get_uint(c, 8);
506 s->prob->golden = vp8_rac_get_uint(c, 8);
509 for (i = 0; i < 4; i++)
510 s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
512 for (i = 0; i < 3; i++)
513 s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
515 // 17.2 MV probability update
516 for (i = 0; i < 2; i++)
517 for (j = 0; j < 19; j++)
518 if (vp56_rac_get_prob(c, vp8_mv_update_prob[i][j]))
519 s->prob->mvc[i][j] = vp8_rac_get_nn(c);
525 static av_always_inline
526 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src, int mb_x, int mb_y)
528 #define MARGIN (16 << 2)
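// MVs are stored in quarter-pel units, so a macroblock position in MV space
// is mb_x << 6 (16 pixels * 4); MARGIN lets vectors point up to 16 pixels
// outside the visible frame.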
529 dst->x = av_clip(src->x, -((mb_x << 6) + MARGIN),
530 ((s->mb_width - 1 - mb_x) << 6) + MARGIN);
531 dst->y = av_clip(src->y, -((mb_y << 6) + MARGIN),
532 ((s->mb_height - 1 - mb_y) << 6) + MARGIN);
535 static av_always_inline
536 void find_near_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
537 VP56mv near[2], VP56mv *best, uint8_t cnt[4])
539 VP8Macroblock *mb_edge[3] = { mb + 2 /* top */,
541 mb + 1 /* top-left */ };
542 enum { EDGE_TOP, EDGE_LEFT, EDGE_TOPLEFT };
543 VP56mv near_mv[4] = {{ 0 }};
544 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
546 int best_idx = CNT_ZERO;
547 int cur_sign_bias = s->sign_bias[mb->ref_frame];
548 int *sign_bias = s->sign_bias;
550 /* Process MB on top, left and top-left */
551 #define MV_EDGE_CHECK(n)\
553 VP8Macroblock *edge = mb_edge[n];\
554 int edge_ref = edge->ref_frame;\
555 if (edge_ref != VP56_FRAME_CURRENT) {\
556 uint32_t mv = AV_RN32A(&edge->mv);\
558 if (cur_sign_bias != sign_bias[edge_ref]) {\
559 /* SWAR negate of the values in mv. */\
560 mv = ~mv;\
561 mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
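/* e.g. with halfwords (1, -1), mv = 0x0001FFFF: ~mv = 0xFFFE0000 and */\
/* the carry-safe +1 per halfword gives 0xFFFF0001, i.e. (-1, 1). */\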
563 if (!n || mv != AV_RN32A(&near_mv[idx]))\
564 AV_WN32A(&near_mv[++idx], mv);\
565 cnt[idx] += 1 + (n != 2);\
567 cnt[CNT_ZERO] += 1 + (n != 2);\
574 /* If we have three distinct MVs, merge first and last if they're the same */
575 if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1+EDGE_TOP]) == AV_RN32A(&near_mv[1+EDGE_TOPLEFT]))
576 cnt[CNT_NEAREST] += 1;
578 cnt[CNT_SPLITMV] = ((mb_edge[EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
579 (mb_edge[EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
580 (mb_edge[EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
582 /* Swap near and nearest if necessary */
583 if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
584 FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
585 FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
588 /* Choose the best mv out of 0,0 and the nearest mv */
589 if (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])
590 best_idx = CNT_NEAREST;
592 mb->mv = near_mv[best_idx];
593 near[0] = near_mv[CNT_NEAREST];
594 near[1] = near_mv[CNT_NEAR];
598 * Motion vector coding, 17.1.
600 static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
604 if (vp56_rac_get_prob(c, p[0])) {
607 for (i = 0; i < 3; i++)
608 x += vp56_rac_get_prob(c, p[9 + i]) << i;
609 for (i = 9; i > 3; i--)
610 x += vp56_rac_get_prob(c, p[9 + i]) << i;
611 if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
614 x = vp8_rac_get_tree(c, vp8_small_mvtree, &p[2]);
616 return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
619 static av_always_inline
620 const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
623 return vp8_submv_prob[4-!!left];
625 return vp8_submv_prob[2];
626 return vp8_submv_prob[1-!!left];
630 * Split motion vector prediction, 16.4.
631 * @returns the number of motion vectors parsed (2, 4 or 16)
633 static av_always_inline
634 int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb)
636 int part_idx = mb->partitioning =
637 vp8_rac_get_tree(c, vp8_mbsplit_tree, vp8_mbsplit_prob);
638 int n, num = vp8_mbsplit_count[part_idx];
639 VP8Macroblock *top_mb = &mb[2];
640 VP8Macroblock *left_mb = &mb[-1];
641 const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
642 *mbsplits_top = vp8_mbsplits[top_mb->partitioning],
643 *mbsplits_cur = vp8_mbsplits[part_idx],
644 *firstidx = vp8_mbfirstidx[part_idx];
645 VP56mv *top_mv = top_mb->bmv;
646 VP56mv *left_mv = left_mb->bmv;
647 VP56mv *cur_mv = mb->bmv;
649 for (n = 0; n < num; n++) {
651 uint32_t left, above;
652 const uint8_t *submv_prob;
655 left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
657 left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
659 above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
661 above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
663 submv_prob = get_submv_prob(left, above);
665 switch (vp8_rac_get_tree(c, vp8_submv_ref_tree, submv_prob)) {
666 case VP8_SUBMVMODE_NEW4X4:
667 mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
668 mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
670 case VP8_SUBMVMODE_ZERO4X4:
671 AV_ZERO32(&mb->bmv[n]);
673 case VP8_SUBMVMODE_LEFT4X4:
674 AV_WN32A(&mb->bmv[n], left);
676 case VP8_SUBMVMODE_TOP4X4:
677 AV_WN32A(&mb->bmv[n], above);
685 static av_always_inline
686 void decode_intra4x4_modes(VP56RangeCoder *c, uint8_t *intra4x4,
687 int stride, int keyframe)
693 for (y = 0; y < 4; y++) {
694 for (x = 0; x < 4; x++) {
695 t = intra4x4[x - stride];
696 l = intra4x4[x - 1];
697 ctx = vp8_pred4x4_prob_intra[t][l];
698 intra4x4[x] = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
703 for (i = 0; i < 16; i++)
704 intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
708 static av_always_inline
709 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
710 uint8_t *intra4x4, uint8_t *segment)
712 VP56RangeCoder *c = &s->c;
714 if (s->segmentation.update_map)
715 *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
716 s->segment = *segment;
718 mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
721 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);
723 if (mb->mode == MODE_I4x4) {
724 decode_intra4x4_modes(c, intra4x4, s->b4_stride, 1);
726 fill_rectangle(intra4x4, 4, 4, s->b4_stride, vp8_pred4x4_mode[mb->mode], 1);
728 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
729 mb->ref_frame = VP56_FRAME_CURRENT;
730 } else if (vp56_rac_get_prob(c, s->prob->intra)) {
731 VP56mv near[2], best;
732 uint8_t cnt[4] = { 0 };
736 if (vp56_rac_get_prob(c, s->prob->last))
737 mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
738 VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
740 mb->ref_frame = VP56_FRAME_PREVIOUS;
741 s->ref_count[mb->ref_frame-1]++;
743 // motion vectors, 16.3
744 find_near_mvs(s, mb, mb_x, mb_y, near, &best, cnt);
745 p[0] = vp8_mode_contexts[cnt[0]][0];
746 p[1] = vp8_mode_contexts[cnt[1]][1];
747 p[2] = vp8_mode_contexts[cnt[2]][2];
748 p[3] = vp8_mode_contexts[cnt[3]][3];
749 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_mvinter, p);
751 case VP8_MVMODE_SPLIT:
752 clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
753 mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1];
755 case VP8_MVMODE_ZERO:
758 case VP8_MVMODE_NEAREST:
759 clamp_mv(s, &mb->mv, &near[0], mb_x, mb_y);
761 case VP8_MVMODE_NEAR:
762 clamp_mv(s, &mb->mv, &near[1], mb_x, mb_y);
765 clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
766 mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
767 mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
770 if (mb->mode != VP8_MVMODE_SPLIT) {
771 mb->partitioning = VP8_SPLITMVMODE_NONE;
776 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);
778 if (mb->mode == MODE_I4x4)
779 decode_intra4x4_modes(c, intra4x4, 4, 0);
781 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
782 mb->ref_frame = VP56_FRAME_CURRENT;
783 mb->partitioning = VP8_SPLITMVMODE_NONE;
784 AV_ZERO32(&mb->bmv[0]);
789 * @param c arithmetic bitstream reader context
790 * @param block destination for block coefficients
791 * @param probs probabilities to use when reading trees from the bitstream
792 * @param i initial coeff index, 0 unless a separate DC block is coded
793 * @param zero_nhood the initial prediction context for number of surrounding
794 * all-zero blocks (only left/top, so 0-2)
795 * @param qmul array holding the dc/ac dequant factor at position 0/1
796 * @return 0 if no coeffs were decoded
797 * otherwise, the index of the last coeff decoded plus one
799 static int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16],
800 uint8_t probs[8][3][NUM_DCT_TOKENS-1],
801 int i, int zero_nhood, int16_t qmul[2])
803 int token, nonzero = 0;
806 for (; i < 16; i++) {
807 token = vp8_rac_get_tree_with_offset(c, vp8_coeff_tree, probs[vp8_coeff_band[i]][zero_nhood], offset);
809 if (token == DCT_EOB)
811 else if (token >= DCT_CAT1) {
812 int cat = token-DCT_CAT1;
813 token = vp8_rac_get_coeff(c, vp8_dct_cat_prob[cat]);
814 token += 3 + (2<<cat);
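// DCT_CAT1..CAT6 cover magnitudes starting at 5, 7, 11, 19, 35 and 67
// (i.e. 3 + (2<<cat)); the category bits read above select the exact
// value within that range.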
817 // after the first token, the non-zero prediction context becomes
818 // based on the last decoded coeff
823 } else if (token == 1)
828 // todo: full [16] qmat? load into register?
829 block[zigzag_scan[i]] = (vp8_rac_get(c) ? -token : token) * qmul[!!i];
836 static av_always_inline
837 void decode_mb_coeffs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
838 uint8_t t_nnz[9], uint8_t l_nnz[9])
840 LOCAL_ALIGNED_16(DCTELEM, dc,[16]);
841 int i, x, y, luma_start = 0, luma_ctx = 3;
842 int nnz_pred, nnz, nnz_total = 0;
843 int segment = s->segment;
845 if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
848 nnz_pred = t_nnz[8] + l_nnz[8];
850 // decode DC values and do hadamard
851 nnz = decode_block_coeffs(c, dc, s->prob->token[1], 0, nnz_pred,
852 s->qmat[segment].luma_dc_qmul);
853 l_nnz[8] = t_nnz[8] = !!nnz;
855 s->vp8dsp.vp8_luma_dc_wht(s->block, dc);
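// When a separate DC (Y2) block is coded, the luma blocks below start at
// coefficient 1 and use a different token context, tracked via luma_start
// and luma_ctx.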
861 for (y = 0; y < 4; y++)
862 for (x = 0; x < 4; x++) {
863 nnz_pred = l_nnz[y] + t_nnz[x];
864 nnz = decode_block_coeffs(c, s->block[y][x], s->prob->token[luma_ctx], luma_start,
865 nnz_pred, s->qmat[segment].luma_qmul);
866 // nnz+luma_start may be one more than the actual last index, but we don't care
867 s->non_zero_count_cache[y][x] = nnz + luma_start;
868 t_nnz[x] = l_nnz[y] = !!nnz;
873 // TODO: what to do about dimensions? 2nd dim for luma is x,
874 // but for chroma it's (y<<1)|x
875 for (i = 4; i < 6; i++)
876 for (y = 0; y < 2; y++)
877 for (x = 0; x < 2; x++) {
878 nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
879 nnz = decode_block_coeffs(c, s->block[i][(y<<1)+x], s->prob->token[2], 0,
880 nnz_pred, s->qmat[segment].chroma_qmul);
881 s->non_zero_count_cache[i][(y<<1)+x] = nnz;
882 t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
886 // if there were no coded coeffs despite the macroblock not being marked skip,
887 // we MUST not do the inner loop filter and should not do IDCT
888 // Since skip isn't used for bitstream prediction, just manually set it.
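/**
 * The bottom row of each macroblock is saved in top_border before the loop
 * filter modifies it, so that intra prediction of the row below (via
 * xchg_mb_border()) still sees the unfiltered reconstruction.
 */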
893 static av_always_inline
894 void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
895 int linesize, int uvlinesize, int simple)
897 AV_COPY128(top_border, src_y + 15*linesize);
899 AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
900 AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
904 static av_always_inline
905 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
906 int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
907 int simple, int xchg)
909 uint8_t *top_border_m1 = top_border-32; // for TL prediction
911 src_cb -= uvlinesize;
912 src_cr -= uvlinesize;
914 #define XCHG(a,b,xchg) do { \
915 if (xchg) AV_SWAP64(b,a); \
916 else AV_COPY64(b,a); \
919 XCHG(top_border_m1+8, src_y-8, xchg);
920 XCHG(top_border, src_y, xchg);
921 XCHG(top_border+8, src_y+8, 1);
922 if (mb_x < mb_width-1)
923 XCHG(top_border+32, src_y+16, 1);
925 // only copy chroma for normal loop filter
926 // or to initialize the top row to 127
927 if (!simple || !mb_y) {
928 XCHG(top_border_m1+16, src_cb-8, xchg);
929 XCHG(top_border_m1+24, src_cr-8, xchg);
930 XCHG(top_border+16, src_cb, 1);
931 XCHG(top_border+24, src_cr, 1);
935 static av_always_inline
936 int check_intra_pred_mode(int mode, int mb_x, int mb_y)
938 if (mode == DC_PRED8x8) {
940 mode = mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
942 mode = LEFT_DC_PRED8x8;
948 static av_always_inline
949 void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
950 uint8_t *intra4x4, int mb_x, int mb_y)
952 int x, y, mode, nnz, tr;
954 // for the first row, we need to run xchg_mb_border to init the top edge to 127
955 // otherwise, skip it if we aren't going to deblock
956 if (s->deblock_filter || !mb_y)
957 xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
958 s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
959 s->filter.simple, 1);
961 if (mb->mode < MODE_I4x4) {
962 mode = check_intra_pred_mode(mb->mode, mb_x, mb_y);
963 s->hpc.pred16x16[mode](dst[0], s->linesize);
965 uint8_t *ptr = dst[0];
966 int stride = s->keyframe ? s->b4_stride : 4;
968 // all blocks on the right edge of the macroblock use the bottom edge of
969 // the top macroblock for their topright edge
970 uint8_t *tr_right = ptr - s->linesize + 16;
972 // if we're on the right edge of the frame, said edge is extended
973 // from the top macroblock
974 if (mb_x == s->mb_width-1) {
975 tr = tr_right[-1]*0x01010101;
976 tr_right = (uint8_t *)&tr;
980 AV_ZERO128(s->non_zero_count_cache);
982 for (y = 0; y < 4; y++) {
983 uint8_t *topright = ptr + 4 - s->linesize;
984 for (x = 0; x < 4; x++) {
988 s->hpc.pred4x4[intra4x4[x]](ptr+4*x, topright, s->linesize);
990 nnz = s->non_zero_count_cache[y][x];
993 s->vp8dsp.vp8_idct_dc_add(ptr+4*x, s->block[y][x], s->linesize);
995 s->vp8dsp.vp8_idct_add(ptr+4*x, s->block[y][x], s->linesize);
1000 ptr += 4*s->linesize;
1005 mode = check_intra_pred_mode(s->chroma_pred_mode, mb_x, mb_y);
1006 s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1007 s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1009 if (s->deblock_filter || !mb_y)
1010 xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
1011 s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1012 s->filter.simple, 0);
1016 * Generic MC function.
1018 * @param s VP8 decoding context
1019 * @param luma 1 for luma (Y) planes, 0 for chroma (Cb/Cr) planes
1020 * @param dst target buffer for block data at block position
1021 * @param src reference picture buffer at origin (0, 0)
1022 * @param mv motion vector (relative to block position) to get pixel data from
1023 * @param x_off horizontal position of block from origin (0, 0)
1024 * @param y_off vertical position of block from origin (0, 0)
1025 * @param block_w width of block (16, 8 or 4)
1026 * @param block_h height of block (always same as block_w)
1027 * @param width width of src/dst plane data
1028 * @param height height of src/dst plane data
1029 * @param linesize size of a single line of plane data, including padding
1030 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1032 static av_always_inline
1033 void vp8_mc(VP8Context *s, int luma,
1034 uint8_t *dst, uint8_t *src, const VP56mv *mv,
1035 int x_off, int y_off, int block_w, int block_h,
1036 int width, int height, int linesize,
1037 vp8_mc_func mc_func[3][3])
1040 static const uint8_t idx[8] = { 0, 1, 2, 1, 2, 1, 2, 1 };
1041 int mx = (mv->x << luma)&7, mx_idx = idx[mx];
1042 int my = (mv->y << luma)&7, my_idx = idx[my];
1044 x_off += mv->x >> (3 - luma);
1045 y_off += mv->y >> (3 - luma);
1048 src += y_off * linesize + x_off;
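// The subpel filters read up to 2 pixels above/left and 3 below/right of
// the block, so blocks too close to the frame edge are first copied into
// edge_emu_buffer with their borders replicated.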
1049 if (x_off < 2 || x_off >= width - block_w - 3 ||
1050 y_off < 2 || y_off >= height - block_h - 3) {
1051 ff_emulated_edge_mc(s->edge_emu_buffer, src - 2 * linesize - 2, linesize,
1052 block_w + 5, block_h + 5,
1053 x_off - 2, y_off - 2, width, height);
1054 src = s->edge_emu_buffer + 2 + linesize * 2;
1056 mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
1058 mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
1061 static av_always_inline
1062 void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
1063 AVFrame *ref_frame, int x_off, int y_off,
1064 int bx_off, int by_off,
1065 int block_w, int block_h,
1066 int width, int height, VP56mv *mv)
1071 vp8_mc(s, 1, dst[0] + by_off * s->linesize + bx_off,
1072 ref_frame->data[0], mv, x_off + bx_off, y_off + by_off,
1073 block_w, block_h, width, height, s->linesize,
1074 s->put_pixels_tab[block_w == 8]);
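// chroma is 4:2:0, so all positions and sizes are halved below;
// profile 3 additionally uses full-pel chroma MVs.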
1077 if (s->profile == 3) {
1081 x_off >>= 1; y_off >>= 1;
1082 bx_off >>= 1; by_off >>= 1;
1083 width >>= 1; height >>= 1;
1084 block_w >>= 1; block_h >>= 1;
1085 vp8_mc(s, 0, dst[1] + by_off * s->uvlinesize + bx_off,
1086 ref_frame->data[1], &uvmv, x_off + bx_off, y_off + by_off,
1087 block_w, block_h, width, height, s->uvlinesize,
1088 s->put_pixels_tab[1 + (block_w == 4)]);
1089 vp8_mc(s, 0, dst[2] + by_off * s->uvlinesize + bx_off,
1090 ref_frame->data[2], &uvmv, x_off + bx_off, y_off + by_off,
1091 block_w, block_h, width, height, s->uvlinesize,
1092 s->put_pixels_tab[1 + (block_w == 4)]);
1095 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1096 * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1097 static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
1099 /* Don't prefetch refs that haven't been used very often this frame. */
1100 if (s->ref_count[ref-1] > (mb_xy >> 5)) {
1101 int x_off = mb_x << 4, y_off = mb_y << 4;
1102 int mx = mb->mv.x + x_off + 8;
1103 int my = mb->mv.y + y_off;
1104 uint8_t **src= s->framep[ref]->data;
1105 int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
1106 s->dsp.prefetch(src[0]+off, s->linesize, 4);
1107 off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
1108 s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
1113 * Apply motion vectors to prediction buffer, chapter 18.
1115 static av_always_inline
1116 void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
1119 int x_off = mb_x << 4, y_off = mb_y << 4;
1120 int width = 16*s->mb_width, height = 16*s->mb_height;
1121 AVFrame *ref = s->framep[mb->ref_frame];
1122 VP56mv *bmv = mb->bmv;
1124 if (mb->mode < VP8_MVMODE_SPLIT) {
1125 vp8_mc_part(s, dst, ref, x_off, y_off,
1126 0, 0, 16, 16, width, height, &mb->mv);
1127 } else switch (mb->partitioning) {
1128 case VP8_SPLITMVMODE_4x4: {
1133 for (y = 0; y < 4; y++) {
1134 for (x = 0; x < 4; x++) {
1135 vp8_mc(s, 1, dst[0] + 4*y*s->linesize + x*4,
1136 ref->data[0], &bmv[4*y + x],
1137 4*x + x_off, 4*y + y_off, 4, 4,
1138 width, height, s->linesize,
1139 s->put_pixels_tab[2]);
1144 x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
1145 for (y = 0; y < 2; y++) {
1146 for (x = 0; x < 2; x++) {
1147 uvmv.x = mb->bmv[ 2*y * 4 + 2*x ].x +
1148 mb->bmv[ 2*y * 4 + 2*x+1].x +
1149 mb->bmv[(2*y+1) * 4 + 2*x ].x +
1150 mb->bmv[(2*y+1) * 4 + 2*x+1].x;
1151 uvmv.y = mb->bmv[ 2*y * 4 + 2*x ].y +
1152 mb->bmv[ 2*y * 4 + 2*x+1].y +
1153 mb->bmv[(2*y+1) * 4 + 2*x ].y +
1154 mb->bmv[(2*y+1) * 4 + 2*x+1].y;
1155 uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
1156 uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
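// average of the four luma MVs covering this chroma block, rounded to
// nearest with halves away from zero (x >> (INT_BIT-1) is -1 for x < 0)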
1157 if (s->profile == 3) {
1161 vp8_mc(s, 0, dst[1] + 4*y*s->uvlinesize + x*4,
1162 ref->data[1], &uvmv,
1163 4*x + x_off, 4*y + y_off, 4, 4,
1164 width, height, s->uvlinesize,
1165 s->put_pixels_tab[2]);
1166 vp8_mc(s, 0, dst[2] + 4*y*s->uvlinesize + x*4,
1167 ref->data[2], &uvmv,
1168 4*x + x_off, 4*y + y_off, 4, 4,
1169 width, height, s->uvlinesize,
1170 s->put_pixels_tab[2]);
1175 case VP8_SPLITMVMODE_16x8:
1176 vp8_mc_part(s, dst, ref, x_off, y_off,
1177 0, 0, 16, 8, width, height, &bmv[0]);
1178 vp8_mc_part(s, dst, ref, x_off, y_off,
1179 0, 8, 16, 8, width, height, &bmv[1]);
1181 case VP8_SPLITMVMODE_8x16:
1182 vp8_mc_part(s, dst, ref, x_off, y_off,
1183 0, 0, 8, 16, width, height, &bmv[0]);
1184 vp8_mc_part(s, dst, ref, x_off, y_off,
1185 8, 0, 8, 16, width, height, &bmv[1]);
1187 case VP8_SPLITMVMODE_8x8:
1188 vp8_mc_part(s, dst, ref, x_off, y_off,
1189 0, 0, 8, 8, width, height, &bmv[0]);
1190 vp8_mc_part(s, dst, ref, x_off, y_off,
1191 8, 0, 8, 8, width, height, &bmv[1]);
1192 vp8_mc_part(s, dst, ref, x_off, y_off,
1193 0, 8, 8, 8, width, height, &bmv[2]);
1194 vp8_mc_part(s, dst, ref, x_off, y_off,
1195 8, 8, 8, 8, width, height, &bmv[3]);
1200 static av_always_inline void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
1204 if (mb->mode != MODE_I4x4) {
1205 uint8_t *y_dst = dst[0];
1206 for (y = 0; y < 4; y++) {
1207 uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[y]);
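// nnz4 packs the four per-block coefficient counts of this row; if no byte
// exceeds 1 only DC coefficients are present, so the vp8_idct_dc_add4y()
// fast path can be used instead of four separate IDCTs.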
1209 if (nnz4&~0x01010101) {
1210 for (x = 0; x < 4; x++) {
1211 int nnz = s->non_zero_count_cache[y][x];
1214 s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, s->block[y][x], s->linesize);
1216 s->vp8dsp.vp8_idct_add(y_dst+4*x, s->block[y][x], s->linesize);
1220 s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
1223 y_dst += 4*s->linesize;
1227 for (ch = 0; ch < 2; ch++) {
1228 uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[4+ch]);
1230 uint8_t *ch_dst = dst[1+ch];
1231 if (nnz4&~0x01010101) {
1232 for (y = 0; y < 2; y++) {
1233 for (x = 0; x < 2; x++) {
1234 int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x];
1237 s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
1239 s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
1242 ch_dst += 4*s->uvlinesize;
1245 s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);
1251 static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f )
1253 int interior_limit, filter_level;
1255 if (s->segmentation.enabled) {
1256 filter_level = s->segmentation.filter_level[s->segment];
1257 if (!s->segmentation.absolute_vals)
1258 filter_level += s->filter.level;
1260 filter_level = s->filter.level;
1262 if (s->lf_delta.enabled) {
1263 filter_level += s->lf_delta.ref[mb->ref_frame];
1265 if (mb->ref_frame == VP56_FRAME_CURRENT) {
1266 if (mb->mode == MODE_I4x4)
1267 filter_level += s->lf_delta.mode[0];
1269 if (mb->mode == VP8_MVMODE_ZERO)
1270 filter_level += s->lf_delta.mode[1];
1271 else if (mb->mode == VP8_MVMODE_SPLIT)
1272 filter_level += s->lf_delta.mode[3];
1274 filter_level += s->lf_delta.mode[2];
1277 filter_level = av_clip(filter_level, 0, 63);
1279 interior_limit = filter_level;
1280 if (s->filter.sharpness) {
1281 interior_limit >>= s->filter.sharpness > 4 ? 2 : 1;
1282 interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
1284 interior_limit = FFMAX(interior_limit, 1);
1286 f->filter_level = filter_level;
1287 f->inner_limit = interior_limit;
1288 f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
1291 static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
1293 int mbedge_lim, bedge_lim, hev_thresh;
1294 int filter_level = f->filter_level;
1295 int inner_limit = f->inner_limit;
1296 int inner_filter = f->inner_filter;
1297 int linesize = s->linesize;
1298 int uvlinesize = s->uvlinesize;
1303 mbedge_lim = 2*(filter_level+2) + inner_limit;
1304 bedge_lim = 2* filter_level + inner_limit;
1305 hev_thresh = filter_level >= 15;
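// The high-edge-variance threshold grows with the filter level; keyframes
// and interframes use different cutoffs, filled in below.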
1308 if (filter_level >= 40)
1311 if (filter_level >= 40)
1313 else if (filter_level >= 20)
1318 s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
1319 mbedge_lim, inner_limit, hev_thresh);
1320 s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
1321 mbedge_lim, inner_limit, hev_thresh);
1325 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
1326 inner_limit, hev_thresh);
1327 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
1328 inner_limit, hev_thresh);
1329 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
1330 inner_limit, hev_thresh);
1331 s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
1332 uvlinesize, bedge_lim,
1333 inner_limit, hev_thresh);
1337 s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
1338 mbedge_lim, inner_limit, hev_thresh);
1339 s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
1340 mbedge_lim, inner_limit, hev_thresh);
1344 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
1345 linesize, bedge_lim,
1346 inner_limit, hev_thresh);
1347 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
1348 linesize, bedge_lim,
1349 inner_limit, hev_thresh);
1350 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
1351 linesize, bedge_lim,
1352 inner_limit, hev_thresh);
1353 s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
1354 dst[2] + 4 * uvlinesize,
1355 uvlinesize, bedge_lim,
1356 inner_limit, hev_thresh);
1360 static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
1362 int mbedge_lim, bedge_lim;
1363 int filter_level = f->filter_level;
1364 int inner_limit = f->inner_limit;
1365 int inner_filter = f->inner_filter;
1366 int linesize = s->linesize;
1371 mbedge_lim = 2*(filter_level+2) + inner_limit;
1372 bedge_lim = 2* filter_level + inner_limit;
1375 s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
1377 s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
1378 s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
1379 s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);
1383 s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
1385 s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
1386 s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
1387 s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);
1391 static void filter_mb_row(VP8Context *s, int mb_y)
1393 VP8FilterStrength *f = s->filter_strength;
1395 s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize,
1396 s->framep[VP56_FRAME_CURRENT]->data[1] + 8*mb_y*s->uvlinesize,
1397 s->framep[VP56_FRAME_CURRENT]->data[2] + 8*mb_y*s->uvlinesize
1401 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1402 backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
1403 filter_mb(s, dst, f++, mb_x, mb_y);
1410 static void filter_mb_row_simple(VP8Context *s, int mb_y)
1412 VP8FilterStrength *f = s->filter_strength;
1413 uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize;
1416 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1417 backup_mb_border(s->top_border[mb_x+1], dst, NULL, NULL, s->linesize, 0, 1);
1418 filter_mb_simple(s, dst, f++, mb_x, mb_y);
1423 static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
1426 VP8Context *s = avctx->priv_data;
1427 int ret, mb_x, mb_y, i, y, referenced;
1428 enum AVDiscard skip_thresh;
1429 AVFrame *curframe = NULL;
1431 if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
1434 referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
1435 || s->update_altref == VP56_FRAME_CURRENT;
1437 skip_thresh = !referenced ? AVDISCARD_NONREF :
1438 !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;
1440 if (avctx->skip_frame >= skip_thresh) {
1444 s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
1446 for (i = 0; i < 4; i++)
1447 if (&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
1448 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
1449 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
1450 curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
1453 if (curframe->data[0])
1454 avctx->release_buffer(avctx, curframe);
1456 curframe->key_frame = s->keyframe;
1457 curframe->pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
1458 curframe->reference = referenced ? 3 : 0;
1459 if ((ret = avctx->get_buffer(avctx, curframe))) {
1460 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
1464 // Given that arithmetic probabilities are updated every frame, it's quite likely
1465 // that the values we have on a random interframe are complete junk if we didn't
1466 // start decode on a keyframe. So just don't display anything rather than junk.
1467 if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
1468 !s->framep[VP56_FRAME_GOLDEN] ||
1469 !s->framep[VP56_FRAME_GOLDEN2])) {
1470 av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
1471 return AVERROR_INVALIDDATA;
1474 s->linesize = curframe->linesize[0];
1475 s->uvlinesize = curframe->linesize[1];
1477 if (!s->edge_emu_buffer)
1478 s->edge_emu_buffer = av_malloc(21*s->linesize);
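// 21 lines: a 16-pixel-high block plus the 5 extra rows needed by the
// 6-tap subpel filter (block_h + 5 in vp8_mc()).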
1480 memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));
1482 /* Zero macroblock structures for top/left prediction from outside the frame. */
1483 memset(s->macroblocks, 0, (s->mb_width + s->mb_height*2)*sizeof(*s->macroblocks));
1485 // top edge of 127 for intra prediction
1486 memset(s->top_border, 127, (s->mb_width+1)*sizeof(*s->top_border));
1487 memset(s->ref_count, 0, sizeof(s->ref_count));
1489 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1490 VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
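// Token partitions are assigned to macroblock rows round-robin; the
// partition count is a power of two, so masking with count-1 selects it.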
1491 VP8Macroblock *mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
1492 uint8_t *intra4x4 = s->intra4x4_pred_mode + 4*mb_y*s->b4_stride;
1493 uint8_t *segment_map = s->segmentation_map + mb_y*s->mb_stride;
1494 int mb_xy = mb_y * s->mb_stride;
1496 curframe->data[0] + 16*mb_y*s->linesize,
1497 curframe->data[1] + 8*mb_y*s->uvlinesize,
1498 curframe->data[2] + 8*mb_y*s->uvlinesize
1501 memset(s->left_nnz, 0, sizeof(s->left_nnz));
1503 // left edge of 129 for intra prediction
1504 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE))
1505 for (i = 0; i < 3; i++)
1506 for (y = 0; y < 16>>!!i; y++)
1507 dst[i][y*curframe->linesize[i]-1] = 129;
1509 memset(s->top_border, 129, sizeof(*s->top_border));
1511 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
1512 uint8_t *intra4x4_mb = s->keyframe ? intra4x4 + 4*mb_x : s->intra4x4_pred_mode_mb;
1513 uint8_t *segment_mb = segment_map+mb_x;
1515 /* Prefetch the current frame, 4 MBs ahead */
1516 s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
1517 s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);
1519 decode_mb_mode(s, mb, mb_x, mb_y, intra4x4_mb, segment_mb);
1521 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
1524 decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz);
1526 if (mb->mode <= MODE_I4x4)
1527 intra_predict(s, dst, mb, intra4x4_mb, mb_x, mb_y);
1529 inter_predict(s, dst, mb, mb_x, mb_y);
1531 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
1534 idct_mb(s, dst, mb);
1536 AV_ZERO64(s->left_nnz);
1537 AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
1539 // Reset the DC block predictors that would exist if the mb had coefficients
1540 if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
1542 s->top_nnz[mb_x][8] = 0;
1546 if (s->deblock_filter)
1547 filter_level_for_mb(s, mb, &s->filter_strength[mb_x]);
1549 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
1555 if (s->deblock_filter) {
1556 if (s->filter.simple)
1557 filter_mb_row_simple(s, mb_y);
1559 filter_mb_row(s, mb_y);
1564 // if future frames don't use the updated probabilities,
1565 // reset them to the values we saved
1566 if (!s->update_probabilities)
1567 s->prob[0] = s->prob[1];
1569 // check if golden and altref are swapped
1570 if (s->update_altref == VP56_FRAME_GOLDEN &&
1571 s->update_golden == VP56_FRAME_GOLDEN2)
1572 FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN], s->framep[VP56_FRAME_GOLDEN2]);
1574 if (s->update_altref != VP56_FRAME_NONE)
1575 s->framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
1577 if (s->update_golden != VP56_FRAME_NONE)
1578 s->framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
1581 if (s->update_last) // move cur->prev
1582 s->framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_CURRENT];
1584 // release no longer referenced frames
1585 for (i = 0; i < 4; i++)
1586 if (s->frames[i].data[0] &&
1587 &s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
1588 &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
1589 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
1590 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
1591 avctx->release_buffer(avctx, &s->frames[i]);
1593 if (!s->invisible) {
1594 *(AVFrame*)data = *s->framep[VP56_FRAME_CURRENT];
1595 *data_size = sizeof(AVFrame);
1601 static av_cold int vp8_decode_init(AVCodecContext *avctx)
1603 VP8Context *s = avctx->priv_data;
1606 avctx->pix_fmt = PIX_FMT_YUV420P;
1608 dsputil_init(&s->dsp, avctx);
1609 ff_h264_pred_init(&s->hpc, CODEC_ID_VP8);
1610 ff_vp8dsp_init(&s->vp8dsp);
1612 // intra pred needs edge emulation among other things
1613 if (avctx->flags&CODEC_FLAG_EMU_EDGE) {
1614 av_log(avctx, AV_LOG_ERROR, "Edge emulation not supported\n");
1615 return AVERROR_PATCHWELCOME;
1621 static av_cold int vp8_decode_free(AVCodecContext *avctx)
1623 vp8_decode_flush(avctx);
1627 AVCodec vp8_decoder = {
1637 .flush = vp8_decode_flush,
1638 .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),