2 * VP8 compatible video decoder
4 * Copyright (C) 2010 David Conrad
5 * Copyright (C) 2010 Ronald S. Bultje
6 * Copyright (C) 2010 Jason Garrett-Glaser
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "libavcore/imgutils.h"
31 #include "rectangle.h"
41 // TODO: make it possible to check for at least (i4x4 or split_mv)
42 // in one op; are others needed?
51 AVCodecContext *avctx;
55 vp8_mc_func put_pixels_tab[3][3][3];
58 uint8_t *edge_emu_buffer;
59 VP56RangeCoder c; ///< header context, includes mb modes and motion vectors
62 int mb_width; /* number of horizontal MB */
63 int mb_height; /* number of vertical MB */
69 int update_last; ///< update VP56_FRAME_PREVIOUS with the current one
70 int update_golden; ///< VP56_FRAME_NONE if not updated, or which frame to copy if so
75 * If this flag is not set, all the probability updates
76 * are discarded after this frame is decoded.
78 int update_probabilities;
81 * All coefficients are contained in separate arith coding contexts.
82 * There can be 1, 2, 4, or 8 of these after the header context.
84 int num_coeff_partitions;
85 VP56RangeCoder coeff_partition[8];
87 VP8Macroblock *macroblocks;
88 VP8Macroblock *macroblocks_base;
89 VP8FilterStrength *filter_strength;
91 uint8_t *intra4x4_pred_mode_top;
92 uint8_t intra4x4_pred_mode_left[4];
93 uint8_t *segmentation_map;
96 * Cache of the top row needed for intra prediction
97 * 16 for luma, 8 for each chroma plane
99 uint8_t (*top_border)[16+8+8];
102 * For coeff decode, we need to know whether the above block had non-zero
103 * coefficients. This means for each macroblock, we need data for 4 luma
104 * blocks, 2 u blocks, 2 v blocks, and the luma dc block, for a total of 9
105 * per macroblock. We keep the last row in top_nnz.
107 uint8_t (*top_nnz)[9];
108 DECLARE_ALIGNED(8, uint8_t, left_nnz)[9];
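/* Rough index layout, per the addressing in decode_mb_coeffs() below: [0-3] are
 * the four luma block contexts, [4-7] hold the chroma contexts (U at 4/6 and V
 * at 5/7 via the i+2*x / i+2*y indexing), and [8] is the luma DC (Y2) block. */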
111 * This is the index plus one of the last non-zero coeff
112 * for each of the blocks in the current macroblock.
114 * 1 -> dc-only (special transform)
115 * 2+ -> full transform
117 DECLARE_ALIGNED(16, uint8_t, non_zero_count_cache)[6][4];
118 DECLARE_ALIGNED(16, DCTELEM, block)[6][4][16];
119 DECLARE_ALIGNED(16, DCTELEM, block_dc)[16];
120 uint8_t intra4x4_pred_mode_mb[16];
122 int chroma_pred_mode; ///< 8x8c pred mode of the current macroblock
123 int segment; ///< segment of the current macroblock
126 int sign_bias[4]; ///< one state [0, 1] per ref frame type
130 * Base parameters for segmentation, i.e. per-macroblock parameters.
131 * These must be kept unchanged even if segmentation is not used for
132 * a frame, since the values persist between interframes.
138 int8_t base_quant[4];
139 int8_t filter_level[4]; ///< base loop filter level
143 * Macroblocks can have one of 4 different quants in a frame when
144 * segmentation is enabled.
145 * If segmentation is disabled, only the first segment's values are used.
148 // [0] - DC qmul [1] - AC qmul
149 int16_t luma_qmul[2];
150 int16_t luma_dc_qmul[2]; ///< luma dc-only block quant
151 int16_t chroma_qmul[2];
161 int enabled; ///< whether each mb can have a different strength based on mode/ref
164 * filter strength adjustment for the following macroblock modes:
165 * [0-3] - i16x16 (always zero)
168 * [6] - inter modes except for zero or split mv
170 * i16x16 modes never have any adjustment
172 int8_t mode[VP8_MVMODE_SPLIT+1];
175 * filter strength adjustment for macroblocks that reference:
176 * [0] - intra / VP56_FRAME_CURRENT
177 * [1] - VP56_FRAME_PREVIOUS
178 * [2] - VP56_FRAME_GOLDEN
179 * [3] - altref / VP56_FRAME_GOLDEN2
185 * These are all of the updatable probabilities for binary decisions.
186 * They are only implicitly reset on keyframes, making it quite likely
187 * for an interframe to desync if a prior frame's header was corrupt
188 * or missing outright!
191 uint8_t segmentid[3];
196 uint8_t pred16x16[4];
198 /* Padded to allow overreads */
199 uint8_t token[4][17][3][NUM_DCT_TOKENS-1];
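/* Note: the second dimension is the coefficient position (16, plus one padding
 * entry) rather than the spec's 8 coefficient bands; decode_frame_header()
 * expands each band-based probability update to all matching positions via
 * vp8_coeff_band_indexes, presumably trading memory for a cheaper lookup per
 * decoded coefficient. */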
204 static void vp8_decode_flush(AVCodecContext *avctx)
206 VP8Context *s = avctx->priv_data;
209 for (i = 0; i < 4; i++)
210 if (s->frames[i].data[0])
211 avctx->release_buffer(avctx, &s->frames[i]);
212 memset(s->framep, 0, sizeof(s->framep));
214 av_freep(&s->macroblocks_base);
215 av_freep(&s->filter_strength);
216 av_freep(&s->intra4x4_pred_mode_top);
217 av_freep(&s->top_nnz);
218 av_freep(&s->edge_emu_buffer);
219 av_freep(&s->top_border);
220 av_freep(&s->segmentation_map);
222 s->macroblocks = NULL;
225 static int update_dimensions(VP8Context *s, int width, int height)
227 if (av_image_check_size(width, height, 0, s->avctx))
228 return AVERROR_INVALIDDATA;
230 vp8_decode_flush(s->avctx);
232 avcodec_set_dimensions(s->avctx, width, height);
234 s->mb_width = (s->avctx->coded_width +15) / 16;
235 s->mb_height = (s->avctx->coded_height+15) / 16;
237 s->macroblocks_base = av_mallocz((s->mb_width+s->mb_height*2+1)*sizeof(*s->macroblocks));
238 s->filter_strength = av_mallocz(s->mb_width*sizeof(*s->filter_strength));
239 s->intra4x4_pred_mode_top = av_mallocz(s->mb_width*4);
240 s->top_nnz = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
241 s->top_border = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));
242 s->segmentation_map = av_mallocz(s->mb_width*s->mb_height);
244 if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_top ||
245 !s->top_nnz || !s->top_border || !s->segmentation_map)
246 return AVERROR(ENOMEM);
248 s->macroblocks = s->macroblocks_base + 1;
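/* Each macroblock row is stored shifted by two entries relative to the row above
 * it (see the (s->mb_height - mb_y - 1)*2 indexing in vp8_decode_frame), so for
 * any macroblock pointer mb, mb[-1] is its left neighbour, mb[1] its top-left
 * neighbour and mb[2] its top neighbour, while only mb_width + 2*mb_height + 1
 * entries are needed in total. */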
253 static void parse_segment_info(VP8Context *s)
255 VP56RangeCoder *c = &s->c;
258 s->segmentation.update_map = vp8_rac_get(c);
260 if (vp8_rac_get(c)) { // update segment feature data
261 s->segmentation.absolute_vals = vp8_rac_get(c);
263 for (i = 0; i < 4; i++)
264 s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);
266 for (i = 0; i < 4; i++)
267 s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
269 if (s->segmentation.update_map)
270 for (i = 0; i < 3; i++)
271 s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
274 static void update_lf_deltas(VP8Context *s)
276 VP56RangeCoder *c = &s->c;
279 for (i = 0; i < 4; i++)
280 s->lf_delta.ref[i] = vp8_rac_get_sint(c, 6);
282 for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++)
283 s->lf_delta.mode[i] = vp8_rac_get_sint(c, 6);
286 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
288 const uint8_t *sizes = buf;
291 s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
293 buf += 3*(s->num_coeff_partitions-1);
294 buf_size -= 3*(s->num_coeff_partitions-1);
298 for (i = 0; i < s->num_coeff_partitions-1; i++) {
299 int size = AV_RL24(sizes + 3*i);
300 if (buf_size - size < 0)
303 ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
307 ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
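/* Layout handled above: the sizes of all but the last coefficient partition are
 * stored up front as 3-byte little-endian values, followed by the partition
 * payloads themselves; the last partition has no explicit size and simply runs
 * to the end of the buffer. */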
312 static void get_quants(VP8Context *s)
314 VP56RangeCoder *c = &s->c;
317 int yac_qi = vp8_rac_get_uint(c, 7);
318 int ydc_delta = vp8_rac_get_sint(c, 4);
319 int y2dc_delta = vp8_rac_get_sint(c, 4);
320 int y2ac_delta = vp8_rac_get_sint(c, 4);
321 int uvdc_delta = vp8_rac_get_sint(c, 4);
322 int uvac_delta = vp8_rac_get_sint(c, 4);
324 for (i = 0; i < 4; i++) {
325 if (s->segmentation.enabled) {
326 base_qi = s->segmentation.base_quant[i];
327 if (!s->segmentation.absolute_vals)
332 s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + ydc_delta , 0, 127)];
333 s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi , 0, 127)];
334 s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip(base_qi + y2dc_delta, 0, 127)];
335 s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip(base_qi + y2ac_delta, 0, 127)] / 100;
336 s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + uvdc_delta, 0, 127)];
337 s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi + uvac_delta, 0, 127)];
339 s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
340 s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
345 * Determine which buffers golden and altref should be updated with after this frame.
346 * The spec isn't clear here, so I'm going by my understanding of what libvpx does
348 * Intra frames update all 3 references
349 * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
350 * If the update (golden|altref) flag is set, it's updated with the current frame
351 * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
352 * If the flag is not set, the number read means:
354 * 1: VP56_FRAME_PREVIOUS
355 * 2: update golden with altref, or update altref with golden
357 static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
359 VP56RangeCoder *c = &s->c;
362 return VP56_FRAME_CURRENT;
364 switch (vp8_rac_get_uint(c, 2)) {
366 return VP56_FRAME_PREVIOUS;
368 return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
370 return VP56_FRAME_NONE;
373 static void update_refs(VP8Context *s)
375 VP56RangeCoder *c = &s->c;
377 int update_golden = vp8_rac_get(c);
378 int update_altref = vp8_rac_get(c);
380 s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
381 s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
384 static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
386 VP56RangeCoder *c = &s->c;
387 int header_size, hscale, vscale, i, j, k, l, m, ret;
388 int width = s->avctx->width;
389 int height = s->avctx->height;
391 s->keyframe = !(buf[0] & 1);
392 s->profile = (buf[0]>>1) & 7;
393 s->invisible = !(buf[0] & 0x10);
394 header_size = AV_RL24(buf) >> 5;
399 av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
402 memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
403 else // profiles 1-3 use bilinear; 4+ are undefined, so fall back to bilinear
404 memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));
406 if (header_size > buf_size - 7*s->keyframe) {
407 av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
408 return AVERROR_INVALIDDATA;
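/* Keyframes carry a 7-byte uncompressed chunk after the 3-byte frame tag: the
 * start code 0x9d 0x01 0x2a, then 16-bit little-endian width and height, each
 * with a 2-bit upscaling factor in its top bits. */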
412 if (AV_RL24(buf) != 0x2a019d) {
413 av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
414 return AVERROR_INVALIDDATA;
416 width = AV_RL16(buf+3) & 0x3fff;
417 height = AV_RL16(buf+5) & 0x3fff;
418 hscale = buf[4] >> 6;
419 vscale = buf[6] >> 6;
423 if (hscale || vscale)
424 av_log_missing_feature(s->avctx, "Upscaling", 1);
426 s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
427 for (i = 0; i < 4; i++)
428 for (j = 0; j < 16; j++)
429 memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
430 sizeof(s->prob->token[i][j]));
431 memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
432 memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
433 memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
434 memset(&s->segmentation, 0, sizeof(s->segmentation));
437 if (!s->macroblocks_base || /* first frame */
438 width != s->avctx->width || height != s->avctx->height) {
439 if ((ret = update_dimensions(s, width, height)) < 0)
443 ff_vp56_init_range_decoder(c, buf, header_size);
445 buf_size -= header_size;
449 av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
450 vp8_rac_get(c); // whether we can skip clamping in dsp functions
453 if ((s->segmentation.enabled = vp8_rac_get(c)))
454 parse_segment_info(s);
456 s->segmentation.update_map = 0; // FIXME: move this to some init function?
458 s->filter.simple = vp8_rac_get(c);
459 s->filter.level = vp8_rac_get_uint(c, 6);
460 s->filter.sharpness = vp8_rac_get_uint(c, 3);
462 if ((s->lf_delta.enabled = vp8_rac_get(c)))
466 if (setup_partitions(s, buf, buf_size)) {
467 av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
468 return AVERROR_INVALIDDATA;
475 s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
476 s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
479 // if we aren't saving this frame's probabilities for future frames,
480 // make a copy of the current probabilities
481 if (!(s->update_probabilities = vp8_rac_get(c)))
482 s->prob[1] = s->prob[0];
484 s->update_last = s->keyframe || vp8_rac_get(c);
486 for (i = 0; i < 4; i++)
487 for (j = 0; j < 8; j++)
488 for (k = 0; k < 3; k++)
489 for (l = 0; l < NUM_DCT_TOKENS-1; l++)
490 if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
491 int prob = vp8_rac_get_uint(c, 8);
492 for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
493 s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
496 if ((s->mbskip_enabled = vp8_rac_get(c)))
497 s->prob->mbskip = vp8_rac_get_uint(c, 8);
500 s->prob->intra = vp8_rac_get_uint(c, 8);
501 s->prob->last = vp8_rac_get_uint(c, 8);
502 s->prob->golden = vp8_rac_get_uint(c, 8);
505 for (i = 0; i < 4; i++)
506 s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
508 for (i = 0; i < 3; i++)
509 s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
511 // 17.2 MV probability update
512 for (i = 0; i < 2; i++)
513 for (j = 0; j < 19; j++)
514 if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
515 s->prob->mvc[i][j] = vp8_rac_get_nn(c);
521 static av_always_inline
522 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src, int mb_x, int mb_y)
524 #define MARGIN (16 << 2)
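/* MVs are in quarter-pel units, so (mb_x << 6) is the macroblock's horizontal
   position in those units (16 px * 4); MARGIN allows a vector to point up to
   16 pixels outside the visible frame. */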
525 dst->x = av_clip(src->x, -((mb_x << 6) + MARGIN),
526 ((s->mb_width - 1 - mb_x) << 6) + MARGIN);
527 dst->y = av_clip(src->y, -((mb_y << 6) + MARGIN),
528 ((s->mb_height - 1 - mb_y) << 6) + MARGIN);
531 static av_always_inline
532 void find_near_mvs(VP8Context *s, VP8Macroblock *mb,
533 VP56mv near[2], VP56mv *best, uint8_t cnt[4])
535 VP8Macroblock *mb_edge[3] = { mb + 2 /* top */,
537 mb + 1 /* top-left */ };
538 enum { EDGE_TOP, EDGE_LEFT, EDGE_TOPLEFT };
539 VP56mv near_mv[4] = {{ 0 }};
540 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
542 int best_idx = CNT_ZERO;
543 int cur_sign_bias = s->sign_bias[mb->ref_frame];
544 int *sign_bias = s->sign_bias;
546 /* Process MB on top, left and top-left */
547 #define MV_EDGE_CHECK(n)\
549 VP8Macroblock *edge = mb_edge[n];\
550 int edge_ref = edge->ref_frame;\
551 if (edge_ref != VP56_FRAME_CURRENT) {\
552 uint32_t mv = AV_RN32A(&edge->mv);\
554 if (cur_sign_bias != sign_bias[edge_ref]) {\
555 /* SWAR negate of the values in mv. */\
557 mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
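/* Second step of a SWAR negation of both int16 halves (-x == ~x + 1): the    \
   masked add applies the +1 to each half without a carry crossing the        \
   halfword boundary, and the xor folds the saved sign bits back in.          \
   E.g. per half: 0x0003 -> ~ -> 0xFFFC -> 0xFFFD == -3. */\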
559 if (!n || mv != AV_RN32A(&near_mv[idx]))\
560 AV_WN32A(&near_mv[++idx], mv);\
561 cnt[idx] += 1 + (n != 2);\
563 cnt[CNT_ZERO] += 1 + (n != 2);\
570 /* If we have three distinct MVs, merge first and last if they're the same */
571 if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1+EDGE_TOP]) == AV_RN32A(&near_mv[1+EDGE_TOPLEFT]))
572 cnt[CNT_NEAREST] += 1;
574 cnt[CNT_SPLITMV] = ((mb_edge[EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
575 (mb_edge[EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
576 (mb_edge[EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
578 /* Swap near and nearest if necessary */
579 if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
580 FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
581 FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
584 /* Choose the best mv out of 0,0 and the nearest mv */
585 if (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])
586 best_idx = CNT_NEAREST;
588 mb->mv = near_mv[best_idx];
589 near[0] = near_mv[CNT_NEAREST];
590 near[1] = near_mv[CNT_NEAR];
594 * Motion vector coding, 17.1.
596 static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
600 if (vp56_rac_get_prob_branchy(c, p[0])) {
603 for (i = 0; i < 3; i++)
604 x += vp56_rac_get_prob(c, p[9 + i]) << i;
605 for (i = 9; i > 3; i--)
606 x += vp56_rac_get_prob(c, p[9 + i]) << i;
607 if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
611 const uint8_t *ps = p+2;
612 bit = vp56_rac_get_prob(c, *ps);
615 bit = vp56_rac_get_prob(c, *ps);
618 x += vp56_rac_get_prob(c, *ps);
621 return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
624 static av_always_inline
625 const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
628 return vp8_submv_prob[4-!!left];
630 return vp8_submv_prob[2];
631 return vp8_submv_prob[1-!!left];
635 * Split motion vector prediction, 16.4.
636 * @returns the number of motion vectors parsed (2, 4 or 16)
638 static av_always_inline
639 int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb)
643 VP8Macroblock *top_mb = &mb[2];
644 VP8Macroblock *left_mb = &mb[-1];
645 const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
646 *mbsplits_top = vp8_mbsplits[top_mb->partitioning],
647 *mbsplits_cur, *firstidx;
648 VP56mv *top_mv = top_mb->bmv;
649 VP56mv *left_mv = left_mb->bmv;
650 VP56mv *cur_mv = mb->bmv;
652 if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
653 if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1])) {
654 part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
656 part_idx = VP8_SPLITMVMODE_8x8;
659 part_idx = VP8_SPLITMVMODE_4x4;
662 num = vp8_mbsplit_count[part_idx];
663 mbsplits_cur = vp8_mbsplits[part_idx],
664 firstidx = vp8_mbfirstidx[part_idx];
665 mb->partitioning = part_idx;
667 for (n = 0; n < num; n++) {
669 uint32_t left, above;
670 const uint8_t *submv_prob;
673 left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
675 left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
677 above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
679 above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
681 submv_prob = get_submv_prob(left, above);
683 if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
684 if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
685 if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
686 mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
687 mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
689 AV_ZERO32(&mb->bmv[n]);
692 AV_WN32A(&mb->bmv[n], above);
695 AV_WN32A(&mb->bmv[n], left);
702 static av_always_inline
703 void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c,
704 int mb_x, int keyframe)
706 uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
709 uint8_t* const top = s->intra4x4_pred_mode_top + 4 * mb_x;
710 uint8_t* const left = s->intra4x4_pred_mode_left;
711 for (y = 0; y < 4; y++) {
712 for (x = 0; x < 4; x++) {
714 ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
715 *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
716 left[y] = top[x] = *intra4x4;
722 for (i = 0; i < 16; i++)
723 intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
727 static av_always_inline
728 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment)
730 VP56RangeCoder *c = &s->c;
732 if (s->segmentation.update_map)
733 *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
734 s->segment = *segment;
736 mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
739 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);
741 if (mb->mode == MODE_I4x4) {
742 decode_intra4x4_modes(s, c, mb_x, 1);
744 const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u;
745 AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
746 AV_WN32A(s->intra4x4_pred_mode_left, modes);
749 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
750 mb->ref_frame = VP56_FRAME_CURRENT;
751 } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
752 VP56mv near[2], best;
753 uint8_t cnt[4] = { 0 };
756 if (vp56_rac_get_prob_branchy(c, s->prob->last))
757 mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
758 VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
760 mb->ref_frame = VP56_FRAME_PREVIOUS;
761 s->ref_count[mb->ref_frame-1]++;
763 // motion vectors, 16.3
764 find_near_mvs(s, mb, near, &best, cnt);
765 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[0]][0])) {
766 mb->mode = VP8_MVMODE_MV;
767 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[1]][1])) {
768 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[2]][2])) {
769 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[3]][3])) {
770 mb->mode = VP8_MVMODE_SPLIT;
771 clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
772 mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1];
774 clamp_mv(s, &mb->mv, &mb->mv, mb_x, mb_y);
775 mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
776 mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
779 clamp_mv(s, &mb->mv, &near[1], mb_x, mb_y);
781 clamp_mv(s, &mb->mv, &near[0], mb_x, mb_y);
783 mb->mode = VP8_MVMODE_ZERO;
786 if (mb->mode != VP8_MVMODE_SPLIT) {
787 mb->partitioning = VP8_SPLITMVMODE_NONE;
792 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);
794 if (mb->mode == MODE_I4x4)
795 decode_intra4x4_modes(s, c, mb_x, 0);
797 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
798 mb->ref_frame = VP56_FRAME_CURRENT;
799 mb->partitioning = VP8_SPLITMVMODE_NONE;
800 AV_ZERO32(&mb->bmv[0]);
805 * @param c arithmetic bitstream reader context
806 * @param block destination for block coefficients
807 * @param probs probabilities to use when reading trees from the bitstream
808 * @param i initial coeff index, 0 unless a separate DC block is coded
809 * @param zero_nhood the initial prediction context for number of surrounding
810 * all-zero blocks (only left/top, so 0-2)
811 * @param qmul array holding the dc/ac dequant factor at position 0/1
812 * @return 0 if no coeffs were decoded
813 * otherwise, the index of the last coeff decoded plus one
815 static int decode_block_coeffs_internal(VP56RangeCoder *c, DCTELEM block[16],
816 uint8_t probs[8][3][NUM_DCT_TOKENS-1],
817 int i, uint8_t *token_prob, int16_t qmul[2])
822 if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
826 if (!vp56_rac_get_prob_branchy(c, token_prob[1])) { // DCT_0
828 return i; // invalid input; blocks should end with EOB
829 token_prob = probs[i][0];
833 if (!vp56_rac_get_prob_branchy(c, token_prob[2])) { // DCT_1
835 token_prob = probs[i+1][1];
837 if (!vp56_rac_get_prob_branchy(c, token_prob[3])) { // DCT 2,3,4
838 coeff = vp56_rac_get_prob_branchy(c, token_prob[4]);
840 coeff += vp56_rac_get_prob(c, token_prob[5]);
844 if (!vp56_rac_get_prob_branchy(c, token_prob[6])) {
845 if (!vp56_rac_get_prob_branchy(c, token_prob[7])) { // DCT_CAT1
846 coeff = 5 + vp56_rac_get_prob(c, vp8_dct_cat1_prob[0]);
849 coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[0]) << 1;
850 coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[1]);
852 } else { // DCT_CAT3 and up
853 int a = vp56_rac_get_prob(c, token_prob[8]);
854 int b = vp56_rac_get_prob(c, token_prob[9+a]);
855 int cat = (a<<1) + b;
856 coeff = 3 + (8<<cat);
857 coeff += vp8_rac_get_coeff(c, vp8_dct_cat_prob[cat]);
860 token_prob = probs[i+1][2];
862 block[zigzag_scan[i]] = (vp8_rac_get(c) ? -coeff : coeff) * qmul[!!i];
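/* The sign is coded as a final raw bit; coefficient 0 is scaled by the DC
 * dequant factor and all later positions by the AC factor (qmul[!!i]). */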
868 static av_always_inline
869 int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16],
870 uint8_t probs[8][3][NUM_DCT_TOKENS-1],
871 int i, int zero_nhood, int16_t qmul[2])
873 uint8_t *token_prob = probs[i][zero_nhood];
874 if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
876 return decode_block_coeffs_internal(c, block, probs, i, token_prob, qmul);
879 static av_always_inline
880 void decode_mb_coeffs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
881 uint8_t t_nnz[9], uint8_t l_nnz[9])
883 int i, x, y, luma_start = 0, luma_ctx = 3;
884 int nnz_pred, nnz, nnz_total = 0;
885 int segment = s->segment;
888 if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
889 nnz_pred = t_nnz[8] + l_nnz[8];
891 // decode DC values and do hadamard
892 nnz = decode_block_coeffs(c, s->block_dc, s->prob->token[1], 0, nnz_pred,
893 s->qmat[segment].luma_dc_qmul);
894 l_nnz[8] = t_nnz[8] = !!nnz;
899 s->vp8dsp.vp8_luma_dc_wht_dc(s->block, s->block_dc);
901 s->vp8dsp.vp8_luma_dc_wht(s->block, s->block_dc);
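/* block_dc holds the 16 DC coefficients of the luma 4x4 blocks; the inverse
 * Walsh-Hadamard transform (or its DC-only shortcut above) writes each result
 * into coefficient 0 of the corresponding luma block before the per-block IDCTs. */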
908 for (y = 0; y < 4; y++)
909 for (x = 0; x < 4; x++) {
910 nnz_pred = l_nnz[y] + t_nnz[x];
911 nnz = decode_block_coeffs(c, s->block[y][x], s->prob->token[luma_ctx], luma_start,
912 nnz_pred, s->qmat[segment].luma_qmul);
913 // nnz+block_dc may be one more than the actual last index, but we don't care
914 s->non_zero_count_cache[y][x] = nnz + block_dc;
915 t_nnz[x] = l_nnz[y] = !!nnz;
920 // TODO: what to do about dimensions? 2nd dim for luma is x,
921 // but for chroma it's (y<<1)|x
922 for (i = 4; i < 6; i++)
923 for (y = 0; y < 2; y++)
924 for (x = 0; x < 2; x++) {
925 nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
926 nnz = decode_block_coeffs(c, s->block[i][(y<<1)+x], s->prob->token[2], 0,
927 nnz_pred, s->qmat[segment].chroma_qmul);
928 s->non_zero_count_cache[i][(y<<1)+x] = nnz;
929 t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
933 // if there were no coded coeffs despite the macroblock not being marked skip,
934 // we MUST not do the inner loop filter and should not do IDCT
935 // Since skip isn't used for bitstream prediction, just manually set it.
940 static av_always_inline
941 void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
942 int linesize, int uvlinesize, int simple)
944 AV_COPY128(top_border, src_y + 15*linesize);
946 AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
947 AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
951 static av_always_inline
952 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
953 int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
954 int simple, int xchg)
956 uint8_t *top_border_m1 = top_border-32; // for TL prediction
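/* Intra prediction must see the unfiltered edge pixels of the neighbouring
 * macroblocks, but the loop filter later overwrites those rows in place, so the
 * saved top_border copy is swapped into the frame before prediction (xchg=1) and
 * swapped back out afterwards (xchg=0); see the paired calls in intra_predict(). */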
958 src_cb -= uvlinesize;
959 src_cr -= uvlinesize;
961 #define XCHG(a,b,xchg) do { \
962 if (xchg) AV_SWAP64(b,a); \
963 else AV_COPY64(b,a); \
966 XCHG(top_border_m1+8, src_y-8, xchg);
967 XCHG(top_border, src_y, xchg);
968 XCHG(top_border+8, src_y+8, 1);
969 if (mb_x < mb_width-1)
970 XCHG(top_border+32, src_y+16, 1);
972 // only copy chroma for normal loop filter
973 // or to initialize the top row to 127
974 if (!simple || !mb_y) {
975 XCHG(top_border_m1+16, src_cb-8, xchg);
976 XCHG(top_border_m1+24, src_cr-8, xchg);
977 XCHG(top_border+16, src_cb, 1);
978 XCHG(top_border+24, src_cr, 1);
982 static av_always_inline
983 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
986 return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
988 return mb_y ? mode : LEFT_DC_PRED8x8;
992 static av_always_inline
993 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y)
996 return mb_y ? VERT_PRED8x8 : DC_129_PRED8x8;
998 return mb_y ? mode : HOR_PRED8x8;
1002 static av_always_inline
1003 int check_intra_pred8x8_mode(int mode, int mb_x, int mb_y)
1005 if (mode == DC_PRED8x8) {
1006 return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1012 static av_always_inline
1013 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
1017 return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1019 return !mb_y ? DC_127_PRED8x8 : mode;
1021 return !mb_x ? DC_129_PRED8x8 : mode;
1022 case PLANE_PRED8x8 /*TM*/:
1023 return check_tm_pred8x8_mode(mode, mb_x, mb_y);
1028 static av_always_inline
1029 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y)
1032 return mb_y ? VERT_VP8_PRED : DC_129_PRED;
1034 return mb_y ? mode : HOR_VP8_PRED;
1038 static av_always_inline
1039 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf)
1043 if (!mb_x && mb_y) {
1048 case DIAG_DOWN_LEFT_PRED:
1049 case VERT_LEFT_PRED:
1050 return !mb_y ? DC_127_PRED : mode;
1058 return !mb_x ? DC_129_PRED : mode;
1060 return check_tm_pred4x4_mode(mode, mb_x, mb_y);
1061 case DC_PRED: // 4x4 DC doesn't use the same "H.264-style" exceptions as 16x16/8x8 DC
1062 case DIAG_DOWN_RIGHT_PRED:
1063 case VERT_RIGHT_PRED:
1072 static av_always_inline
1073 void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
1076 AVCodecContext *avctx = s->avctx;
1077 int x, y, mode, nnz, tr;
1079 // for the first row, we need to run xchg_mb_border to init the top edge to 127
1080 // otherwise, skip it if we aren't going to deblock
1081 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
1082 xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
1083 s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1084 s->filter.simple, 1);
1086 if (mb->mode < MODE_I4x4) {
1087 if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // tested
1088 mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y);
1090 mode = check_intra_pred8x8_mode(mb->mode, mb_x, mb_y);
1092 s->hpc.pred16x16[mode](dst[0], s->linesize);
1094 uint8_t *ptr = dst[0];
1095 uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
1096 uint8_t tr_top[4] = { 127, 127, 127, 127 };
1098 // all blocks on the right edge of the macroblock use the bottom edge of
1099 // the top macroblock for their top-right edge
1100 uint8_t *tr_right = ptr - s->linesize + 16;
1102 // if we're on the right edge of the frame, said edge is extended
1103 // from the top macroblock
1104 if (!(!mb_y && avctx->flags & CODEC_FLAG_EMU_EDGE) &&
1105 mb_x == s->mb_width-1) {
1106 tr = tr_right[-1]*0x01010101;
1107 tr_right = (uint8_t *)&tr;
1111 AV_ZERO128(s->non_zero_count_cache);
1113 for (y = 0; y < 4; y++) {
1114 uint8_t *topright = ptr + 4 - s->linesize;
1115 for (x = 0; x < 4; x++) {
1116 int copy = 0, linesize = s->linesize;
1117 uint8_t *dst = ptr+4*x;
1118 DECLARE_ALIGNED(4, uint8_t, copy_dst)[5*8];
1120 if ((y == 0 || x == 3) && mb_y == 0 && avctx->flags & CODEC_FLAG_EMU_EDGE) {
1123 topright = tr_right;
1125 if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // mb_x+x or mb_y+y is a hack but works
1126 mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x, mb_y + y, ©);
1128 dst = copy_dst + 12;
1132 AV_WN32A(copy_dst+4, 127U * 0x01010101U);
1134 AV_COPY32(copy_dst+4, ptr+4*x-s->linesize);
1138 copy_dst[3] = ptr[4*x-s->linesize-1];
1145 copy_dst[35] = 129U;
1147 copy_dst[11] = ptr[4*x -1];
1148 copy_dst[19] = ptr[4*x+s->linesize -1];
1149 copy_dst[27] = ptr[4*x+s->linesize*2-1];
1150 copy_dst[35] = ptr[4*x+s->linesize*3-1];
1156 s->hpc.pred4x4[mode](dst, topright, linesize);
1158 AV_COPY32(ptr+4*x , copy_dst+12);
1159 AV_COPY32(ptr+4*x+s->linesize , copy_dst+20);
1160 AV_COPY32(ptr+4*x+s->linesize*2, copy_dst+28);
1161 AV_COPY32(ptr+4*x+s->linesize*3, copy_dst+36);
1164 nnz = s->non_zero_count_cache[y][x];
1167 s->vp8dsp.vp8_idct_dc_add(ptr+4*x, s->block[y][x], s->linesize);
1169 s->vp8dsp.vp8_idct_add(ptr+4*x, s->block[y][x], s->linesize);
1174 ptr += 4*s->linesize;
1179 if (avctx->flags & CODEC_FLAG_EMU_EDGE) {
1180 mode = check_intra_pred8x8_mode_emuedge(s->chroma_pred_mode, mb_x, mb_y);
1182 mode = check_intra_pred8x8_mode(s->chroma_pred_mode, mb_x, mb_y);
1184 s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1185 s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1187 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
1188 xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
1189 s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1190 s->filter.simple, 0);
1193 static const uint8_t subpel_idx[3][8] = {
1194 { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
1195 // also function pointer index
1196 { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
1197 { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
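/* In other words: full-pel positions (0) need no extra pixels, odd eighth-pel
 * positions appear to use the 4-tap filters (1 left + 2 right extra pixels),
 * and the remaining even positions the full 6-tap filters (2 left + 3 right). */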
1201 * Generic MC function.
1203 * @param s VP8 decoding context
1204 * @param luma 1 for luma (Y) planes, 0 for chroma (Cb/Cr) planes
1205 * @param dst target buffer for block data at block position
1206 * @param src reference picture buffer at origin (0, 0)
1207 * @param mv motion vector (relative to block position) to get pixel data from
1208 * @param x_off horizontal position of block from origin (0, 0)
1209 * @param y_off vertical position of block from origin (0, 0)
1210 * @param block_w width of block (16, 8 or 4)
1211 * @param block_h height of block (always same as block_w)
1212 * @param width width of src/dst plane data
1213 * @param height height of src/dst plane data
1214 * @param linesize size of a single line of plane data, including padding
1215 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1217 static av_always_inline
1218 void vp8_mc_luma(VP8Context *s, uint8_t *dst, uint8_t *src, const VP56mv *mv,
1219 int x_off, int y_off, int block_w, int block_h,
1220 int width, int height, int linesize,
1221 vp8_mc_func mc_func[3][3])
1225 int mx = (mv->x << 1)&7, mx_idx = subpel_idx[0][mx];
1226 int my = (mv->y << 1)&7, my_idx = subpel_idx[0][my];
1228 x_off += mv->x >> 2;
1229 y_off += mv->y >> 2;
1232 src += y_off * linesize + x_off;
1233 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1234 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1235 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src - my_idx * linesize - mx_idx, linesize,
1236 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1237 x_off - mx_idx, y_off - my_idx, width, height);
1238 src = s->edge_emu_buffer + mx_idx + linesize * my_idx;
1240 mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
1242 mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
1245 static av_always_inline
1246 void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, uint8_t *src1,
1247 uint8_t *src2, const VP56mv *mv, int x_off, int y_off,
1248 int block_w, int block_h, int width, int height, int linesize,
1249 vp8_mc_func mc_func[3][3])
1252 int mx = mv->x&7, mx_idx = subpel_idx[0][mx];
1253 int my = mv->y&7, my_idx = subpel_idx[0][my];
1255 x_off += mv->x >> 3;
1256 y_off += mv->y >> 3;
1259 src1 += y_off * linesize + x_off;
1260 src2 += y_off * linesize + x_off;
1261 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1262 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1263 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize,
1264 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1265 x_off - mx_idx, y_off - my_idx, width, height);
1266 src1 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
1267 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1269 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src2 - my_idx * linesize - mx_idx, linesize,
1270 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1271 x_off - mx_idx, y_off - my_idx, width, height);
1272 src2 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
1273 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1275 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1276 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1279 mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1280 mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1284 static av_always_inline
1285 void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
1286 AVFrame *ref_frame, int x_off, int y_off,
1287 int bx_off, int by_off,
1288 int block_w, int block_h,
1289 int width, int height, VP56mv *mv)
1294 vp8_mc_luma(s, dst[0] + by_off * s->linesize + bx_off,
1295 ref_frame->data[0], mv, x_off + bx_off, y_off + by_off,
1296 block_w, block_h, width, height, s->linesize,
1297 s->put_pixels_tab[block_w == 8]);
1300 if (s->profile == 3) {
1304 x_off >>= 1; y_off >>= 1;
1305 bx_off >>= 1; by_off >>= 1;
1306 width >>= 1; height >>= 1;
1307 block_w >>= 1; block_h >>= 1;
1308 vp8_mc_chroma(s, dst[1] + by_off * s->uvlinesize + bx_off,
1309 dst[2] + by_off * s->uvlinesize + bx_off, ref_frame->data[1],
1310 ref_frame->data[2], &uvmv, x_off + bx_off, y_off + by_off,
1311 block_w, block_h, width, height, s->uvlinesize,
1312 s->put_pixels_tab[1 + (block_w == 4)]);
1315 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1316 * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1317 static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
1319 /* Don't prefetch refs that haven't been used very often this frame. */
1320 if (s->ref_count[ref-1] > (mb_xy >> 5)) {
1321 int x_off = mb_x << 4, y_off = mb_y << 4;
1322 int mx = (mb->mv.x>>2) + x_off + 8;
1323 int my = (mb->mv.y>>2) + y_off;
1324 uint8_t **src= s->framep[ref]->data;
1325 int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
1326 s->dsp.prefetch(src[0]+off, s->linesize, 4);
1327 off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
1328 s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
1333 * Apply motion vectors to prediction buffer, chapter 18.
1335 static av_always_inline
1336 void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
1339 int x_off = mb_x << 4, y_off = mb_y << 4;
1340 int width = 16*s->mb_width, height = 16*s->mb_height;
1341 AVFrame *ref = s->framep[mb->ref_frame];
1342 VP56mv *bmv = mb->bmv;
1344 switch (mb->partitioning) {
1345 case VP8_SPLITMVMODE_NONE:
1346 vp8_mc_part(s, dst, ref, x_off, y_off,
1347 0, 0, 16, 16, width, height, &mb->mv);
1349 case VP8_SPLITMVMODE_4x4: {
1354 for (y = 0; y < 4; y++) {
1355 for (x = 0; x < 4; x++) {
1356 vp8_mc_luma(s, dst[0] + 4*y*s->linesize + x*4,
1357 ref->data[0], &bmv[4*y + x],
1358 4*x + x_off, 4*y + y_off, 4, 4,
1359 width, height, s->linesize,
1360 s->put_pixels_tab[2]);
1365 x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
1366 for (y = 0; y < 2; y++) {
1367 for (x = 0; x < 2; x++) {
1368 uvmv.x = mb->bmv[ 2*y * 4 + 2*x ].x +
1369 mb->bmv[ 2*y * 4 + 2*x+1].x +
1370 mb->bmv[(2*y+1) * 4 + 2*x ].x +
1371 mb->bmv[(2*y+1) * 4 + 2*x+1].x;
1372 uvmv.y = mb->bmv[ 2*y * 4 + 2*x ].y +
1373 mb->bmv[ 2*y * 4 + 2*x+1].y +
1374 mb->bmv[(2*y+1) * 4 + 2*x ].y +
1375 mb->bmv[(2*y+1) * 4 + 2*x+1].y;
1376 uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
1377 uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
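/* The chroma vector is the sum of the four covering luma vectors divided by 4;
 * adding 2 plus the sign bit before the shift rounds symmetrically, with ties
 * rounding away from zero (e.g. -6 -> -2, +6 -> +2). */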
1378 if (s->profile == 3) {
1382 vp8_mc_chroma(s, dst[1] + 4*y*s->uvlinesize + x*4,
1383 dst[2] + 4*y*s->uvlinesize + x*4,
1384 ref->data[1], ref->data[2], &uvmv,
1385 4*x + x_off, 4*y + y_off, 4, 4,
1386 width, height, s->uvlinesize,
1387 s->put_pixels_tab[2]);
1392 case VP8_SPLITMVMODE_16x8:
1393 vp8_mc_part(s, dst, ref, x_off, y_off,
1394 0, 0, 16, 8, width, height, &bmv[0]);
1395 vp8_mc_part(s, dst, ref, x_off, y_off,
1396 0, 8, 16, 8, width, height, &bmv[1]);
1398 case VP8_SPLITMVMODE_8x16:
1399 vp8_mc_part(s, dst, ref, x_off, y_off,
1400 0, 0, 8, 16, width, height, &bmv[0]);
1401 vp8_mc_part(s, dst, ref, x_off, y_off,
1402 8, 0, 8, 16, width, height, &bmv[1]);
1404 case VP8_SPLITMVMODE_8x8:
1405 vp8_mc_part(s, dst, ref, x_off, y_off,
1406 0, 0, 8, 8, width, height, &bmv[0]);
1407 vp8_mc_part(s, dst, ref, x_off, y_off,
1408 8, 0, 8, 8, width, height, &bmv[1]);
1409 vp8_mc_part(s, dst, ref, x_off, y_off,
1410 0, 8, 8, 8, width, height, &bmv[2]);
1411 vp8_mc_part(s, dst, ref, x_off, y_off,
1412 8, 8, 8, 8, width, height, &bmv[3]);
1417 static av_always_inline void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
1421 if (mb->mode != MODE_I4x4) {
1422 uint8_t *y_dst = dst[0];
1423 for (y = 0; y < 4; y++) {
1424 uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[y]);
1426 if (nnz4&~0x01010101) {
1427 for (x = 0; x < 4; x++) {
1428 int nnz = s->non_zero_count_cache[y][x];
1431 s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, s->block[y][x], s->linesize);
1433 s->vp8dsp.vp8_idct_add(y_dst+4*x, s->block[y][x], s->linesize);
1437 s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
1440 y_dst += 4*s->linesize;
1444 for (ch = 0; ch < 2; ch++) {
1445 uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[4+ch]);
1447 uint8_t *ch_dst = dst[1+ch];
1448 if (nnz4&~0x01010101) {
1449 for (y = 0; y < 2; y++) {
1450 for (x = 0; x < 2; x++) {
1451 int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x];
1454 s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
1456 s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
1459 ch_dst += 4*s->uvlinesize;
1462 s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);
1468 static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f )
1470 int interior_limit, filter_level;
1472 if (s->segmentation.enabled) {
1473 filter_level = s->segmentation.filter_level[s->segment];
1474 if (!s->segmentation.absolute_vals)
1475 filter_level += s->filter.level;
1477 filter_level = s->filter.level;
1479 if (s->lf_delta.enabled) {
1480 filter_level += s->lf_delta.ref[mb->ref_frame];
1481 filter_level += s->lf_delta.mode[mb->mode];
1483 filter_level = av_clip(filter_level, 0, 63);
1485 interior_limit = filter_level;
1486 if (s->filter.sharpness) {
1487 interior_limit >>= s->filter.sharpness > 4 ? 2 : 1;
1488 interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
1490 interior_limit = FFMAX(interior_limit, 1);
1492 f->filter_level = filter_level;
1493 f->inner_limit = interior_limit;
1494 f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
1497 static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
1499 int mbedge_lim, bedge_lim, hev_thresh;
1500 int filter_level = f->filter_level;
1501 int inner_limit = f->inner_limit;
1502 int inner_filter = f->inner_filter;
1503 int linesize = s->linesize;
1504 int uvlinesize = s->uvlinesize;
1509 mbedge_lim = 2*(filter_level+2) + inner_limit;
1510 bedge_lim = 2* filter_level + inner_limit;
1511 hev_thresh = filter_level >= 15;
1514 if (filter_level >= 40)
1517 if (filter_level >= 40)
1519 else if (filter_level >= 20)
1524 s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
1525 mbedge_lim, inner_limit, hev_thresh);
1526 s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
1527 mbedge_lim, inner_limit, hev_thresh);
1531 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
1532 inner_limit, hev_thresh);
1533 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
1534 inner_limit, hev_thresh);
1535 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
1536 inner_limit, hev_thresh);
1537 s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
1538 uvlinesize, bedge_lim,
1539 inner_limit, hev_thresh);
1543 s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
1544 mbedge_lim, inner_limit, hev_thresh);
1545 s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
1546 mbedge_lim, inner_limit, hev_thresh);
1550 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
1551 linesize, bedge_lim,
1552 inner_limit, hev_thresh);
1553 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
1554 linesize, bedge_lim,
1555 inner_limit, hev_thresh);
1556 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
1557 linesize, bedge_lim,
1558 inner_limit, hev_thresh);
1559 s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
1560 dst[2] + 4 * uvlinesize,
1561 uvlinesize, bedge_lim,
1562 inner_limit, hev_thresh);
1566 static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
1568 int mbedge_lim, bedge_lim;
1569 int filter_level = f->filter_level;
1570 int inner_limit = f->inner_limit;
1571 int inner_filter = f->inner_filter;
1572 int linesize = s->linesize;
1577 mbedge_lim = 2*(filter_level+2) + inner_limit;
1578 bedge_lim = 2* filter_level + inner_limit;
1581 s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
1583 s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
1584 s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
1585 s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);
1589 s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
1591 s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
1592 s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
1593 s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);
1597 static void filter_mb_row(VP8Context *s, int mb_y)
1599 VP8FilterStrength *f = s->filter_strength;
1601 s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize,
1602 s->framep[VP56_FRAME_CURRENT]->data[1] + 8*mb_y*s->uvlinesize,
1603 s->framep[VP56_FRAME_CURRENT]->data[2] + 8*mb_y*s->uvlinesize
1607 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1608 backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
1609 filter_mb(s, dst, f++, mb_x, mb_y);
1616 static void filter_mb_row_simple(VP8Context *s, int mb_y)
1618 VP8FilterStrength *f = s->filter_strength;
1619 uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize;
1622 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1623 backup_mb_border(s->top_border[mb_x+1], dst, NULL, NULL, s->linesize, 0, 1);
1624 filter_mb_simple(s, dst, f++, mb_x, mb_y);
1629 static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
1632 VP8Context *s = avctx->priv_data;
1633 int ret, mb_x, mb_y, i, y, referenced;
1634 enum AVDiscard skip_thresh;
1635 AVFrame *av_uninit(curframe);
1637 if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
1640 referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
1641 || s->update_altref == VP56_FRAME_CURRENT;
1643 skip_thresh = !referenced ? AVDISCARD_NONREF :
1644 !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;
1646 if (avctx->skip_frame >= skip_thresh) {
1650 s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
1652 for (i = 0; i < 4; i++)
1653 if (&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
1654 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
1655 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
1656 curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
1659 if (curframe->data[0])
1660 avctx->release_buffer(avctx, curframe);
1662 curframe->key_frame = s->keyframe;
1663 curframe->pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
1664 curframe->reference = referenced ? 3 : 0;
1665 if ((ret = avctx->get_buffer(avctx, curframe))) {
1666 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
1670 // Given that arithmetic probabilities are updated every frame, it's quite likely
1671 // that the values we have on a random interframe are complete junk if we didn't
1672 // start decode on a keyframe. So just don't display anything rather than junk.
1673 if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
1674 !s->framep[VP56_FRAME_GOLDEN] ||
1675 !s->framep[VP56_FRAME_GOLDEN2])) {
1676 av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
1677 return AVERROR_INVALIDDATA;
1680 s->linesize = curframe->linesize[0];
1681 s->uvlinesize = curframe->linesize[1];
1683 if (!s->edge_emu_buffer)
1684 s->edge_emu_buffer = av_malloc(21*s->linesize);
1686 memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));
1688 /* Zero macroblock structures for top/top-left prediction from outside the frame. */
1689 memset(s->macroblocks + s->mb_height*2 - 1, 0, (s->mb_width+1)*sizeof(*s->macroblocks));
1691 // top edge of 127 for intra prediction
1692 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
1693 s->top_border[0][15] = s->top_border[0][23] = 127;
1694 memset(s->top_border[1]-1, 127, s->mb_width*sizeof(*s->top_border)+1);
1696 memset(s->ref_count, 0, sizeof(s->ref_count));
1698 memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width*4);
1700 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1701 VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
1702 VP8Macroblock *mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
1703 int mb_xy = mb_y*s->mb_width;
1705 curframe->data[0] + 16*mb_y*s->linesize,
1706 curframe->data[1] + 8*mb_y*s->uvlinesize,
1707 curframe->data[2] + 8*mb_y*s->uvlinesize
1710 memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
1711 memset(s->left_nnz, 0, sizeof(s->left_nnz));
1712 AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED*0x01010101);
1714 // left edge of 129 for intra prediction
1715 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
1716 for (i = 0; i < 3; i++)
1717 for (y = 0; y < 16>>!!i; y++)
1718 dst[i][y*curframe->linesize[i]-1] = 129;
1719 if (mb_y == 1) // top left edge is also 129
1720 s->top_border[0][15] = s->top_border[0][23] = s->top_border[0][31] = 129;
1723 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
1724 /* Prefetch the current frame, 4 MBs ahead */
1725 s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
1726 s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);
1728 decode_mb_mode(s, mb, mb_x, mb_y, s->segmentation_map + mb_xy);
1730 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
1733 decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz);
1735 if (mb->mode <= MODE_I4x4)
1736 intra_predict(s, dst, mb, mb_x, mb_y);
1738 inter_predict(s, dst, mb, mb_x, mb_y);
1740 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
1743 idct_mb(s, dst, mb);
1745 AV_ZERO64(s->left_nnz);
1746 AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
1748 // Reset the DC block predictors that would exist if the mb had coefficients
1749 if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
1751 s->top_nnz[mb_x][8] = 0;
1755 if (s->deblock_filter)
1756 filter_level_for_mb(s, mb, &s->filter_strength[mb_x]);
1758 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
1764 if (s->deblock_filter) {
1765 if (s->filter.simple)
1766 filter_mb_row_simple(s, mb_y);
1768 filter_mb_row(s, mb_y);
1773 // if future frames don't use the updated probabilities,
1774 // reset them to the values we saved
1775 if (!s->update_probabilities)
1776 s->prob[0] = s->prob[1];
1778 // check if golden and altref are swapped
1779 if (s->update_altref == VP56_FRAME_GOLDEN &&
1780 s->update_golden == VP56_FRAME_GOLDEN2)
1781 FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN], s->framep[VP56_FRAME_GOLDEN2]);
1783 if (s->update_altref != VP56_FRAME_NONE)
1784 s->framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
1786 if (s->update_golden != VP56_FRAME_NONE)
1787 s->framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
1790 if (s->update_last) // move cur->prev
1791 s->framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_CURRENT];
1793 // release no longer referenced frames
1794 for (i = 0; i < 4; i++)
1795 if (s->frames[i].data[0] &&
1796 &s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
1797 &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
1798 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
1799 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
1800 avctx->release_buffer(avctx, &s->frames[i]);
1802 if (!s->invisible) {
1803 *(AVFrame*)data = *s->framep[VP56_FRAME_CURRENT];
1804 *data_size = sizeof(AVFrame);
1810 static av_cold int vp8_decode_init(AVCodecContext *avctx)
1812 VP8Context *s = avctx->priv_data;
1815 avctx->pix_fmt = PIX_FMT_YUV420P;
1817 dsputil_init(&s->dsp, avctx);
1818 ff_h264_pred_init(&s->hpc, CODEC_ID_VP8);
1819 ff_vp8dsp_init(&s->vp8dsp);
1824 static av_cold int vp8_decode_free(AVCodecContext *avctx)
1826 vp8_decode_flush(avctx);
1830 AVCodec ff_vp8_decoder = {
1840 .flush = vp8_decode_flush,
1841 .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),