/**
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 * Copyright (C) 2010 Jason Garrett-Glaser
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "internal.h"
#include "vp8.h"
#include "vp8data.h"
#include "rectangle.h"
#include "thread.h"

#if ARCH_ARM
#   include "arm/vp8.h"
#endif
37 static void free_buffers(VP8Context *s)
39 av_freep(&s->macroblocks_base);
40 av_freep(&s->filter_strength);
41 av_freep(&s->intra4x4_pred_mode_top);
42 av_freep(&s->top_nnz);
43 av_freep(&s->edge_emu_buffer);
44 av_freep(&s->top_border);
46 s->macroblocks = NULL;
static int vp8_alloc_frame(VP8Context *s, AVFrame *f)
{
    int ret;
    if ((ret = ff_thread_get_buffer(s->avctx, f)) < 0)
        return ret;
    if (s->num_maps_to_be_freed && !s->maps_are_invalid) {
        f->ref_index[0] = s->segmentation_maps[--s->num_maps_to_be_freed];
    } else if (!(f->ref_index[0] = av_mallocz(s->mb_width * s->mb_height))) {
        ff_thread_release_buffer(s->avctx, f);
        return AVERROR(ENOMEM);
    }
    return 0;
}
static void vp8_release_frame(VP8Context *s, AVFrame *f, int prefer_delayed_free, int can_direct_free)
{
    if (f->ref_index[0]) {
        if (prefer_delayed_free) {
            /* Upon a size change, we want to free the maps but other threads may still
             * be using them, so queue them. Upon a seek, all threads are inactive, so
             * we want to cache one to prevent re-allocation in the next decoding
             * iteration, but the rest we can free directly. */
            int max_queued_maps = can_direct_free ? 1 : FF_ARRAY_ELEMS(s->segmentation_maps);
            if (s->num_maps_to_be_freed < max_queued_maps) {
                s->segmentation_maps[s->num_maps_to_be_freed++] = f->ref_index[0];
            } else if (can_direct_free) /* vp8_decode_flush(), but our queue is full */ {
                av_free(f->ref_index[0]);
            } /* else: MEMLEAK (should never happen, but better that than a crash) */
            f->ref_index[0] = NULL;
        } else /* vp8_decode_free() */ {
            av_free(f->ref_index[0]);
        }
    }
    ff_thread_release_buffer(s->avctx, f);
}
static void vp8_decode_flush_impl(AVCodecContext *avctx,
                                  int prefer_delayed_free, int can_direct_free, int free_mem)
{
    VP8Context *s = avctx->priv_data;
    int i;

    if (!avctx->internal->is_copy) {
        for (i = 0; i < 5; i++)
            if (s->frames[i].data[0])
                vp8_release_frame(s, &s->frames[i], prefer_delayed_free, can_direct_free);
    }
    memset(s->framep, 0, sizeof(s->framep));

    if (free_mem) {
        free_buffers(s);
        s->maps_are_invalid = 1;
    }
}
static void vp8_decode_flush(AVCodecContext *avctx)
{
    vp8_decode_flush_impl(avctx, 1, 1, 0);
}
static int update_dimensions(VP8Context *s, int width, int height)
{
    if (width  != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
        height != s->avctx->height) {
        if (av_image_check_size(width, height, 0, s->avctx))
            return AVERROR_INVALIDDATA;

        vp8_decode_flush_impl(s->avctx, 1, 0, 1);

        avcodec_set_dimensions(s->avctx, width, height);
    }

    s->mb_width  = (s->avctx->coded_width +15) / 16;
    s->mb_height = (s->avctx->coded_height+15) / 16;

    s->macroblocks_base       = av_mallocz((s->mb_width+s->mb_height*2+1)*sizeof(*s->macroblocks));
    s->filter_strength        = av_mallocz(s->mb_width*sizeof(*s->filter_strength));
    s->intra4x4_pred_mode_top = av_mallocz(s->mb_width*4);
    s->top_nnz                = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
    s->top_border             = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));

    if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_top ||
        !s->top_nnz || !s->top_border)
        return AVERROR(ENOMEM);

    s->macroblocks = s->macroblocks_base + 1;

    return 0;
}
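/* Note the macroblock array layout: macroblocks_base holds only
 * mb_width + mb_height*2 + 1 entries, not a full 2D grid. Each row of the
 * image is decoded into a window starting at
 * s->macroblocks + (mb_height - mb_y - 1)*2, so consecutive rows overlap,
 * shifted by two entries; for any macroblock, mb[-1] is then its left
 * neighbour and mb[2] the one above (cf. decode_mvs below). This keeps the
 * top/left prediction state O(width + height) instead of O(width*height). */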
static void parse_segment_info(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    s->segmentation.update_map = vp8_rac_get(c);

    if (vp8_rac_get(c)) { // update segment feature data
        s->segmentation.absolute_vals = vp8_rac_get(c);

        for (i = 0; i < 4; i++)
            s->segmentation.base_quant[i]   = vp8_rac_get_sint(c, 7);

        for (i = 0; i < 4; i++)
            s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
    }
    if (s->segmentation.update_map)
        for (i = 0; i < 3; i++)
            s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
}
static void update_lf_deltas(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    for (i = 0; i < 4; i++) {
        if (vp8_rac_get(c)) {
            s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);

            if (vp8_rac_get(c))
                s->lf_delta.ref[i] = -s->lf_delta.ref[i];
        }
    }

    for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
        if (vp8_rac_get(c)) {
            s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);

            if (vp8_rac_get(c))
                s->lf_delta.mode[i] = -s->lf_delta.mode[i];
        }
    }
}
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
{
    const uint8_t *sizes = buf;
    int i;

    s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);

    buf      += 3*(s->num_coeff_partitions-1);
    buf_size -= 3*(s->num_coeff_partitions-1);
    if (buf_size < 0)
        return -1;

    for (i = 0; i < s->num_coeff_partitions-1; i++) {
        int size = AV_RL24(sizes + 3*i);
        if (buf_size - size < 0)
            return -1;

        ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
        buf      += size;
        buf_size -= size;
    }
    ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);

    return 0;
}
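/* Partition layout (illustrative): with num_coeff_partitions == 4, the
 * coefficient data starts with three little-endian 24-bit sizes, one per
 * partition except the last:
 *     [size0][size1][size2][part0...][part1...][part2...][part3...]
 * The last partition's size is implied by the remaining buf_size, which is
 * why only the first n-1 partitions are bounds-checked against it. */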
static void get_quants(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i, base_qi;

    int yac_qi     = vp8_rac_get_uint(c, 7);
    int ydc_delta  = vp8_rac_get_sint(c, 4);
    int y2dc_delta = vp8_rac_get_sint(c, 4);
    int y2ac_delta = vp8_rac_get_sint(c, 4);
    int uvdc_delta = vp8_rac_get_sint(c, 4);
    int uvac_delta = vp8_rac_get_sint(c, 4);

    for (i = 0; i < 4; i++) {
        if (s->segmentation.enabled) {
            base_qi = s->segmentation.base_quant[i];
            if (!s->segmentation.absolute_vals)
                base_qi += yac_qi;
        } else
            base_qi = yac_qi;

        s->qmat[i].luma_qmul[0]    =           vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta , 7)];
        s->qmat[i].luma_qmul[1]    =           vp8_ac_qlookup[av_clip_uintp2(base_qi             , 7)];
        s->qmat[i].luma_dc_qmul[0] =       2 * vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)];
        s->qmat[i].luma_dc_qmul[1] = 155 *     vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] / 100;
        s->qmat[i].chroma_qmul[0]  =           vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
        s->qmat[i].chroma_qmul[1]  =           vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];

        s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
        s->qmat[i].chroma_qmul[0]  = FFMIN(s->qmat[i].chroma_qmul[0], 132);
    }
}
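/* The factors above mirror the special handling of the second-order (Y2)
 * block: its DC dequantizer is doubled and its AC dequantizer scaled by
 * 155/100, with a floor of 8 on Y2 AC and a cap of 132 on chroma DC.
 * E.g. with yac_qi == 60 and all deltas 0, luma_qmul[1] is simply
 * vp8_ac_qlookup[60] while luma_dc_qmul[0] is 2*vp8_dc_qlookup[60]. */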
/**
 * Determine which buffers golden and altref should be updated with after this frame.
 * The spec isn't clear here, so I'm going by my understanding of what libvpx does.
 *
 * Intra frames update all 3 references
 * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
 * If the update (golden|altref) flag is set, it's updated with the current frame
 *      if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
 * If the flag is not set, the number read means:
 *      0: no update
 *      1: VP56_FRAME_PREVIOUS
 *      2: update golden with altref, or update altref with golden
 */
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
{
    VP56RangeCoder *c = &s->c;

    if (update)
        return VP56_FRAME_CURRENT;

    switch (vp8_rac_get_uint(c, 2)) {
    case 1:
        return VP56_FRAME_PREVIOUS;
    case 2:
        return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
    }
    return VP56_FRAME_NONE;
}
static void update_refs(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;

    int update_golden = vp8_rac_get(c);
    int update_altref = vp8_rac_get(c);

    s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
    s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
}
static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
    VP56RangeCoder *c = &s->c;
    int header_size, hscale, vscale, i, j, k, l, m, ret;
    int width  = s->avctx->width;
    int height = s->avctx->height;

    s->keyframe  = !(buf[0] & 1);
    s->profile   =  (buf[0]>>1) & 7;
    s->invisible = !(buf[0] & 0x10);
    header_size  = AV_RL24(buf) >> 5;
    buf      += 3;
    buf_size -= 3;

    if (s->profile > 3)
        av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);

    if (!s->profile)
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
    else    // profile 1-3 use bilinear, 4+ aren't defined so whatever
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));

    if (header_size > buf_size - 7*s->keyframe) {
        av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->keyframe) {
        if (AV_RL24(buf) != 0x2a019d) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
            return AVERROR_INVALIDDATA;
        }
        width  = AV_RL16(buf+3) & 0x3fff;
        height = AV_RL16(buf+5) & 0x3fff;
        hscale = buf[4] >> 6;
        vscale = buf[6] >> 6;
        buf      += 7;
        buf_size -= 7;

        if (hscale || vscale)
            av_log_missing_feature(s->avctx, "Upscaling", 1);

        s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
        for (i = 0; i < 4; i++)
            for (j = 0; j < 16; j++)
                memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
                       sizeof(s->prob->token[i][j]));
        memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
        memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
        memcpy(s->prob->mvc      , vp8_mv_default_prob     , sizeof(s->prob->mvc));
        memset(&s->segmentation, 0, sizeof(s->segmentation));
    }
    if (!s->macroblocks_base || /* first frame */
        width != s->avctx->width || height != s->avctx->height ||
        (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) {
        if ((ret = update_dimensions(s, width, height)) < 0)
            return ret;
    }

    ff_vp56_init_range_decoder(c, buf, header_size);
    buf      += header_size;
    buf_size -= header_size;

    if (s->keyframe) {
        if (vp8_rac_get(c))
            av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
        vp8_rac_get(c); // whether we can skip clamping in dsp functions
    }

    if ((s->segmentation.enabled = vp8_rac_get(c)))
        parse_segment_info(s);
    else
        s->segmentation.update_map = 0; // FIXME: move this to some init function?
    s->filter.simple    = vp8_rac_get(c);
    s->filter.level     = vp8_rac_get_uint(c, 6);
    s->filter.sharpness = vp8_rac_get_uint(c, 3);

    if ((s->lf_delta.enabled = vp8_rac_get(c)))
        if (vp8_rac_get(c))
            update_lf_deltas(s);

    if (setup_partitions(s, buf, buf_size)) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
        return AVERROR_INVALIDDATA;
    }

    get_quants(s);

    if (!s->keyframe) {
        update_refs(s);
        s->sign_bias[VP56_FRAME_GOLDEN]               = vp8_rac_get(c);
        s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
    }
    // if we aren't saving this frame's probabilities for future frames,
    // make a copy of the current probabilities
    if (!(s->update_probabilities = vp8_rac_get(c)))
        s->prob[1] = s->prob[0];

    s->update_last = s->keyframe || vp8_rac_get(c);

    for (i = 0; i < 4; i++)
        for (j = 0; j < 8; j++)
            for (k = 0; k < 3; k++)
                for (l = 0; l < NUM_DCT_TOKENS-1; l++)
                    if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
                        int prob = vp8_rac_get_uint(c, 8);
                        for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
                            s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
                    }

    if ((s->mbskip_enabled = vp8_rac_get(c)))
        s->prob->mbskip = vp8_rac_get_uint(c, 8);
    if (!s->keyframe) {
        s->prob->intra  = vp8_rac_get_uint(c, 8);
        s->prob->last   = vp8_rac_get_uint(c, 8);
        s->prob->golden = vp8_rac_get_uint(c, 8);

        if (vp8_rac_get(c))
            for (i = 0; i < 4; i++)
                s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
        if (vp8_rac_get(c))
            for (i = 0; i < 3; i++)
                s->prob->pred8x8c[i]  = vp8_rac_get_uint(c, 8);

        // 17.2 MV probability update
        for (i = 0; i < 2; i++)
            for (j = 0; j < 19; j++)
                if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
                    s->prob->mvc[i][j] = vp8_rac_get_nn(c);
    }

    return 0;
}
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
{
    dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x);
    dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y);
}
/**
 * Motion vector coding, 17.1.
 */
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
{
    int bit, x = 0;

    if (vp56_rac_get_prob_branchy(c, p[0])) {
        int i;

        for (i = 0; i < 3; i++)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        for (i = 9; i > 3; i--)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
            x += 8;
    } else {
        // small_mvtree
        const uint8_t *ps = p+2;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + 3*bit;
        x  += 4*bit;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + bit;
        x  += 2*bit;
        x  += vp56_rac_get_prob(c, *ps);
    }

    return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
}
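/* Note on the long-MV branch above: bits 0-2 are read first, then bits 9
 * down to 4. Bit 3 is special-cased: if none of bits 4-9 is set, the value
 * must be >= 8 (smaller magnitudes use the small_mvtree path instead), so
 * 8 is added unconditionally; otherwise bit 3 is coded with probability
 * p[12]. */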
static av_always_inline
const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
{
    if (left == top)
        return vp8_submv_prob[4-!!left];
    if (!top)
        return vp8_submv_prob[2];
    return vp8_submv_prob[1-!!left];
}
/**
 * Split motion vector prediction, 16.4.
 * @returns the number of motion vectors parsed (2, 4 or 16)
 */
static av_always_inline
int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb)
{
    int part_idx;
    int n, num;
    VP8Macroblock *top_mb  = &mb[2];
    VP8Macroblock *left_mb = &mb[-1];
    const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
                  *mbsplits_top = vp8_mbsplits[top_mb->partitioning],
                  *mbsplits_cur, *firstidx;
    VP56mv *top_mv  = top_mb->bmv;
    VP56mv *left_mv = left_mb->bmv;
    VP56mv *cur_mv  = mb->bmv;

    if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
        if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1])) {
            part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
        } else {
            part_idx = VP8_SPLITMVMODE_8x8;
        }
    } else {
        part_idx = VP8_SPLITMVMODE_4x4;
    }

    num = vp8_mbsplit_count[part_idx];
    mbsplits_cur = vp8_mbsplits[part_idx];
    firstidx = vp8_mbfirstidx[part_idx];
    mb->partitioning = part_idx;

    for (n = 0; n < num; n++) {
        int k = firstidx[n];
        uint32_t left, above;
        const uint8_t *submv_prob;

        if (!(k & 3))
            left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
        else
            left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
        if (k <= 3)
            above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
        else
            above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);

        submv_prob = get_submv_prob(left, above);

        if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
            if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
                if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
                    mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
                    mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
                } else {
                    AV_ZERO32(&mb->bmv[n]);
                }
            } else {
                AV_WN32A(&mb->bmv[n], above);
            }
        } else {
            AV_WN32A(&mb->bmv[n], left);
        }
    }

    return num;
}
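/* Indexing above works on the 4x4 sub-block grid: k runs over the 16 luma
 * sub-blocks in raster order, so k-1 is the sub-block to the left, k-4 the
 * one above, k+3 the rightmost column of the left MB and k+12 the bottom
 * row of the top MB (via the &mb[-1] / &mb[2] neighbour pointers). */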
static av_always_inline
void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y)
{
    VP8Macroblock *mb_edge[3] = { mb + 2 /* top */,
                                  mb - 1 /* left */,
                                  mb + 1 /* top-left */ };
    enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
    int idx = CNT_ZERO;
    int cur_sign_bias = s->sign_bias[mb->ref_frame];
    int8_t *sign_bias = s->sign_bias;
    VP56mv near_mv[4];
    uint8_t cnt[4] = { 0 };
    VP56RangeCoder *c = &s->c;

    AV_ZERO32(&near_mv[0]);
    AV_ZERO32(&near_mv[1]);
    AV_ZERO32(&near_mv[2]);

    /* Process MB on top, left and top-left */
    #define MV_EDGE_CHECK(n)\
    {\
        VP8Macroblock *edge = mb_edge[n];\
        int edge_ref = edge->ref_frame;\
        if (edge_ref != VP56_FRAME_CURRENT) {\
            uint32_t mv = AV_RN32A(&edge->mv);\
            if (mv) {\
                if (cur_sign_bias != sign_bias[edge_ref]) {\
                    /* SWAR negate of the values in mv. */\
                    mv = ~mv;\
                    mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
                }\
                if (!n || mv != AV_RN32A(&near_mv[idx]))\
                    AV_WN32A(&near_mv[++idx], mv);\
                cnt[idx]      += 1 + (n != 2);\
            } else\
                cnt[CNT_ZERO] += 1 + (n != 2);\
        }\
    }

    MV_EDGE_CHECK(0)
    MV_EDGE_CHECK(1)
    MV_EDGE_CHECK(2)

    mb->partitioning = VP8_SPLITMVMODE_NONE;
    if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
        mb->mode = VP8_MVMODE_MV;

        /* If we have three distinct MVs, merge first and last if they're the same */
        if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
            cnt[CNT_NEAREST] += 1;

        /* Swap near and nearest if necessary */
        if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
            FFSWAP(uint8_t,     cnt[CNT_NEAREST],     cnt[CNT_NEAR]);
            FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
        }

        if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
            if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {

                /* Choose the best mv out of 0,0 and the nearest mv */
                clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);

                cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode    == VP8_MVMODE_SPLIT) +
                                    (mb_edge[VP8_EDGE_TOP]->mode     == VP8_MVMODE_SPLIT)) * 2 +
                                    (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);

                if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
                    mb->mode = VP8_MVMODE_SPLIT;
                    mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1];
                } else {
                    mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
                    mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
                    mb->bmv[0] = mb->mv;
                }
            } else {
                clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
                mb->bmv[0] = mb->mv;
            }
        } else {
            clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
            mb->bmv[0] = mb->mv;
        }
    } else {
        mb->mode = VP8_MVMODE_ZERO;
        AV_ZERO32(&mb->mv);
        mb->bmv[0] = mb->mv;
    }
}
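/* cnt[] above is a weighted vote over the top/left/top-left edge MBs, which
 * contribute 2/2/1 respectively (the 1 + (n != 2) term), either to CNT_ZERO
 * or to the slot of their (possibly sign-flipped) MV. near_mv[1] and
 * near_mv[2] end up as the "nearest" and "near" predictors whose counts
 * index the vp8_mode_contexts probabilities used in the mode decisions. */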
static av_always_inline
void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c,
                           int mb_x, int keyframe)
{
    uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
    if (keyframe) {
        int x, y;
        uint8_t* const top = s->intra4x4_pred_mode_top + 4 * mb_x;
        uint8_t* const left = s->intra4x4_pred_mode_left;
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                const uint8_t *ctx;
                ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
                *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
                left[y] = top[x] = *intra4x4;
                intra4x4++;
            }
        }
    } else {
        int i;
        for (i = 0; i < 16; i++)
            intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
    }
}
static av_always_inline
void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref)
{
    VP56RangeCoder *c = &s->c;

    if (s->segmentation.update_map) {
        int bit  = vp56_rac_get_prob(c, s->prob->segmentid[0]);
        *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
    } else if (s->segmentation.enabled)
        *segment = ref ? *ref : *segment;
    s->segment = *segment;

    mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;

    if (s->keyframe) {
        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);

        if (mb->mode == MODE_I4x4) {
            decode_intra4x4_modes(s, c, mb_x, 1);
        } else {
            const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u;
            AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
            AV_WN32A(s->intra4x4_pred_mode_left, modes);
        }

        s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
        mb->ref_frame = VP56_FRAME_CURRENT;
    } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
        // inter MB, 16.2
        if (vp56_rac_get_prob_branchy(c, s->prob->last))
            mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
                VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
        else
            mb->ref_frame = VP56_FRAME_PREVIOUS;
        s->ref_count[mb->ref_frame-1]++;

        // motion vectors, 16.3
        decode_mvs(s, mb, mb_x, mb_y);
    } else {
        // intra MB, 16.1
        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);

        if (mb->mode == MODE_I4x4)
            decode_intra4x4_modes(s, c, mb_x, 0);

        s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
        mb->ref_frame = VP56_FRAME_CURRENT;
        mb->partitioning = VP8_SPLITMVMODE_NONE;
        AV_ZERO32(&mb->bmv[0]);
    }
}
#ifndef decode_block_coeffs_internal
/**
 * @param c arithmetic bitstream reader context
 * @param block destination for block coefficients
 * @param probs probabilities to use when reading trees from the bitstream
 * @param i initial coeff index, 0 unless a separate DC block is coded
 * @param qmul array holding the dc/ac dequant factor at position 0/1
 * @return 0 if no coeffs were decoded
 *         otherwise, the index of the last coeff decoded plus one
 */
static int decode_block_coeffs_internal(VP56RangeCoder *r, DCTELEM block[16],
                                        uint8_t probs[16][3][NUM_DCT_TOKENS-1],
                                        int i, uint8_t *token_prob, int16_t qmul[2])
{
    VP56RangeCoder c = *r;
    goto skip_eob;
    do {
        int coeff;
        if (!vp56_rac_get_prob_branchy(&c, token_prob[0]))   // DCT_EOB
            break;

skip_eob:
        if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
            if (++i == 16)
                break; // invalid input; blocks should end with EOB
            token_prob = probs[i][0];
            goto skip_eob;
        }

        if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
            coeff = 1;
            token_prob = probs[i+1][1];
        } else {
            if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
                coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
                if (coeff)
                    coeff += vp56_rac_get_prob(&c, token_prob[5]);
                coeff += 2;
            } else {
                // DCT_CAT*
                if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
                    if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
                        coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
                    } else {                                             // DCT_CAT2
                        coeff  = 7;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
                    }
                } else {    // DCT_CAT3 and up
                    int a = vp56_rac_get_prob(&c, token_prob[8]);
                    int b = vp56_rac_get_prob(&c, token_prob[9+a]);
                    int cat = (a<<1) + b;
                    coeff  = 3 + (8<<cat);
                    coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
                }
            }
            token_prob = probs[i+1][2];
        }
        block[zigzag_scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
    } while (++i < 16);

    *r = c;
    return i;
}
#endif
/**
 * @param c arithmetic bitstream reader context
 * @param block destination for block coefficients
 * @param probs probabilities to use when reading trees from the bitstream
 * @param i initial coeff index, 0 unless a separate DC block is coded
 * @param zero_nhood the initial prediction context for number of surrounding
 *                   all-zero blocks (only left/top, so 0-2)
 * @param qmul array holding the dc/ac dequant factor at position 0/1
 * @return 0 if no coeffs were decoded
 *         otherwise, the index of the last coeff decoded plus one
 */
static av_always_inline
int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16],
                        uint8_t probs[16][3][NUM_DCT_TOKENS-1],
                        int i, int zero_nhood, int16_t qmul[2])
{
    uint8_t *token_prob = probs[i][zero_nhood];
    if (!vp56_rac_get_prob_branchy(c, token_prob[0]))   // DCT_EOB
        return 0;
    return decode_block_coeffs_internal(c, block, probs, i, token_prob, qmul);
}
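/* The first-token EOB check lives in this wrapper so that the common
 * all-zero-block case never enters decode_block_coeffs_internal, which works
 * on a local copy of the range coder (presumably to help register
 * allocation) and copies it back to *r only when tokens were consumed. */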
static av_always_inline
void decode_mb_coeffs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
                      uint8_t t_nnz[9], uint8_t l_nnz[9])
{
    int i, x, y, luma_start = 0, luma_ctx = 3;
    int nnz_pred, nnz, nnz_total = 0;
    int segment = s->segment;
    int block_dc = 0;

    if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
        nnz_pred = t_nnz[8] + l_nnz[8];

        // decode DC values and do hadamard
        nnz = decode_block_coeffs(c, s->block_dc, s->prob->token[1], 0, nnz_pred,
                                  s->qmat[segment].luma_dc_qmul);
        l_nnz[8] = t_nnz[8] = !!nnz;
        if (nnz) {
            nnz_total += nnz;
            block_dc = 1;
            if (nnz == 1)
                s->vp8dsp.vp8_luma_dc_wht_dc(s->block, s->block_dc);
            else
                s->vp8dsp.vp8_luma_dc_wht(s->block, s->block_dc);
        }
        luma_start = 1;
        luma_ctx = 0;
    }

    // luma blocks
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++) {
            nnz_pred = l_nnz[y] + t_nnz[x];
            nnz = decode_block_coeffs(c, s->block[y][x], s->prob->token[luma_ctx], luma_start,
                                      nnz_pred, s->qmat[segment].luma_qmul);
            // nnz+block_dc may be one more than the actual last index, but we don't care
            s->non_zero_count_cache[y][x] = nnz + block_dc;
            t_nnz[x] = l_nnz[y] = !!nnz;
            nnz_total += nnz;
        }

    // chroma blocks
    // TODO: what to do about dimensions? 2nd dim for luma is x,
    // but for chroma it's (y<<1)|x
    for (i = 4; i < 6; i++)
        for (y = 0; y < 2; y++)
            for (x = 0; x < 2; x++) {
                nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
                nnz = decode_block_coeffs(c, s->block[i][(y<<1)+x], s->prob->token[2], 0,
                                          nnz_pred, s->qmat[segment].chroma_qmul);
                s->non_zero_count_cache[i][(y<<1)+x] = nnz;
                t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
                nnz_total += nnz;
            }

    // if there were no coded coeffs despite the macroblock not being marked skip,
    // we MUST not do the inner loop filter and should not do IDCT.
    // Since skip isn't used for bitstream prediction, just manually set it.
    if (!nnz_total)
        mb->skip = 1;
}
static av_always_inline
void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                      int linesize, int uvlinesize, int simple)
{
    AV_COPY128(top_border, src_y + 15*linesize);
    if (!simple) {
        AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
        AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
    }
}
static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                    int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
                    int simple, int xchg)
{
    uint8_t *top_border_m1 = top_border-32;     // for TL prediction
    src_y  -=   linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a,b,xchg) do {                     \
        if (xchg) AV_SWAP64(b,a);               \
        else      AV_COPY64(b,a);               \
    } while (0)

    XCHG(top_border_m1+8, src_y-8, xchg);
    XCHG(top_border,      src_y,   xchg);
    XCHG(top_border+8,    src_y+8, 1);
    if (mb_x < mb_width-1)
        XCHG(top_border+32, src_y+16, 1);

    // only copy chroma for normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1+16, src_cb-8, xchg);
        XCHG(top_border_m1+24, src_cr-8, xchg);
        XCHG(top_border+16,      src_cb, 1);
        XCHG(top_border+24,      src_cr, 1);
    }
}
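/* top_border layout per MB (32 bytes): bytes 0-15 hold the bottom luma row
 * of the MB above, bytes 16-23 the bottom U row and bytes 24-31 the bottom
 * V row (see backup_mb_border). top_border_m1 therefore points at the
 * previous MB's entry, which supplies the top-left prediction samples. */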
static av_always_inline
int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x)
        return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
    else
        return mb_y ? mode : LEFT_DC_PRED8x8;
}

static av_always_inline
int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x)
        return mb_y ? VERT_PRED8x8 : DC_129_PRED8x8;
    else
        return mb_y ? mode : HOR_PRED8x8;
}

static av_always_inline
int check_intra_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (mode == DC_PRED8x8) {
        return check_dc_pred8x8_mode(mode, mb_x, mb_y);
    } else {
        return mode;
    }
}

static av_always_inline
int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
{
    switch (mode) {
    case DC_PRED8x8:
        return check_dc_pred8x8_mode(mode, mb_x, mb_y);
    case VERT_PRED8x8:
        return !mb_y ? DC_127_PRED8x8 : mode;
    case HOR_PRED8x8:
        return !mb_x ? DC_129_PRED8x8 : mode;
    case PLANE_PRED8x8 /*TM*/:
        return check_tm_pred8x8_mode(mode, mb_x, mb_y);
    }
    return mode;
}

static av_always_inline
int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x)
        return mb_y ? VERT_VP8_PRED : DC_129_PRED;
    else
        return mb_y ? mode : HOR_VP8_PRED;
}

static av_always_inline
int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf)
{
    switch (mode) {
    case VERT_PRED:
        if (!mb_x && mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case DIAG_DOWN_LEFT_PRED:
    case VERT_LEFT_PRED:
        return !mb_y ? DC_127_PRED : mode;
    case HOR_PRED:
        if (!mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case HOR_UP_PRED:
        return !mb_x ? DC_129_PRED : mode;
    case TM_VP8_PRED:
        return check_tm_pred4x4_mode(mode, mb_x, mb_y);
    case DC_PRED: // 4x4 DC doesn't use the same "H.264-style" exceptions as 16x16/8x8 DC
    case DIAG_DOWN_RIGHT_PRED:
    case VERT_RIGHT_PRED:
    case HOR_DOWN_PRED:
        if (!mb_y || !mb_x)
            *copy_buf = 1;
        return mode;
    }
    return mode;
}
static av_always_inline
void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
                   int mb_x, int mb_y)
{
    AVCodecContext *avctx = s->avctx;
    int x, y, mode, nnz;
    uint32_t tr;

    // for the first row, we need to run xchg_mb_border to init the top edge to 127
    // otherwise, skip it if we aren't going to deblock
    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 1);

    if (mb->mode < MODE_I4x4) {
        if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // tested
            mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y);
        } else {
            mode = check_intra_pred8x8_mode(mb->mode, mb_x, mb_y);
        }
        s->hpc.pred16x16[mode](dst[0], s->linesize);
    } else {
        uint8_t *ptr = dst[0];
        uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
        uint8_t tr_top[4] = { 127, 127, 127, 127 };

        // all blocks on the right edge of the macroblock use the bottom edge of
        // the top macroblock for their topright edge
        uint8_t *tr_right = ptr - s->linesize + 16;

        // if we're on the right edge of the frame, said edge is extended
        // from the top macroblock
        if (!(!mb_y && avctx->flags & CODEC_FLAG_EMU_EDGE) &&
            mb_x == s->mb_width-1) {
            tr = tr_right[-1]*0x01010101u;
            tr_right = (uint8_t *)&tr;
        }

        if (mb->skip)
            AV_ZERO128(s->non_zero_count_cache);

        for (y = 0; y < 4; y++) {
            uint8_t *topright = ptr + 4 - s->linesize;
            for (x = 0; x < 4; x++) {
                int copy = 0, linesize = s->linesize;
                uint8_t *dst = ptr+4*x;
                DECLARE_ALIGNED(4, uint8_t, copy_dst)[5*8];

                if ((y == 0 || x == 3) && mb_y == 0 && avctx->flags & CODEC_FLAG_EMU_EDGE) {
                    topright = tr_top;
                } else if (x == 3)
                    topright = tr_right;

                if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // mb_x+x or mb_y+y is a hack but works
                    mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x, mb_y + y, &copy);
                    if (copy) {
                        dst = copy_dst + 12;
                        linesize = 8;
                        if (!(mb_y + y)) {
                            copy_dst[3] = 127U;
                            AV_WN32A(copy_dst+4, 127U * 0x01010101U);
                        } else {
                            AV_COPY32(copy_dst+4, ptr+4*x-s->linesize);
                            if (!(mb_x + x)) {
                                copy_dst[3] = 129U;
                            } else {
                                copy_dst[3] = ptr[4*x-s->linesize-1];
                            }
                        }
                        if (!(mb_x + x)) {
                            copy_dst[11] =
                            copy_dst[19] =
                            copy_dst[27] =
                            copy_dst[35] = 129U;
                        } else {
                            copy_dst[11] = ptr[4*x              -1];
                            copy_dst[19] = ptr[4*x+s->linesize  -1];
                            copy_dst[27] = ptr[4*x+s->linesize*2-1];
                            copy_dst[35] = ptr[4*x+s->linesize*3-1];
                        }
                    }
                } else {
                    mode = intra4x4[x];
                }
                s->hpc.pred4x4[mode](dst, topright, linesize);
                if (copy) {
                    AV_COPY32(ptr+4*x              , copy_dst+12);
                    AV_COPY32(ptr+4*x+s->linesize  , copy_dst+20);
                    AV_COPY32(ptr+4*x+s->linesize*2, copy_dst+28);
                    AV_COPY32(ptr+4*x+s->linesize*3, copy_dst+36);
                }

                nnz = s->non_zero_count_cache[y][x];
                if (nnz) {
                    if (nnz == 1)
                        s->vp8dsp.vp8_idct_dc_add(ptr+4*x, s->block[y][x], s->linesize);
                    else
                        s->vp8dsp.vp8_idct_add(ptr+4*x, s->block[y][x], s->linesize);
                }
                topright += 4;
            }

            ptr += 4*s->linesize;
            intra4x4 += 4;
        }
    }

    if (avctx->flags & CODEC_FLAG_EMU_EDGE) {
        mode = check_intra_pred8x8_mode_emuedge(s->chroma_pred_mode, mb_x, mb_y);
    } else {
        mode = check_intra_pred8x8_mode(s->chroma_pred_mode, mb_x, mb_y);
    }
    s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
    s->hpc.pred8x8[mode](dst[2], s->uvlinesize);

    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 0);
}
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
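/* Reading the table: per 3-bit subpel position, row 0 gives the extra
 * source pixels needed to the left of the block, row 1 the total extra
 * pixels needed, row 2 the extra needed to the right. Odd positions
 * resolve to the 4-tap filters (1 left / 2 right), nonzero even positions
 * to the 6-tap ones (2 left / 3 right); position 0 is a plain copy. */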
/**
 * luma MC function
 *
 * @param s VP8 decoding context
 * @param dst target buffer for block data at block position
 * @param ref reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block (16, 8 or 4)
 * @param block_h height of block (always same as block_w)
 * @param width width of src/dst plane data
 * @param height height of src/dst plane data
 * @param linesize size of a single line of plane data, including padding
 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
 */
static av_always_inline
void vp8_mc_luma(VP8Context *s, uint8_t *dst, AVFrame *ref, const VP56mv *mv,
                 int x_off, int y_off, int block_w, int block_h,
                 int width, int height, int linesize,
                 vp8_mc_func mc_func[3][3])
{
    uint8_t *src = ref->data[0];

    if (AV_RN32A(mv)) {
        int mx = (mv->x << 1)&7, mx_idx = subpel_idx[0][mx];
        int my = (mv->y << 1)&7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 2;
        y_off += mv->y >> 2;

        // edge emulation
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
        src += y_off * linesize + x_off;
        if (x_off < mx_idx || x_off >= width  - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, src - my_idx * linesize - mx_idx, linesize,
                                    block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                    x_off - mx_idx, y_off - my_idx, width, height);
            src = s->edge_emu_buffer + mx_idx + linesize * my_idx;
        }
        mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
    } else {
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
        mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
    }
}
/**
 * chroma MC function
 *
 * @param s VP8 decoding context
 * @param dst1 target buffer for block data at block position (U plane)
 * @param dst2 target buffer for block data at block position (V plane)
 * @param ref reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block (16, 8 or 4)
 * @param block_h height of block (always same as block_w)
 * @param width width of src/dst plane data
 * @param height height of src/dst plane data
 * @param linesize size of a single line of plane data, including padding
 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
 */
static av_always_inline
void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, AVFrame *ref,
                   const VP56mv *mv, int x_off, int y_off,
                   int block_w, int block_h, int width, int height, int linesize,
                   vp8_mc_func mc_func[3][3])
{
    uint8_t *src1 = ref->data[1], *src2 = ref->data[2];

    if (AV_RN32A(mv)) {
        int mx = mv->x&7, mx_idx = subpel_idx[0][mx];
        int my = mv->y&7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 3;
        y_off += mv->y >> 3;

        // edge emulation
        src1 += y_off * linesize + x_off;
        src2 += y_off * linesize + x_off;
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
        if (x_off < mx_idx || x_off >= width  - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize,
                                    block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                    x_off - mx_idx, y_off - my_idx, width, height);
            src1 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);

            s->dsp.emulated_edge_mc(s->edge_emu_buffer, src2 - my_idx * linesize - mx_idx, linesize,
                                    block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                    x_off - mx_idx, y_off - my_idx, width, height);
            src2 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        } else {
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        }
    } else {
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
        mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
        mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
    }
}
static av_always_inline
void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
                 AVFrame *ref_frame, int x_off, int y_off,
                 int bx_off, int by_off,
                 int block_w, int block_h,
                 int width, int height, VP56mv *mv)
{
    VP56mv uvmv = *mv;

    /* Y */
    vp8_mc_luma(s, dst[0] + by_off * s->linesize + bx_off,
                ref_frame, mv, x_off + bx_off, y_off + by_off,
                block_w, block_h, width, height, s->linesize,
                s->put_pixels_tab[block_w == 8]);

    /* U/V */
    if (s->profile == 3) {
        uvmv.x &= ~7;
        uvmv.y &= ~7;
    }
    x_off   >>= 1; y_off   >>= 1;
    bx_off  >>= 1; by_off  >>= 1;
    width   >>= 1; height  >>= 1;
    block_w >>= 1; block_h >>= 1;
    vp8_mc_chroma(s, dst[1] + by_off * s->uvlinesize + bx_off,
                  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
                  &uvmv, x_off + bx_off, y_off + by_off,
                  block_w, block_h, width, height, s->uvlinesize,
                  s->put_pixels_tab[1 + (block_w == 4)]);
}
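/* Chroma MVs are derived at half resolution (the >>= 1 shifts above), so
 * the quarter-pel luma MV is reinterpreted as an eighth-pel chroma MV;
 * profile 3 additionally forces full-pel chroma by masking off the three
 * fractional bits. */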
/* Fetch pixels for estimated mv 4 macroblocks ahead.
 * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
{
    /* Don't prefetch refs that haven't been used very often this frame. */
    if (s->ref_count[ref-1] > (mb_xy >> 5)) {
        int x_off = mb_x << 4, y_off = mb_y << 4;
        int mx = (mb->mv.x>>2) + x_off + 8;
        int my = (mb->mv.y>>2) + y_off;
        uint8_t **src= s->framep[ref]->data;
        int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
        /* For threading, a ff_thread_await_progress here might be useful, but
         * it actually slows down the decoder. Since a bad prefetch doesn't
         * generate bad decoder output, we don't run it here. */
        s->dsp.prefetch(src[0]+off, s->linesize, 4);
        off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
        s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
    }
}
/**
 * Apply motion vectors to prediction buffer, chapter 18.
 */
static av_always_inline
void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
                   int mb_x, int mb_y)
{
    int x_off = mb_x << 4, y_off = mb_y << 4;
    int width = 16*s->mb_width, height = 16*s->mb_height;
    AVFrame *ref = s->framep[mb->ref_frame];
    VP56mv *bmv = mb->bmv;

    switch (mb->partitioning) {
    case VP8_SPLITMVMODE_NONE:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 16, width, height, &mb->mv);
        break;
    case VP8_SPLITMVMODE_4x4: {
        int x, y;
        VP56mv uvmv;

        /* Y */
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                vp8_mc_luma(s, dst[0] + 4*y*s->linesize + x*4,
                            ref, &bmv[4*y + x],
                            4*x + x_off, 4*y + y_off, 4, 4,
                            width, height, s->linesize,
                            s->put_pixels_tab[2]);
            }
        }

        /* U/V */
        x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
        for (y = 0; y < 2; y++) {
            for (x = 0; x < 2; x++) {
                uvmv.x = mb->bmv[ 2*y    * 4 + 2*x  ].x +
                         mb->bmv[ 2*y    * 4 + 2*x+1].x +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].x +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].x;
                uvmv.y = mb->bmv[ 2*y    * 4 + 2*x  ].y +
                         mb->bmv[ 2*y    * 4 + 2*x+1].y +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].y +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].y;
                uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
                uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
                if (s->profile == 3) {
                    uvmv.x &= ~7;
                    uvmv.y &= ~7;
                }
                vp8_mc_chroma(s, dst[1] + 4*y*s->uvlinesize + x*4,
                              dst[2] + 4*y*s->uvlinesize + x*4, ref, &uvmv,
                              4*x + x_off, 4*y + y_off, 4, 4,
                              width, height, s->uvlinesize,
                              s->put_pixels_tab[2]);
            }
        }
        break;
    }
    case VP8_SPLITMVMODE_16x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 16, 8, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x16:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 16, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 16, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 8, width, height, &bmv[1]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 8, 8, width, height, &bmv[2]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 8, 8, 8, width, height, &bmv[3]);
        break;
    }
}
static av_always_inline void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
{
    int x, y, ch;

    if (mb->mode != MODE_I4x4) {
        uint8_t *y_dst = dst[0];
        for (y = 0; y < 4; y++) {
            uint32_t nnz4 = AV_RL32(s->non_zero_count_cache[y]);
            if (nnz4) {
                if (nnz4&~0x01010101) {
                    for (x = 0; x < 4; x++) {
                        if ((uint8_t)nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, s->block[y][x], s->linesize);
                        else if((uint8_t)nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(y_dst+4*x, s->block[y][x], s->linesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            break;
                    }
                } else {
                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
                }
            }
            y_dst += 4*s->linesize;
        }
    }

    for (ch = 0; ch < 2; ch++) {
        uint32_t nnz4 = AV_RL32(s->non_zero_count_cache[4+ch]);
        if (nnz4) {
            uint8_t *ch_dst = dst[1+ch];
            if (nnz4&~0x01010101) {
                for (y = 0; y < 2; y++) {
                    for (x = 0; x < 2; x++) {
                        if ((uint8_t)nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                        else if((uint8_t)nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            goto chroma_idct_end;
                    }
                    ch_dst += 4*s->uvlinesize;
                }
            } else {
                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);
            }
        }
chroma_idct_end: ;
    }
}
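/* nnz4 above packs the four per-block coefficient counts of one block row
 * into a 32-bit word (one byte each, read little-endian). A byte value of 1
 * means only the DC coefficient is present, so the cheaper idct_dc_add path
 * is taken; if no byte exceeds 1 (nnz4 & ~0x01010101 == 0), the whole row
 * is handled at once by the dc_add4y/dc_add4uv batch functions. */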
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f)
{
    int interior_limit, filter_level;

    if (s->segmentation.enabled) {
        filter_level = s->segmentation.filter_level[s->segment];
        if (!s->segmentation.absolute_vals)
            filter_level += s->filter.level;
    } else
        filter_level = s->filter.level;

    if (s->lf_delta.enabled) {
        filter_level += s->lf_delta.ref[mb->ref_frame];
        filter_level += s->lf_delta.mode[mb->mode];
    }

    filter_level = av_clip_uintp2(filter_level, 6);

    interior_limit = filter_level;
    if (s->filter.sharpness) {
        interior_limit >>= (s->filter.sharpness + 3) >> 2;
        interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
    }
    interior_limit = FFMAX(interior_limit, 1);

    f->filter_level = filter_level;
    f->inner_limit = interior_limit;
    f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
}
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim, hev_thresh;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;
    int uvlinesize = s->uvlinesize;
    static const uint8_t hev_thresh_lut[2][64] = {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
          3, 3, 3, 3 },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          2, 2, 2, 2 }
    };

    if (!filter_level)
        return;

    bedge_lim  = 2*filter_level + inner_limit;
    mbedge_lim = bedge_lim + 4;

    hev_thresh = hev_thresh_lut[s->keyframe][filter_level];

    if (mb_x) {
        s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);
    }

    if (mb_y) {
        s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
                                             dst[2] + 4 * uvlinesize,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);
    }
}
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;

    if (!filter_level)
        return;

    bedge_lim  = 2*filter_level + inner_limit;
    mbedge_lim = bedge_lim + 4;

    if (mb_x)
        s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);
    }
    if (mb_y)
        s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);
    }
}
static void filter_mb_row(VP8Context *s, AVFrame *curframe, int mb_y)
{
    VP8FilterStrength *f = s->filter_strength;
    uint8_t *dst[3] = {
        curframe->data[0] + 16*mb_y*s->linesize,
        curframe->data[1] +  8*mb_y*s->uvlinesize,
        curframe->data[2] +  8*mb_y*s->uvlinesize
    };
    int mb_x;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        filter_mb(s, dst, f++, mb_x, mb_y);
        dst[0] += 16;
        dst[1] += 8;
        dst[2] += 8;
    }
}
static void filter_mb_row_simple(VP8Context *s, AVFrame *curframe, int mb_y)
{
    VP8FilterStrength *f = s->filter_strength;
    uint8_t *dst = curframe->data[0] + 16*mb_y*s->linesize;
    int mb_x;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst, NULL, NULL, s->linesize, 0, 1);
        filter_mb_simple(s, dst, f++, mb_x, mb_y);
        dst += 16;
    }
}
static void release_queued_segmaps(VP8Context *s, int is_close)
{
    int leave_behind = is_close ? 0 : !s->maps_are_invalid;
    while (s->num_maps_to_be_freed > leave_behind)
        av_freep(&s->segmentation_maps[--s->num_maps_to_be_freed]);
    s->maps_are_invalid = 0;
}
static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt)
{
    VP8Context *s = avctx->priv_data;
    int ret, mb_x, mb_y, i, y, referenced;
    enum AVDiscard skip_thresh;
    AVFrame *av_uninit(curframe), *prev_frame;

    release_queued_segmaps(s, 0);

    if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
        goto err;

    prev_frame = s->framep[VP56_FRAME_CURRENT];

    referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
                                || s->update_altref == VP56_FRAME_CURRENT;

    skip_thresh = !referenced ? AVDISCARD_NONREF :
                    !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;

    if (avctx->skip_frame >= skip_thresh) {
        s->invisible = 1;
        memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
        goto skip_decode;
    }
    s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;

    // release no longer referenced frames
    for (i = 0; i < 5; i++)
        if (s->frames[i].data[0] &&
            &s->frames[i] != prev_frame &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
            vp8_release_frame(s, &s->frames[i], 1, 0);

    // find a free buffer
    for (i = 0; i < 5; i++)
        if (&s->frames[i] != prev_frame &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
            curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
            break;
        }
    if (i == 5) {
        av_log(avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
        abort();
    }
    if (curframe->data[0])
        vp8_release_frame(s, curframe, 1, 0);

    // Given that arithmetic probabilities are updated every frame, it's quite likely
    // that the values we have on a random interframe are complete junk if we didn't
    // start decode on a keyframe. So just don't display anything rather than junk.
    if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
                         !s->framep[VP56_FRAME_GOLDEN] ||
                         !s->framep[VP56_FRAME_GOLDEN2])) {
        av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
        ret = AVERROR_INVALIDDATA;
        goto err;
    }

    curframe->key_frame = s->keyframe;
    curframe->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    curframe->reference = referenced ? 3 : 0;
    if ((ret = vp8_alloc_frame(s, curframe))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
        goto err;
    }

    // check if golden and altref are swapped
    if (s->update_altref != VP56_FRAME_NONE) {
        s->next_framep[VP56_FRAME_GOLDEN2]  = s->framep[s->update_altref];
    } else {
        s->next_framep[VP56_FRAME_GOLDEN2]  = s->framep[VP56_FRAME_GOLDEN2];
    }
    if (s->update_golden != VP56_FRAME_NONE) {
        s->next_framep[VP56_FRAME_GOLDEN]   = s->framep[s->update_golden];
    } else {
        s->next_framep[VP56_FRAME_GOLDEN]   = s->framep[VP56_FRAME_GOLDEN];
    }
    if (s->update_last) {
        s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
    } else {
        s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
    }
    s->next_framep[VP56_FRAME_CURRENT]      = curframe;

    ff_thread_finish_setup(avctx);
    s->linesize   = curframe->linesize[0];
    s->uvlinesize = curframe->linesize[1];

    if (!s->edge_emu_buffer)
        s->edge_emu_buffer = av_malloc(21*s->linesize);

    memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));

    /* Zero macroblock structures for top/top-left prediction from outside the frame. */
    memset(s->macroblocks + s->mb_height*2 - 1, 0, (s->mb_width+1)*sizeof(*s->macroblocks));

    // top edge of 127 for intra prediction
    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        s->top_border[0][15] = s->top_border[0][23] = 127;
        memset(s->top_border[1]-1, 127, s->mb_width*sizeof(*s->top_border)+1);
    }
    memset(s->ref_count, 0, sizeof(s->ref_count));
    if (s->keyframe)
        memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width*4);

#define MARGIN (16 << 2)
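    /* MVs are handled in quarter-pel units here: one MB index step is 64
     * (16 pixels * 4), so MARGIN (16 << 2) lets motion vectors point up to
     * 16 pixels outside the frame, relying on edge emulation / border
     * extension for the out-of-frame samples. */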
    s->mv_min.y = -MARGIN;
    s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
        VP8Macroblock *mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
        int mb_xy = mb_y*s->mb_width;
        uint8_t *dst[3] = {
            curframe->data[0] + 16*mb_y*s->linesize,
            curframe->data[1] +  8*mb_y*s->uvlinesize,
            curframe->data[2] +  8*mb_y*s->uvlinesize
        };

        memset(mb - 1, 0, sizeof(*mb));   // zero left macroblock
        memset(s->left_nnz, 0, sizeof(s->left_nnz));
        AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED*0x01010101);

        // left edge of 129 for intra prediction
        if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
            for (i = 0; i < 3; i++)
                for (y = 0; y < 16>>!!i; y++)
                    dst[i][y*curframe->linesize[i]-1] = 129;
            if (mb_y == 1) // top left edge is also 129
                s->top_border[0][15] = s->top_border[0][23] = s->top_border[0][31] = 129;
        }

        s->mv_min.x = -MARGIN;
        s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
        if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map)
            ff_thread_await_progress(prev_frame, mb_y, 0);
        for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
            /* Prefetch the current frame, 4 MBs ahead */
            s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
            s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);

            decode_mb_mode(s, mb, mb_x, mb_y, curframe->ref_index[0] + mb_xy,
                           prev_frame && prev_frame->ref_index[0] ? prev_frame->ref_index[0] + mb_xy : NULL);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);

            if (!mb->skip)
                decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz);

            if (mb->mode <= MODE_I4x4)
                intra_predict(s, dst, mb, mb_x, mb_y);
            else
                inter_predict(s, dst, mb, mb_x, mb_y);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);

            if (!mb->skip) {
                idct_mb(s, dst, mb);
            } else {
                AV_ZERO64(s->left_nnz);
                AV_WN64(s->top_nnz[mb_x], 0);   // array of 9, so unaligned

                // Reset DC block predictors if they would exist if the mb had coefficients
                if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
                    s->left_nnz[8]      = 0;
                    s->top_nnz[mb_x][8] = 0;
                }
            }

            if (s->deblock_filter)
                filter_level_for_mb(s, mb, &s->filter_strength[mb_x]);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);

            dst[0] += 16;
            dst[1] += 8;
            dst[2] += 8;
            s->mv_min.x -= 64;
            s->mv_max.x -= 64;
        }
        if (s->deblock_filter) {
            if (s->filter.simple)
                filter_mb_row_simple(s, curframe, mb_y);
            else
                filter_mb_row(s, curframe, mb_y);
        }
        s->mv_min.y -= 64;
        s->mv_max.y -= 64;

        ff_thread_report_progress(curframe, mb_y, 0);
    }

    ff_thread_report_progress(curframe, INT_MAX, 0);
    memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
skip_decode:
    // if future frames don't use the updated probabilities,
    // reset them to the values we saved
    if (!s->update_probabilities)
        s->prob[0] = s->prob[1];

    if (!s->invisible) {
        *(AVFrame*)data = *curframe;
        *data_size = sizeof(AVFrame);
    }

    return avpkt->size;
err:
    memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
    return ret;
}
static av_cold int vp8_decode_init(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_YUV420P;

    ff_dsputil_init(&s->dsp, avctx);
    ff_h264_pred_init(&s->hpc, CODEC_ID_VP8, 8, 1);
    ff_vp8dsp_init(&s->vp8dsp);

    return 0;
}
static av_cold int vp8_decode_free(AVCodecContext *avctx)
{
    vp8_decode_flush_impl(avctx, 0, 1, 1);
    release_queued_segmaps(avctx->priv_data, 1);
    return 0;
}
static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;

    s->avctx = avctx;

    return 0;
}
#define REBASE(pic) \
    pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
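/* REBASE translates an AVFrame pointer from the source thread's frames[]
 * array into the corresponding slot of this thread's own copy, since each
 * frame-threading context keeps its frames[] storage at a different
 * address. */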
static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    VP8Context *s = dst->priv_data, *s_src = src->priv_data;

    if (s->macroblocks_base &&
        (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
        free_buffers(s);
        s->maps_are_invalid = 1;
        s->mb_width  = s_src->mb_width;
        s->mb_height = s_src->mb_height;
    }

    s->prob[0]      = s_src->prob[!s_src->update_probabilities];
    s->segmentation = s_src->segmentation;
    s->lf_delta     = s_src->lf_delta;
    memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));

    memcpy(&s->frames, &s_src->frames, sizeof(s->frames));
    s->framep[0] = REBASE(s_src->next_framep[0]);
    s->framep[1] = REBASE(s_src->next_framep[1]);
    s->framep[2] = REBASE(s_src->next_framep[2]);
    s->framep[3] = REBASE(s_src->next_framep[3]);

    return 0;
}
AVCodec ff_vp8_decoder = {
    .name                  = "vp8",
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = CODEC_ID_VP8,
    .priv_data_size        = sizeof(VP8Context),
    .init                  = vp8_decode_init,
    .close                 = vp8_decode_free,
    .decode                = vp8_decode_frame,
    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .flush                 = vp8_decode_flush,
    .long_name             = NULL_IF_CONFIG_SMALL("On2 VP8"),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
};