2 * VP8 compatible video decoder
4 * Copyright (C) 2010 David Conrad
5 * Copyright (C) 2010 Ronald S. Bultje
6 * Copyright (C) 2010 Jason Garrett-Glaser
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "libavutil/imgutils.h"
30 #include "rectangle.h"
37 static void free_buffers(VP8Context *s)
39 av_freep(&s->macroblocks_base);
40 av_freep(&s->filter_strength);
41 av_freep(&s->intra4x4_pred_mode_top);
42 av_freep(&s->top_nnz);
43 av_freep(&s->edge_emu_buffer);
44 av_freep(&s->top_border);
46 s->macroblocks = NULL;
49 static int vp8_alloc_frame(VP8Context *s, AVFrame *f)
52 if ((ret = ff_thread_get_buffer(s->avctx, f)) < 0)
54 if (s->num_maps_to_be_freed && !s->maps_are_invalid) {
55 f->ref_index[0] = s->segmentation_maps[--s->num_maps_to_be_freed];
56 } else if (!(f->ref_index[0] = av_mallocz(s->mb_width * s->mb_height))) {
57 ff_thread_release_buffer(s->avctx, f);
58 return AVERROR(ENOMEM);
63 static void vp8_release_frame(VP8Context *s, AVFrame *f, int prefer_delayed_free, int can_direct_free)
65 if (f->ref_index[0]) {
66 if (prefer_delayed_free) {
67 /* Upon a size change, we want to free the maps but other threads may still
68 * be using them, so queue them. Upon a seek, all threads are inactive so
69 * we want to cache one to prevent re-allocation in the next decoding
70 * iteration, but the rest we can free directly. */
71 int max_queued_maps = can_direct_free ? 1 : FF_ARRAY_ELEMS(s->segmentation_maps);
72 if (s->num_maps_to_be_freed < max_queued_maps) {
73 s->segmentation_maps[s->num_maps_to_be_freed++] = f->ref_index[0];
74 } else if (can_direct_free) /* vp8_decode_flush(), but our queue is full */ {
75 av_free(f->ref_index[0]);
76 } /* else: MEMLEAK (should never happen, but better that than crash) */
77 f->ref_index[0] = NULL;
78 } else /* vp8_decode_free() */ {
79 av_free(f->ref_index[0]);
82 ff_thread_release_buffer(s->avctx, f);
85 static void vp8_decode_flush_impl(AVCodecContext *avctx,
86 int prefer_delayed_free, int can_direct_free, int free_mem)
88 VP8Context *s = avctx->priv_data;
91 if (!avctx->internal->is_copy) {
92 for (i = 0; i < 5; i++)
93 if (s->frames[i].data[0])
94 vp8_release_frame(s, &s->frames[i], prefer_delayed_free, can_direct_free);
96 memset(s->framep, 0, sizeof(s->framep));
100 s->maps_are_invalid = 1;
104 static void vp8_decode_flush(AVCodecContext *avctx)
106 vp8_decode_flush_impl(avctx, 1, 1, 0);
109 static int update_dimensions(VP8Context *s, int width, int height)
111 if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
112 height != s->avctx->height) {
113 if (av_image_check_size(width, height, 0, s->avctx))
114 return AVERROR_INVALIDDATA;
116 vp8_decode_flush_impl(s->avctx, 1, 0, 1);
118 avcodec_set_dimensions(s->avctx, width, height);
121 s->mb_width = (s->avctx->coded_width +15) / 16;
122 s->mb_height = (s->avctx->coded_height+15) / 16;
124 s->macroblocks_base = av_mallocz((s->mb_width+s->mb_height*2+1)*sizeof(*s->macroblocks));
125 s->filter_strength = av_mallocz(s->mb_width*sizeof(*s->filter_strength));
126 s->intra4x4_pred_mode_top = av_mallocz(s->mb_width*4);
127 s->top_nnz = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
128 s->top_border = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));
130 if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_top ||
131 !s->top_nnz || !s->top_border)
132 return AVERROR(ENOMEM);
134 s->macroblocks = s->macroblocks_base + 1;
139 static void parse_segment_info(VP8Context *s)
141 VP56RangeCoder *c = &s->c;
144 s->segmentation.update_map = vp8_rac_get(c);
146 if (vp8_rac_get(c)) { // update segment feature data
147 s->segmentation.absolute_vals = vp8_rac_get(c);
149 for (i = 0; i < 4; i++)
150 s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);
152 for (i = 0; i < 4; i++)
153 s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
155 if (s->segmentation.update_map)
156 for (i = 0; i < 3; i++)
157 s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
160 static void update_lf_deltas(VP8Context *s)
162 VP56RangeCoder *c = &s->c;
165 for (i = 0; i < 4; i++) {
166 if (vp8_rac_get(c)) {
167 s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
170 s->lf_delta.ref[i] = -s->lf_delta.ref[i];
174 for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
175 if (vp8_rac_get(c)) {
176 s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
179 s->lf_delta.mode[i] = -s->lf_delta.mode[i];
184 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
186 const uint8_t *sizes = buf;
189 s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
191 buf += 3*(s->num_coeff_partitions-1);
192 buf_size -= 3*(s->num_coeff_partitions-1);
196 for (i = 0; i < s->num_coeff_partitions-1; i++) {
197 int size = AV_RL24(sizes + 3*i);
198 if (buf_size - size < 0)
201 ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
205 ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
210 static void get_quants(VP8Context *s)
212 VP56RangeCoder *c = &s->c;
215 int yac_qi = vp8_rac_get_uint(c, 7);
216 int ydc_delta = vp8_rac_get_sint(c, 4);
217 int y2dc_delta = vp8_rac_get_sint(c, 4);
218 int y2ac_delta = vp8_rac_get_sint(c, 4);
219 int uvdc_delta = vp8_rac_get_sint(c, 4);
220 int uvac_delta = vp8_rac_get_sint(c, 4);
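/* Each per-plane delta is added to the (possibly per-segment) base quantizer
 * index and clipped to the 0..127 range of the DC/AC lookup tables below. */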
222 for (i = 0; i < 4; i++) {
223 if (s->segmentation.enabled) {
224 base_qi = s->segmentation.base_quant[i];
225 if (!s->segmentation.absolute_vals)
230 s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta , 7)];
231 s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi , 7)];
232 s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)];
233 s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] / 100;
234 s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
235 s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
237 s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
238 s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
243 * Determine which buffers golden and altref should be updated with after this frame.
244 * The spec isn't clear here, so I'm going by my understanding of what libvpx does
246 * Intra frames update all 3 references
247 * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
248 * If the update (golden|altref) flag is set, it's updated with the current frame
249 * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
* If the flag is not set, the number read means:
* 0: no update
* 1: VP56_FRAME_PREVIOUS
253 * 2: update golden with altref, or update altref with golden
255 static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
257 VP56RangeCoder *c = &s->c;
260 return VP56_FRAME_CURRENT;
262 switch (vp8_rac_get_uint(c, 2)) {
264 return VP56_FRAME_PREVIOUS;
266 return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
268 return VP56_FRAME_NONE;
271 static void update_refs(VP8Context *s)
273 VP56RangeCoder *c = &s->c;
275 int update_golden = vp8_rac_get(c);
276 int update_altref = vp8_rac_get(c);
278 s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
279 s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
282 static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
284 VP56RangeCoder *c = &s->c;
285 int header_size, hscale, vscale, i, j, k, l, m, ret;
286 int width = s->avctx->width;
287 int height = s->avctx->height;
289 s->keyframe = !(buf[0] & 1);
290 s->profile = (buf[0]>>1) & 7;
291 s->invisible = !(buf[0] & 0x10);
292 header_size = AV_RL24(buf) >> 5;
297 av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
300 memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
301 else // profile 1-3 use bilinear, 4+ aren't defined so whatever
302 memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));
304 if (header_size > buf_size - 7*s->keyframe) {
305 av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
306 return AVERROR_INVALIDDATA;
310 if (AV_RL24(buf) != 0x2a019d) {
311 av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
312 return AVERROR_INVALIDDATA;
314 width = AV_RL16(buf+3) & 0x3fff;
315 height = AV_RL16(buf+5) & 0x3fff;
316 hscale = buf[4] >> 6;
317 vscale = buf[6] >> 6;
321 if (hscale || vscale)
322 av_log_missing_feature(s->avctx, "Upscaling", 1);
324 s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
325 for (i = 0; i < 4; i++)
326 for (j = 0; j < 16; j++)
327 memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
328 sizeof(s->prob->token[i][j]));
329 memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
330 memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
331 memcpy(s->prob->mvc , vp8_mv_default_prob , sizeof(s->prob->mvc));
332 memset(&s->segmentation, 0, sizeof(s->segmentation));
335 if (!s->macroblocks_base || /* first frame */
336 width != s->avctx->width || height != s->avctx->height || (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) {
337 if ((ret = update_dimensions(s, width, height)) < 0)
341 ff_vp56_init_range_decoder(c, buf, header_size);
343 buf_size -= header_size;
347 av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
348 vp8_rac_get(c); // whether we can skip clamping in dsp functions
351 if ((s->segmentation.enabled = vp8_rac_get(c)))
352 parse_segment_info(s);
354 s->segmentation.update_map = 0; // FIXME: move this to some init function?
356 s->filter.simple = vp8_rac_get(c);
357 s->filter.level = vp8_rac_get_uint(c, 6);
358 s->filter.sharpness = vp8_rac_get_uint(c, 3);
360 if ((s->lf_delta.enabled = vp8_rac_get(c)))
364 if (setup_partitions(s, buf, buf_size)) {
365 av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
366 return AVERROR_INVALIDDATA;
373 s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
374 s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
377 // if we aren't saving this frame's probabilities for future frames,
378 // make a copy of the current probabilities
379 if (!(s->update_probabilities = vp8_rac_get(c)))
380 s->prob[1] = s->prob[0];
382 s->update_last = s->keyframe || vp8_rac_get(c);
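// coefficient token probability updates: a single updated value is shared by
// every zigzag position that belongs to the same coefficient band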
384 for (i = 0; i < 4; i++)
385 for (j = 0; j < 8; j++)
386 for (k = 0; k < 3; k++)
387 for (l = 0; l < NUM_DCT_TOKENS-1; l++)
388 if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
389 int prob = vp8_rac_get_uint(c, 8);
390 for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
391 s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
394 if ((s->mbskip_enabled = vp8_rac_get(c)))
395 s->prob->mbskip = vp8_rac_get_uint(c, 8);
398 s->prob->intra = vp8_rac_get_uint(c, 8);
399 s->prob->last = vp8_rac_get_uint(c, 8);
400 s->prob->golden = vp8_rac_get_uint(c, 8);
403 for (i = 0; i < 4; i++)
404 s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
406 for (i = 0; i < 3; i++)
407 s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
409 // 17.2 MV probability update
410 for (i = 0; i < 2; i++)
411 for (j = 0; j < 19; j++)
412 if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
413 s->prob->mvc[i][j] = vp8_rac_get_nn(c);
419 static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
421 dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x);
422 dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y);
426 * Motion vector coding, 17.1.
428 static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
432 if (vp56_rac_get_prob_branchy(c, p[0])) {
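// "long" MV magnitudes: bits 0-2 are read first, then bits 9 down to 4;
// bit 3 is forced to 1 when no higher bit is set (the value must be >= 8
// to use this form), otherwise it is read explicitly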
435 for (i = 0; i < 3; i++)
436 x += vp56_rac_get_prob(c, p[9 + i]) << i;
437 for (i = 9; i > 3; i--)
438 x += vp56_rac_get_prob(c, p[9 + i]) << i;
439 if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
443 const uint8_t *ps = p+2;
444 bit = vp56_rac_get_prob(c, *ps);
447 bit = vp56_rac_get_prob(c, *ps);
450 x += vp56_rac_get_prob(c, *ps);
453 return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
456 static av_always_inline
457 const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
460 return vp8_submv_prob[4-!!left];
462 return vp8_submv_prob[2];
463 return vp8_submv_prob[1-!!left];
467 * Split motion vector prediction, 16.4.
468 * @returns the number of motion vectors parsed (2, 4 or 16)
470 static av_always_inline
471 int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb)
475 VP8Macroblock *top_mb = &mb[2];
476 VP8Macroblock *left_mb = &mb[-1];
477 const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
478 *mbsplits_top = vp8_mbsplits[top_mb->partitioning],
479 *mbsplits_cur, *firstidx;
480 VP56mv *top_mv = top_mb->bmv;
481 VP56mv *left_mv = left_mb->bmv;
482 VP56mv *cur_mv = mb->bmv;
484 if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
485 if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1])) {
486 part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
488 part_idx = VP8_SPLITMVMODE_8x8;
491 part_idx = VP8_SPLITMVMODE_4x4;
494 num = vp8_mbsplit_count[part_idx];
mbsplits_cur = vp8_mbsplits[part_idx];
496 firstidx = vp8_mbfirstidx[part_idx];
497 mb->partitioning = part_idx;
499 for (n = 0; n < num; n++) {
501 uint32_t left, above;
502 const uint8_t *submv_prob;
505 left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
507 left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
509 above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
511 above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
513 submv_prob = get_submv_prob(left, above);
515 if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
516 if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
517 if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
518 mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
519 mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
521 AV_ZERO32(&mb->bmv[n]);
524 AV_WN32A(&mb->bmv[n], above);
527 AV_WN32A(&mb->bmv[n], left);
534 static av_always_inline
535 void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y)
537 VP8Macroblock *mb_edge[3] = { mb + 2 /* top */,
539 mb + 1 /* top-left */ };
540 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
541 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
543 int cur_sign_bias = s->sign_bias[mb->ref_frame];
544 int8_t *sign_bias = s->sign_bias;
546 uint8_t cnt[4] = { 0 };
547 VP56RangeCoder *c = &s->c;
549 AV_ZERO32(&near_mv[0]);
550 AV_ZERO32(&near_mv[1]);
551 AV_ZERO32(&near_mv[2]);
553 /* Process MB on top, left and top-left */
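/* Top and left neighbours are weighted twice as heavily as the top-left
 * one when ranking the candidate MVs (the "1 + (n != 2)" terms below). */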
554 #define MV_EDGE_CHECK(n)\
556 VP8Macroblock *edge = mb_edge[n];\
557 int edge_ref = edge->ref_frame;\
558 if (edge_ref != VP56_FRAME_CURRENT) {\
559 uint32_t mv = AV_RN32A(&edge->mv);\
561 if (cur_sign_bias != sign_bias[edge_ref]) {\
562 /* SWAR negate of the values in mv. */\
mv = ~mv;\
mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
566 if (!n || mv != AV_RN32A(&near_mv[idx]))\
567 AV_WN32A(&near_mv[++idx], mv);\
568 cnt[idx] += 1 + (n != 2);\
570 cnt[CNT_ZERO] += 1 + (n != 2);\
578 mb->partitioning = VP8_SPLITMVMODE_NONE;
579 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
580 mb->mode = VP8_MVMODE_MV;
582 /* If we have three distinct MVs, merge first and last if they're the same */
583 if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
584 cnt[CNT_NEAREST] += 1;
586 /* Swap near and nearest if necessary */
587 if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
588 FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
589 FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
592 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
593 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
595 /* Choose the best mv out of 0,0 and the nearest mv */
596 clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
597 cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
598 (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
599 (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
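/* cnt[CNT_SPLITMV] is reused as the context for the split-MV probability:
 * the number of neighbours coded with split MVs, top/left counted double. */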
601 if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
602 mb->mode = VP8_MVMODE_SPLIT;
603 mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1];
605 mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
606 mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
610 clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
614 clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
618 mb->mode = VP8_MVMODE_ZERO;
624 static av_always_inline
625 void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c,
626 int mb_x, int keyframe)
628 uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
631 uint8_t* const top = s->intra4x4_pred_mode_top + 4 * mb_x;
632 uint8_t* const left = s->intra4x4_pred_mode_left;
633 for (y = 0; y < 4; y++) {
634 for (x = 0; x < 4; x++) {
636 ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
637 *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
638 left[y] = top[x] = *intra4x4;
644 for (i = 0; i < 16; i++)
645 intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
649 static av_always_inline
650 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref)
652 VP56RangeCoder *c = &s->c;
654 if (s->segmentation.update_map) {
655 int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
656 *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
657 } else if (s->segmentation.enabled)
658 *segment = ref ? *ref : *segment;
659 s->segment = *segment;
661 mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
664 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);
666 if (mb->mode == MODE_I4x4) {
667 decode_intra4x4_modes(s, c, mb_x, 1);
669 const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u;
670 AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
671 AV_WN32A(s->intra4x4_pred_mode_left, modes);
674 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
675 mb->ref_frame = VP56_FRAME_CURRENT;
676 } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
678 if (vp56_rac_get_prob_branchy(c, s->prob->last))
679 mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
680 VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
682 mb->ref_frame = VP56_FRAME_PREVIOUS;
683 s->ref_count[mb->ref_frame-1]++;
685 // motion vectors, 16.3
686 decode_mvs(s, mb, mb_x, mb_y);
689 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);
691 if (mb->mode == MODE_I4x4)
692 decode_intra4x4_modes(s, c, mb_x, 0);
694 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
695 mb->ref_frame = VP56_FRAME_CURRENT;
696 mb->partitioning = VP8_SPLITMVMODE_NONE;
697 AV_ZERO32(&mb->bmv[0]);
701 #ifndef decode_block_coeffs_internal
703 * @param c arithmetic bitstream reader context
704 * @param block destination for block coefficients
705 * @param probs probabilities to use when reading trees from the bitstream
706 * @param i initial coeff index, 0 unless a separate DC block is coded
707 * @param qmul array holding the dc/ac dequant factor at position 0/1
708 * @return 0 if no coeffs were decoded
709 * otherwise, the index of the last coeff decoded plus one
711 static int decode_block_coeffs_internal(VP56RangeCoder *c, DCTELEM block[16],
712 uint8_t probs[16][3][NUM_DCT_TOKENS-1],
713 int i, uint8_t *token_prob, int16_t qmul[2])
718 if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
722 if (!vp56_rac_get_prob_branchy(c, token_prob[1])) { // DCT_0
724 return i; // invalid input; blocks should end with EOB
725 token_prob = probs[i][0];
729 if (!vp56_rac_get_prob_branchy(c, token_prob[2])) { // DCT_1
731 token_prob = probs[i+1][1];
733 if (!vp56_rac_get_prob_branchy(c, token_prob[3])) { // DCT 2,3,4
734 coeff = vp56_rac_get_prob_branchy(c, token_prob[4]);
736 coeff += vp56_rac_get_prob(c, token_prob[5]);
740 if (!vp56_rac_get_prob_branchy(c, token_prob[6])) {
741 if (!vp56_rac_get_prob_branchy(c, token_prob[7])) { // DCT_CAT1
742 coeff = 5 + vp56_rac_get_prob(c, vp8_dct_cat1_prob[0]);
745 coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[0]) << 1;
746 coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[1]);
748 } else { // DCT_CAT3 and up
749 int a = vp56_rac_get_prob(c, token_prob[8]);
750 int b = vp56_rac_get_prob(c, token_prob[9+a]);
751 int cat = (a<<1) + b;
752 coeff = 3 + (8<<cat);
753 coeff += vp8_rac_get_coeff(c, ff_vp8_dct_cat_prob[cat]);
756 token_prob = probs[i+1][2];
758 block[zigzag_scan[i]] = (vp8_rac_get(c) ? -coeff : coeff) * qmul[!!i];
766 * @param c arithmetic bitstream reader context
767 * @param block destination for block coefficients
768 * @param probs probabilities to use when reading trees from the bitstream
769 * @param i initial coeff index, 0 unless a separate DC block is coded
770 * @param zero_nhood the initial prediction context for number of surrounding
771 * all-zero blocks (only left/top, so 0-2)
772 * @param qmul array holding the dc/ac dequant factor at position 0/1
773 * @return 0 if no coeffs were decoded
774 * otherwise, the index of the last coeff decoded plus one
776 static av_always_inline
777 int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16],
778 uint8_t probs[16][3][NUM_DCT_TOKENS-1],
779 int i, int zero_nhood, int16_t qmul[2])
781 uint8_t *token_prob = probs[i][zero_nhood];
782 if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
784 return decode_block_coeffs_internal(c, block, probs, i, token_prob, qmul);
787 static av_always_inline
788 void decode_mb_coeffs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
789 uint8_t t_nnz[9], uint8_t l_nnz[9])
791 int i, x, y, luma_start = 0, luma_ctx = 3;
792 int nnz_pred, nnz, nnz_total = 0;
793 int segment = s->segment;
796 if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
797 nnz_pred = t_nnz[8] + l_nnz[8];
799 // decode DC values and do hadamard
800 nnz = decode_block_coeffs(c, s->block_dc, s->prob->token[1], 0, nnz_pred,
801 s->qmat[segment].luma_dc_qmul);
802 l_nnz[8] = t_nnz[8] = !!nnz;
807 s->vp8dsp.vp8_luma_dc_wht_dc(s->block, s->block_dc);
809 s->vp8dsp.vp8_luma_dc_wht(s->block, s->block_dc);
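// the inverse WHT of the 2nd-order block writes the DC coefficient back
// into each of the 16 luma subblocks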
816 for (y = 0; y < 4; y++)
817 for (x = 0; x < 4; x++) {
818 nnz_pred = l_nnz[y] + t_nnz[x];
819 nnz = decode_block_coeffs(c, s->block[y][x], s->prob->token[luma_ctx], luma_start,
820 nnz_pred, s->qmat[segment].luma_qmul);
821 // nnz+block_dc may be one more than the actual last index, but we don't care
822 s->non_zero_count_cache[y][x] = nnz + block_dc;
823 t_nnz[x] = l_nnz[y] = !!nnz;
828 // TODO: what to do about dimensions? 2nd dim for luma is x,
829 // but for chroma it's (y<<1)|x
830 for (i = 4; i < 6; i++)
831 for (y = 0; y < 2; y++)
832 for (x = 0; x < 2; x++) {
833 nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
834 nnz = decode_block_coeffs(c, s->block[i][(y<<1)+x], s->prob->token[2], 0,
835 nnz_pred, s->qmat[segment].chroma_qmul);
836 s->non_zero_count_cache[i][(y<<1)+x] = nnz;
837 t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
841 // if there were no coded coeffs despite the macroblock not being marked skip,
842 // we MUST not do the inner loop filter and should not do IDCT
843 // Since skip isn't used for bitstream prediction, just manually set it.
848 static av_always_inline
849 void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
850 int linesize, int uvlinesize, int simple)
852 AV_COPY128(top_border, src_y + 15*linesize);
854 AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
855 AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
859 static av_always_inline
860 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
861 int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
862 int simple, int xchg)
864 uint8_t *top_border_m1 = top_border-32; // for TL prediction
866 src_cb -= uvlinesize;
867 src_cr -= uvlinesize;
869 #define XCHG(a,b,xchg) do { \
870 if (xchg) AV_SWAP64(b,a); \
871 else AV_COPY64(b,a); \
874 XCHG(top_border_m1+8, src_y-8, xchg);
875 XCHG(top_border, src_y, xchg);
876 XCHG(top_border+8, src_y+8, 1);
877 if (mb_x < mb_width-1)
878 XCHG(top_border+32, src_y+16, 1);
880 // only copy chroma for normal loop filter
881 // or to initialize the top row to 127
882 if (!simple || !mb_y) {
883 XCHG(top_border_m1+16, src_cb-8, xchg);
884 XCHG(top_border_m1+24, src_cr-8, xchg);
885 XCHG(top_border+16, src_cb, 1);
886 XCHG(top_border+24, src_cr, 1);
890 static av_always_inline
891 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
894 return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
896 return mb_y ? mode : LEFT_DC_PRED8x8;
900 static av_always_inline
901 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y)
904 return mb_y ? VERT_PRED8x8 : DC_129_PRED8x8;
906 return mb_y ? mode : HOR_PRED8x8;
910 static av_always_inline
911 int check_intra_pred8x8_mode(int mode, int mb_x, int mb_y)
913 if (mode == DC_PRED8x8) {
914 return check_dc_pred8x8_mode(mode, mb_x, mb_y);
920 static av_always_inline
921 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
925 return check_dc_pred8x8_mode(mode, mb_x, mb_y);
927 return !mb_y ? DC_127_PRED8x8 : mode;
929 return !mb_x ? DC_129_PRED8x8 : mode;
930 case PLANE_PRED8x8 /*TM*/:
931 return check_tm_pred8x8_mode(mode, mb_x, mb_y);
936 static av_always_inline
937 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y)
940 return mb_y ? VERT_VP8_PRED : DC_129_PRED;
942 return mb_y ? mode : HOR_VP8_PRED;
946 static av_always_inline
947 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf)
956 case DIAG_DOWN_LEFT_PRED:
958 return !mb_y ? DC_127_PRED : mode;
966 return !mb_x ? DC_129_PRED : mode;
968 return check_tm_pred4x4_mode(mode, mb_x, mb_y);
969 case DC_PRED: // 4x4 DC doesn't use the same "H.264-style" exceptions as 16x16/8x8 DC
970 case DIAG_DOWN_RIGHT_PRED:
971 case VERT_RIGHT_PRED:
980 static av_always_inline
981 void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
984 AVCodecContext *avctx = s->avctx;
988 // for the first row, we need to run xchg_mb_border to init the top edge to 127
989 // otherwise, skip it if we aren't going to deblock
990 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
991 xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
992 s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
993 s->filter.simple, 1);
995 if (mb->mode < MODE_I4x4) {
996 if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // tested
997 mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y);
999 mode = check_intra_pred8x8_mode(mb->mode, mb_x, mb_y);
1001 s->hpc.pred16x16[mode](dst[0], s->linesize);
1003 uint8_t *ptr = dst[0];
1004 uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
1005 uint8_t tr_top[4] = { 127, 127, 127, 127 };
// all blocks on the right edge of the macroblock use the bottom edge of
// the top macroblock for their topright edge
1009 uint8_t *tr_right = ptr - s->linesize + 16;
1011 // if we're on the right edge of the frame, said edge is extended
1012 // from the top macroblock
1013 if (!(!mb_y && avctx->flags & CODEC_FLAG_EMU_EDGE) &&
1014 mb_x == s->mb_width-1) {
1015 tr = tr_right[-1]*0x01010101u;
1016 tr_right = (uint8_t *)&tr;
1020 AV_ZERO128(s->non_zero_count_cache);
1022 for (y = 0; y < 4; y++) {
1023 uint8_t *topright = ptr + 4 - s->linesize;
1024 for (x = 0; x < 4; x++) {
1025 int copy = 0, linesize = s->linesize;
1026 uint8_t *dst = ptr+4*x;
1027 DECLARE_ALIGNED(4, uint8_t, copy_dst)[5*8];
1029 if ((y == 0 || x == 3) && mb_y == 0 && avctx->flags & CODEC_FLAG_EMU_EDGE) {
1032 topright = tr_right;
1034 if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // mb_x+x or mb_y+y is a hack but works
mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x, mb_y + y, &copy);
1037 dst = copy_dst + 12;
1041 AV_WN32A(copy_dst+4, 127U * 0x01010101U);
1043 AV_COPY32(copy_dst+4, ptr+4*x-s->linesize);
1047 copy_dst[3] = ptr[4*x-s->linesize-1];
1054 copy_dst[35] = 129U;
1056 copy_dst[11] = ptr[4*x -1];
1057 copy_dst[19] = ptr[4*x+s->linesize -1];
1058 copy_dst[27] = ptr[4*x+s->linesize*2-1];
1059 copy_dst[35] = ptr[4*x+s->linesize*3-1];
1065 s->hpc.pred4x4[mode](dst, topright, linesize);
1067 AV_COPY32(ptr+4*x , copy_dst+12);
1068 AV_COPY32(ptr+4*x+s->linesize , copy_dst+20);
1069 AV_COPY32(ptr+4*x+s->linesize*2, copy_dst+28);
1070 AV_COPY32(ptr+4*x+s->linesize*3, copy_dst+36);
1073 nnz = s->non_zero_count_cache[y][x];
1076 s->vp8dsp.vp8_idct_dc_add(ptr+4*x, s->block[y][x], s->linesize);
1078 s->vp8dsp.vp8_idct_add(ptr+4*x, s->block[y][x], s->linesize);
1083 ptr += 4*s->linesize;
1088 if (avctx->flags & CODEC_FLAG_EMU_EDGE) {
1089 mode = check_intra_pred8x8_mode_emuedge(s->chroma_pred_mode, mb_x, mb_y);
1091 mode = check_intra_pred8x8_mode(s->chroma_pred_mode, mb_x, mb_y);
1093 s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1094 s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1096 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
1097 xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
1098 s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1099 s->filter.simple, 0);
1102 static const uint8_t subpel_idx[3][8] = {
1103 { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
1104 // also function pointer index
1105 { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
1106 { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
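// Edge emulation is only needed when the source area, widened by these
// margins for the subpel filter taps, crosses the plane borders.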
1112 * @param s VP8 decoding context
1113 * @param dst target buffer for block data at block position
1114 * @param ref reference picture buffer at origin (0, 0)
1115 * @param mv motion vector (relative to block position) to get pixel data from
1116 * @param x_off horizontal position of block from origin (0, 0)
1117 * @param y_off vertical position of block from origin (0, 0)
1118 * @param block_w width of block (16, 8 or 4)
1119 * @param block_h height of block (always same as block_w)
1120 * @param width width of src/dst plane data
1121 * @param height height of src/dst plane data
1122 * @param linesize size of a single line of plane data, including padding
1123 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1125 static av_always_inline
1126 void vp8_mc_luma(VP8Context *s, uint8_t *dst, AVFrame *ref, const VP56mv *mv,
1127 int x_off, int y_off, int block_w, int block_h,
1128 int width, int height, int linesize,
1129 vp8_mc_func mc_func[3][3])
1131 uint8_t *src = ref->data[0];
1135 int mx = (mv->x << 1)&7, mx_idx = subpel_idx[0][mx];
1136 int my = (mv->y << 1)&7, my_idx = subpel_idx[0][my];
1138 x_off += mv->x >> 2;
1139 y_off += mv->y >> 2;
1142 ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1143 src += y_off * linesize + x_off;
1144 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1145 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1146 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src - my_idx * linesize - mx_idx, linesize,
1147 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1148 x_off - mx_idx, y_off - my_idx, width, height);
1149 src = s->edge_emu_buffer + mx_idx + linesize * my_idx;
1151 mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
1153 ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1154 mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
1159 * chroma MC function
1161 * @param s VP8 decoding context
1162 * @param dst1 target buffer for block data at block position (U plane)
1163 * @param dst2 target buffer for block data at block position (V plane)
1164 * @param ref reference picture buffer at origin (0, 0)
1165 * @param mv motion vector (relative to block position) to get pixel data from
1166 * @param x_off horizontal position of block from origin (0, 0)
1167 * @param y_off vertical position of block from origin (0, 0)
1168 * @param block_w width of block (16, 8 or 4)
1169 * @param block_h height of block (always same as block_w)
1170 * @param width width of src/dst plane data
1171 * @param height height of src/dst plane data
1172 * @param linesize size of a single line of plane data, including padding
1173 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1175 static av_always_inline
1176 void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, AVFrame *ref,
1177 const VP56mv *mv, int x_off, int y_off,
1178 int block_w, int block_h, int width, int height, int linesize,
1179 vp8_mc_func mc_func[3][3])
1181 uint8_t *src1 = ref->data[1], *src2 = ref->data[2];
1184 int mx = mv->x&7, mx_idx = subpel_idx[0][mx];
1185 int my = mv->y&7, my_idx = subpel_idx[0][my];
1187 x_off += mv->x >> 3;
1188 y_off += mv->y >> 3;
1191 src1 += y_off * linesize + x_off;
1192 src2 += y_off * linesize + x_off;
1193 ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1194 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1195 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1196 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize,
1197 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1198 x_off - mx_idx, y_off - my_idx, width, height);
1199 src1 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
1200 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1202 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src2 - my_idx * linesize - mx_idx, linesize,
1203 block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
1204 x_off - mx_idx, y_off - my_idx, width, height);
1205 src2 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
1206 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1208 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1209 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1212 ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1213 mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1214 mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1218 static av_always_inline
1219 void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
1220 AVFrame *ref_frame, int x_off, int y_off,
1221 int bx_off, int by_off,
1222 int block_w, int block_h,
1223 int width, int height, VP56mv *mv)
1228 vp8_mc_luma(s, dst[0] + by_off * s->linesize + bx_off,
1229 ref_frame, mv, x_off + bx_off, y_off + by_off,
1230 block_w, block_h, width, height, s->linesize,
1231 s->put_pixels_tab[block_w == 8]);
1234 if (s->profile == 3) {
1238 x_off >>= 1; y_off >>= 1;
1239 bx_off >>= 1; by_off >>= 1;
1240 width >>= 1; height >>= 1;
1241 block_w >>= 1; block_h >>= 1;
1242 vp8_mc_chroma(s, dst[1] + by_off * s->uvlinesize + bx_off,
1243 dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1244 &uvmv, x_off + bx_off, y_off + by_off,
1245 block_w, block_h, width, height, s->uvlinesize,
1246 s->put_pixels_tab[1 + (block_w == 4)]);
1249 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1250 * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1251 static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
1253 /* Don't prefetch refs that haven't been used very often this frame. */
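/* (i.e. only once it has backed roughly 1/32 of the macroblocks decoded
 * so far in this frame) */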
1254 if (s->ref_count[ref-1] > (mb_xy >> 5)) {
1255 int x_off = mb_x << 4, y_off = mb_y << 4;
1256 int mx = (mb->mv.x>>2) + x_off + 8;
1257 int my = (mb->mv.y>>2) + y_off;
1258 uint8_t **src= s->framep[ref]->data;
1259 int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
1260 /* For threading, a ff_thread_await_progress here might be useful, but
1261 * it actually slows down the decoder. Since a bad prefetch doesn't
1262 * generate bad decoder output, we don't run it here. */
1263 s->dsp.prefetch(src[0]+off, s->linesize, 4);
1264 off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
1265 s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
1270 * Apply motion vectors to prediction buffer, chapter 18.
1272 static av_always_inline
1273 void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
1276 int x_off = mb_x << 4, y_off = mb_y << 4;
1277 int width = 16*s->mb_width, height = 16*s->mb_height;
1278 AVFrame *ref = s->framep[mb->ref_frame];
1279 VP56mv *bmv = mb->bmv;
1281 switch (mb->partitioning) {
1282 case VP8_SPLITMVMODE_NONE:
1283 vp8_mc_part(s, dst, ref, x_off, y_off,
1284 0, 0, 16, 16, width, height, &mb->mv);
1286 case VP8_SPLITMVMODE_4x4: {
1291 for (y = 0; y < 4; y++) {
1292 for (x = 0; x < 4; x++) {
1293 vp8_mc_luma(s, dst[0] + 4*y*s->linesize + x*4,
1295 4*x + x_off, 4*y + y_off, 4, 4,
1296 width, height, s->linesize,
1297 s->put_pixels_tab[2]);
1302 x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
1303 for (y = 0; y < 2; y++) {
1304 for (x = 0; x < 2; x++) {
1305 uvmv.x = mb->bmv[ 2*y * 4 + 2*x ].x +
1306 mb->bmv[ 2*y * 4 + 2*x+1].x +
1307 mb->bmv[(2*y+1) * 4 + 2*x ].x +
1308 mb->bmv[(2*y+1) * 4 + 2*x+1].x;
1309 uvmv.y = mb->bmv[ 2*y * 4 + 2*x ].y +
1310 mb->bmv[ 2*y * 4 + 2*x+1].y +
1311 mb->bmv[(2*y+1) * 4 + 2*x ].y +
1312 mb->bmv[(2*y+1) * 4 + 2*x+1].y;
1313 uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
1314 uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
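/* uvmv is the sum of the four covered luma MVs divided by 4 with rounding
 * half away from zero, e.g. a sum of 6 -> 2 and -6 -> -2. */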
1315 if (s->profile == 3) {
1319 vp8_mc_chroma(s, dst[1] + 4*y*s->uvlinesize + x*4,
1320 dst[2] + 4*y*s->uvlinesize + x*4, ref, &uvmv,
1321 4*x + x_off, 4*y + y_off, 4, 4,
1322 width, height, s->uvlinesize,
1323 s->put_pixels_tab[2]);
1328 case VP8_SPLITMVMODE_16x8:
1329 vp8_mc_part(s, dst, ref, x_off, y_off,
1330 0, 0, 16, 8, width, height, &bmv[0]);
1331 vp8_mc_part(s, dst, ref, x_off, y_off,
1332 0, 8, 16, 8, width, height, &bmv[1]);
1334 case VP8_SPLITMVMODE_8x16:
1335 vp8_mc_part(s, dst, ref, x_off, y_off,
1336 0, 0, 8, 16, width, height, &bmv[0]);
1337 vp8_mc_part(s, dst, ref, x_off, y_off,
1338 8, 0, 8, 16, width, height, &bmv[1]);
1340 case VP8_SPLITMVMODE_8x8:
1341 vp8_mc_part(s, dst, ref, x_off, y_off,
1342 0, 0, 8, 8, width, height, &bmv[0]);
1343 vp8_mc_part(s, dst, ref, x_off, y_off,
1344 8, 0, 8, 8, width, height, &bmv[1]);
1345 vp8_mc_part(s, dst, ref, x_off, y_off,
1346 0, 8, 8, 8, width, height, &bmv[2]);
1347 vp8_mc_part(s, dst, ref, x_off, y_off,
1348 8, 8, 8, 8, width, height, &bmv[3]);
1353 static av_always_inline void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
1357 if (mb->mode != MODE_I4x4) {
1358 uint8_t *y_dst = dst[0];
1359 for (y = 0; y < 4; y++) {
1360 uint32_t nnz4 = AV_RL32(s->non_zero_count_cache[y]);
1362 if (nnz4&~0x01010101) {
1363 for (x = 0; x < 4; x++) {
1364 if ((uint8_t)nnz4 == 1)
1365 s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, s->block[y][x], s->linesize);
1366 else if((uint8_t)nnz4 > 1)
1367 s->vp8dsp.vp8_idct_add(y_dst+4*x, s->block[y][x], s->linesize);
1373 s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
1376 y_dst += 4*s->linesize;
1380 for (ch = 0; ch < 2; ch++) {
1381 uint32_t nnz4 = AV_RL32(s->non_zero_count_cache[4+ch]);
1383 uint8_t *ch_dst = dst[1+ch];
1384 if (nnz4&~0x01010101) {
1385 for (y = 0; y < 2; y++) {
1386 for (x = 0; x < 2; x++) {
1387 if ((uint8_t)nnz4 == 1)
1388 s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
1389 else if((uint8_t)nnz4 > 1)
1390 s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
1393 goto chroma_idct_end;
1395 ch_dst += 4*s->uvlinesize;
1398 s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);
1405 static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f )
1407 int interior_limit, filter_level;
1409 if (s->segmentation.enabled) {
1410 filter_level = s->segmentation.filter_level[s->segment];
1411 if (!s->segmentation.absolute_vals)
1412 filter_level += s->filter.level;
1414 filter_level = s->filter.level;
1416 if (s->lf_delta.enabled) {
1417 filter_level += s->lf_delta.ref[mb->ref_frame];
1418 filter_level += s->lf_delta.mode[mb->mode];
1421 filter_level = av_clip_uintp2(filter_level, 6);
1423 interior_limit = filter_level;
1424 if (s->filter.sharpness) {
1425 interior_limit >>= (s->filter.sharpness + 3) >> 2;
1426 interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
1428 interior_limit = FFMAX(interior_limit, 1);
1430 f->filter_level = filter_level;
1431 f->inner_limit = interior_limit;
1432 f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
1435 static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
1437 int mbedge_lim, bedge_lim, hev_thresh;
1438 int filter_level = f->filter_level;
1439 int inner_limit = f->inner_limit;
1440 int inner_filter = f->inner_filter;
1441 int linesize = s->linesize;
1442 int uvlinesize = s->uvlinesize;
1443 static const uint8_t hev_thresh_lut[2][64] = {
1444 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1445 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1446 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
1448 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1449 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1450 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1457 bedge_lim = 2*filter_level + inner_limit;
1458 mbedge_lim = bedge_lim + 4;
1460 hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
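// key frames use a lower high-edge-variance threshold than inter frames
// at the same filter level (compare the two rows of the LUT above)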
1463 s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
1464 mbedge_lim, inner_limit, hev_thresh);
1465 s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
1466 mbedge_lim, inner_limit, hev_thresh);
1470 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
1471 inner_limit, hev_thresh);
1472 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
1473 inner_limit, hev_thresh);
1474 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
1475 inner_limit, hev_thresh);
1476 s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
1477 uvlinesize, bedge_lim,
1478 inner_limit, hev_thresh);
1482 s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
1483 mbedge_lim, inner_limit, hev_thresh);
1484 s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
1485 mbedge_lim, inner_limit, hev_thresh);
1489 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
1490 linesize, bedge_lim,
1491 inner_limit, hev_thresh);
1492 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
1493 linesize, bedge_lim,
1494 inner_limit, hev_thresh);
1495 s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
1496 linesize, bedge_lim,
1497 inner_limit, hev_thresh);
1498 s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
1499 dst[2] + 4 * uvlinesize,
1500 uvlinesize, bedge_lim,
1501 inner_limit, hev_thresh);
1505 static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
1507 int mbedge_lim, bedge_lim;
1508 int filter_level = f->filter_level;
1509 int inner_limit = f->inner_limit;
1510 int inner_filter = f->inner_filter;
1511 int linesize = s->linesize;
1516 bedge_lim = 2*filter_level + inner_limit;
1517 mbedge_lim = bedge_lim + 4;
1520 s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
1522 s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
1523 s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
1524 s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);
1528 s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
1530 s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
1531 s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
1532 s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);
1536 static void filter_mb_row(VP8Context *s, AVFrame *curframe, int mb_y)
1538 VP8FilterStrength *f = s->filter_strength;
1540 curframe->data[0] + 16*mb_y*s->linesize,
1541 curframe->data[1] + 8*mb_y*s->uvlinesize,
1542 curframe->data[2] + 8*mb_y*s->uvlinesize
1546 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1547 backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
1548 filter_mb(s, dst, f++, mb_x, mb_y);
1555 static void filter_mb_row_simple(VP8Context *s, AVFrame *curframe, int mb_y)
1557 VP8FilterStrength *f = s->filter_strength;
1558 uint8_t *dst = curframe->data[0] + 16*mb_y*s->linesize;
1561 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1562 backup_mb_border(s->top_border[mb_x+1], dst, NULL, NULL, s->linesize, 0, 1);
1563 filter_mb_simple(s, dst, f++, mb_x, mb_y);
1568 static void release_queued_segmaps(VP8Context *s, int is_close)
1570 int leave_behind = is_close ? 0 : !s->maps_are_invalid;
1571 while (s->num_maps_to_be_freed > leave_behind)
1572 av_freep(&s->segmentation_maps[--s->num_maps_to_be_freed]);
1573 s->maps_are_invalid = 0;
1576 static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
1579 VP8Context *s = avctx->priv_data;
1580 int ret, mb_x, mb_y, i, y, referenced;
1581 enum AVDiscard skip_thresh;
1582 AVFrame *av_uninit(curframe), *prev_frame;
1584 release_queued_segmaps(s, 0);
1586 if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
1589 prev_frame = s->framep[VP56_FRAME_CURRENT];
1591 referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
1592 || s->update_altref == VP56_FRAME_CURRENT;
1594 skip_thresh = !referenced ? AVDISCARD_NONREF :
1595 !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;
1597 if (avctx->skip_frame >= skip_thresh) {
1599 memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
1602 s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
1604 // release no longer referenced frames
1605 for (i = 0; i < 5; i++)
1606 if (s->frames[i].data[0] &&
1607 &s->frames[i] != prev_frame &&
1608 &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
1609 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
1610 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
1611 vp8_release_frame(s, &s->frames[i], 1, 0);
1613 // find a free buffer
1614 for (i = 0; i < 5; i++)
1615 if (&s->frames[i] != prev_frame &&
1616 &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
1617 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
1618 &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
1619 curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
1623 av_log(avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
1626 if (curframe->data[0])
1627 vp8_release_frame(s, curframe, 1, 0);
1629 // Given that arithmetic probabilities are updated every frame, it's quite likely
1630 // that the values we have on a random interframe are complete junk if we didn't
1631 // start decode on a keyframe. So just don't display anything rather than junk.
1632 if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
1633 !s->framep[VP56_FRAME_GOLDEN] ||
1634 !s->framep[VP56_FRAME_GOLDEN2])) {
1635 av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
1636 ret = AVERROR_INVALIDDATA;
1640 curframe->key_frame = s->keyframe;
1641 curframe->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1642 curframe->reference = referenced ? 3 : 0;
1643 if ((ret = vp8_alloc_frame(s, curframe))) {
1644 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
1648 // check if golden and altref are swapped
1649 if (s->update_altref != VP56_FRAME_NONE) {
1650 s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
1652 s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
1654 if (s->update_golden != VP56_FRAME_NONE) {
1655 s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
1657 s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
1659 if (s->update_last) {
1660 s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
1662 s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
1664 s->next_framep[VP56_FRAME_CURRENT] = curframe;
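// s->framep only picks up these assignments once the frame has been fully
// decoded (see the memcpy after the macroblock loop below)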
1666 ff_thread_finish_setup(avctx);
1668 s->linesize = curframe->linesize[0];
1669 s->uvlinesize = curframe->linesize[1];
1671 if (!s->edge_emu_buffer)
1672 s->edge_emu_buffer = av_malloc(21*s->linesize);
1674 memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));
1676 /* Zero macroblock structures for top/top-left prediction from outside the frame. */
1677 memset(s->macroblocks + s->mb_height*2 - 1, 0, (s->mb_width+1)*sizeof(*s->macroblocks));
1679 // top edge of 127 for intra prediction
1680 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
1681 s->top_border[0][15] = s->top_border[0][23] = 127;
1682 memset(s->top_border[1]-1, 127, s->mb_width*sizeof(*s->top_border)+1);
1684 memset(s->ref_count, 0, sizeof(s->ref_count));
1686 memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width*4);
1688 #define MARGIN (16 << 2)
1689 s->mv_min.y = -MARGIN;
1690 s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
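// luma MVs are in quarter-pel units; MARGIN lets prediction reach at most
// 16 pixels outside the frame before clamp_mv() clips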
1692 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1693 VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
1694 VP8Macroblock *mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
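/* Rows are stored at offsets that decrease by 2 per row, so while decoding
 * row mb_y the row above is still intact at mb + 2 (top) and mb + 1
 * (top-left), while mb - 1 is the left neighbour. */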
1695 int mb_xy = mb_y*s->mb_width;
1697 curframe->data[0] + 16*mb_y*s->linesize,
1698 curframe->data[1] + 8*mb_y*s->uvlinesize,
1699 curframe->data[2] + 8*mb_y*s->uvlinesize
1702 memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
1703 memset(s->left_nnz, 0, sizeof(s->left_nnz));
1704 AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED*0x01010101);
1706 // left edge of 129 for intra prediction
1707 if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
1708 for (i = 0; i < 3; i++)
1709 for (y = 0; y < 16>>!!i; y++)
1710 dst[i][y*curframe->linesize[i]-1] = 129;
1711 if (mb_y == 1) // top left edge is also 129
1712 s->top_border[0][15] = s->top_border[0][23] = s->top_border[0][31] = 129;
1715 s->mv_min.x = -MARGIN;
1716 s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
1717 if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map)
1718 ff_thread_await_progress(prev_frame, mb_y, 0);
1720 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
1721 /* Prefetch the current frame, 4 MBs ahead */
1722 s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
1723 s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);
1725 decode_mb_mode(s, mb, mb_x, mb_y, curframe->ref_index[0] + mb_xy,
1726 prev_frame && prev_frame->ref_index[0] ? prev_frame->ref_index[0] + mb_xy : NULL);
1728 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
1731 decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz);
1733 if (mb->mode <= MODE_I4x4)
1734 intra_predict(s, dst, mb, mb_x, mb_y);
1736 inter_predict(s, dst, mb, mb_x, mb_y);
1738 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
1741 idct_mb(s, dst, mb);
1743 AV_ZERO64(s->left_nnz);
1744 AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
// Reset the DC block predictors that would exist if the mb had coefficients
1747 if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
1749 s->top_nnz[mb_x][8] = 0;
1753 if (s->deblock_filter)
1754 filter_level_for_mb(s, mb, &s->filter_strength[mb_x]);
1756 prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
1764 if (s->deblock_filter) {
1765 if (s->filter.simple)
1766 filter_mb_row_simple(s, curframe, mb_y);
1768 filter_mb_row(s, curframe, mb_y);
1773 ff_thread_report_progress(curframe, mb_y, 0);
1776 ff_thread_report_progress(curframe, INT_MAX, 0);
1777 memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
1780 // if future frames don't use the updated probabilities,
1781 // reset them to the values we saved
1782 if (!s->update_probabilities)
1783 s->prob[0] = s->prob[1];
1785 if (!s->invisible) {
1786 *(AVFrame*)data = *curframe;
1787 *data_size = sizeof(AVFrame);
1792 memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
1796 static av_cold int vp8_decode_init(AVCodecContext *avctx)
1798 VP8Context *s = avctx->priv_data;
1801 avctx->pix_fmt = PIX_FMT_YUV420P;
1803 ff_dsputil_init(&s->dsp, avctx);
1804 ff_h264_pred_init(&s->hpc, CODEC_ID_VP8, 8, 1);
1805 ff_vp8dsp_init(&s->vp8dsp);
1810 static av_cold int vp8_decode_free(AVCodecContext *avctx)
1812 vp8_decode_flush_impl(avctx, 0, 1, 1);
1813 release_queued_segmaps(avctx->priv_data, 1);
1817 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
1819 VP8Context *s = avctx->priv_data;
1826 #define REBASE(pic) \
1827 pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
1829 static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1831 VP8Context *s = dst->priv_data, *s_src = src->priv_data;
1833 if (s->macroblocks_base &&
1834 (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
1836 s->maps_are_invalid = 1;
1839 s->prob[0] = s_src->prob[!s_src->update_probabilities];
1840 s->segmentation = s_src->segmentation;
1841 s->lf_delta = s_src->lf_delta;
1842 memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
1844 memcpy(&s->frames, &s_src->frames, sizeof(s->frames));
1845 s->framep[0] = REBASE(s_src->next_framep[0]);
1846 s->framep[1] = REBASE(s_src->next_framep[1]);
1847 s->framep[2] = REBASE(s_src->next_framep[2]);
1848 s->framep[3] = REBASE(s_src->next_framep[3]);
1853 AVCodec ff_vp8_decoder = {
1855 .type = AVMEDIA_TYPE_VIDEO,
1857 .priv_data_size = sizeof(VP8Context),
1858 .init = vp8_decode_init,
1859 .close = vp8_decode_free,
1860 .decode = vp8_decode_frame,
1861 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
1862 .flush = vp8_decode_flush,
1863 .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
1864 .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
1865 .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),