/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"

#include "avcodec.h"
#include "internal.h"
#include "videodsp.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dec.h"
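// block width/height per BS_* size: [0] in 4px units, [1] in 8px units
// (sub-8x8 sizes clamp to one 8px unit in [1])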
static const uint8_t bwh_tab[2][N_BS_SIZES][2] = {
    {
        { 16, 16 }, { 16, 8 }, { 8, 16 }, { 8, 8 }, { 8, 4 }, { 4, 8 },
        { 4, 4 }, { 4, 2 }, { 2, 4 }, { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 },
    }, {
        { 8, 8 }, { 8, 4 }, { 4, 8 }, { 4, 4 }, { 4, 2 }, { 2, 4 },
        { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
    }
};
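// Fill a w x h region of context bytes with value v; w is a power of two no
// larger than 8, and each row is splatted with a single multi-byte store
// where possible (v * 0x01010101 replicates one byte, e.g. 3 -> 0x03030303).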
static av_always_inline void setctx_2d(uint8_t *ptr, int w, int h,
                                       ptrdiff_t stride, int v)
{
    switch (w) {
    case 1:
        do {
            *ptr = v;
            ptr += stride;
        } while (--h);
        break;
    case 2: {
        int v16 = v * 0x0101;
        do {
            AV_WN16A(ptr, v16);
            ptr += stride;
        } while (--h);
        break;
    }
    case 4: {
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr, v32);
            ptr += stride;
        } while (--h);
        break;
    }
    case 8: {
#if HAVE_FAST_64BIT
        uint64_t v64 = v * 0x0101010101010101ULL;
        do {
            AV_WN64A(ptr, v64);
            ptr += stride;
        } while (--h);
#else
        uint32_t v32 = v * 0x01010101;
        do {
            AV_WN32A(ptr,     v32);
            AV_WN32A(ptr + 4, v32);
            ptr += stride;
        } while (--h);
#endif
        break;
    }
    }
}
static void decode_mode(AVCodecContext *avctx)
{
    static const uint8_t left_ctx[N_BS_SIZES] = {
        0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
    };
    static const uint8_t above_ctx[N_BS_SIZES] = {
        0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
    };
    static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = {
        TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
        TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
    };
    VP9Context *s = avctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
    int bw4 = bwh_tab[1][b->bs][0], w4 = FFMIN(s->cols - col, bw4);
    int bh4 = bwh_tab[1][b->bs][1], h4 = FFMIN(s->rows - row, bh4), y;
    int have_a = row > 0, have_l = col > s->tile_col_start;
    int vref, filter_id;
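    // bw4/bh4 are the nominal block dimensions in 8px units; w4/h4 clamp
    // them against the frame edge so the loops below never touch positions
    // outside the visible area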
    if (!s->s.h.segmentation.enabled) {
        b->seg_id = 0;
    } else if (s->s.h.keyframe || s->s.h.intraonly) {
        b->seg_id = !s->s.h.segmentation.update_map ? 0 :
                    vp8_rac_get_tree(&s->c, ff_vp9_segmentation_tree, s->s.h.segmentation.prob);
    } else if (!s->s.h.segmentation.update_map ||
               (s->s.h.segmentation.temporal &&
                vp56_rac_get_prob_branchy(&s->c,
                    s->s.h.segmentation.pred_prob[s->above_segpred_ctx[col] +
                                                  s->left_segpred_ctx[row7]]))) {
        if (!s->s.h.errorres && s->s.frames[REF_FRAME_SEGMAP].segmentation_map) {
            int pred = 8, x;
            uint8_t *refsegmap = s->s.frames[REF_FRAME_SEGMAP].segmentation_map;

            if (!s->s.frames[REF_FRAME_SEGMAP].uses_2pass)
                ff_thread_await_progress(&s->s.frames[REF_FRAME_SEGMAP].tf, row >> 3, 0);
            for (y = 0; y < h4; y++) {
                int idx_base = (y + row) * 8 * s->sb_cols + col;
                for (x = 0; x < w4; x++)
                    pred = FFMIN(pred, refsegmap[idx_base + x]);
            }
            av_assert1(pred < 8);
            b->seg_id = pred;
        } else {
            b->seg_id = 0;
        }

        memset(&s->above_segpred_ctx[col], 1, w4);
        memset(&s->left_segpred_ctx[row7], 1, h4);
    } else {
        b->seg_id = vp8_rac_get_tree(&s->c, ff_vp9_segmentation_tree,
                                     s->s.h.segmentation.prob);

        memset(&s->above_segpred_ctx[col], 0, w4);
        memset(&s->left_segpred_ctx[row7], 0, h4);
    }
    if (s->s.h.segmentation.enabled &&
        (s->s.h.segmentation.update_map || s->s.h.keyframe || s->s.h.intraonly)) {
        setctx_2d(&s->s.frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col],
                  bw4, bh4, 8 * s->sb_cols, b->seg_id);
    }
    b->skip = s->s.h.segmentation.enabled &&
              s->s.h.segmentation.feat[b->seg_id].skip_enabled;
    if (!b->skip) {
        int c = s->left_skip_ctx[row7] + s->above_skip_ctx[col];
        b->skip = vp56_rac_get_prob(&s->c, s->prob.p.skip[c]);
        s->counts.skip[c][b->skip]++;
    }

    if (s->s.h.keyframe || s->s.h.intraonly) {
        b->intra = 1;
    } else if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) {
        b->intra = !s->s.h.segmentation.feat[b->seg_id].ref_val;
    } else {
        int c, bit;

        if (have_a && have_l) {
            c = s->above_intra_ctx[col] + s->left_intra_ctx[row7];
            c += (c == 2);
        } else {
            c = have_a ? 2 * s->above_intra_ctx[col] :
                have_l ? 2 * s->left_intra_ctx[row7] : 0;
        }
        bit = vp56_rac_get_prob(&s->c, s->prob.p.intra[c]);
        s->counts.intra[c][bit]++;
        b->intra = !bit;
    }
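    // transform size: b->tx counts doublings above 4x4 (TX_4X4 .. TX_32X32),
    // bounded by the block size (max_tx) and the frame-level txfmmode; under
    // TX_SWITCHABLE it is coded with a context taken from the neighbours'
    // tx sizes and skip flags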
    if ((b->intra || !b->skip) && s->s.h.txfmmode == TX_SWITCHABLE) {
        int c;
        if (have_a) {
            if (have_l) {
                c = (s->above_skip_ctx[col] ? max_tx :
                     s->above_txfm_ctx[col]) +
                    (s->left_skip_ctx[row7] ? max_tx :
                     s->left_txfm_ctx[row7]) > max_tx;
            } else {
                c = s->above_skip_ctx[col] ? 1 :
                    (s->above_txfm_ctx[col] * 2 > max_tx);
            }
        } else if (have_l) {
            c = s->left_skip_ctx[row7] ? 1 :
                (s->left_txfm_ctx[row7] * 2 > max_tx);
        } else {
            c = 1;
        }
        if (max_tx == TX_32X32) {
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][0]);
            if (b->tx) {
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][1]);
                if (b->tx == 2)
                    b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][2]);
            }
            s->counts.tx32p[c][b->tx]++;
        } else if (max_tx == TX_16X16) {
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][0]);
            if (b->tx)
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][1]);
            s->counts.tx16p[c][b->tx]++;
        } else if (max_tx == TX_8X8) {
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx8p[c]);
            s->counts.tx8p[c][b->tx]++;
        } else {
            b->tx = TX_4X4;
        }
    } else {
        b->tx = FFMIN(max_tx, s->s.h.txfmmode);
    }
    if (s->s.h.keyframe || s->s.h.intraonly) {
        uint8_t *a = &s->above_mode_ctx[col * 2];
        uint8_t *l = &s->left_mode_ctx[(row7) << 1];

        b->comp = 0;
        if (b->bs > BS_8x8) {
            // FIXME the memory storage intermediates here aren't really
            // necessary, they're just there to make the code slightly
            // simpler for now
            b->mode[0] =
            a[0]       = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                          ff_vp9_default_kf_ymode_probs[a[0]][l[0]]);
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                              ff_vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
                l[0]       =
                a[1]       = b->mode[1];
            } else {
                l[0]       =
                a[1]       =
                b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] =
                a[0]       = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                              ff_vp9_default_kf_ymode_probs[a[0]][l[1]]);
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                                  ff_vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
                    l[1]       =
                    a[1]       = b->mode[3];
                } else {
                    l[1]       =
                    a[1]       =
                    b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                l[1]       =
                a[1]       =
                b->mode[3] = b->mode[1];
            }
        } else {
            b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                          ff_vp9_default_kf_ymode_probs[*a][*l]);
            b->mode[3] =
            b->mode[2] =
            b->mode[1] = b->mode[0];
            // FIXME this can probably be optimized
            memset(a, b->mode[0], bwh_tab[0][b->bs][0]);
            memset(l, b->mode[0], bwh_tab[0][b->bs][1]);
        }
        b->uvmode = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                     ff_vp9_default_kf_uvmode_probs[b->mode[3]]);
    } else if (b->intra) {
        b->comp = 0;
        if (b->bs > BS_8x8) {
            b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                          s->prob.p.y_mode[0]);
            s->counts.y_mode[0][b->mode[0]]++;
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[1]]++;
            } else {
                b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[2]]++;
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                                  s->prob.p.y_mode[0]);
                    s->counts.y_mode[0][b->mode[3]]++;
                } else {
                    b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                b->mode[3] = b->mode[1];
            }
        } else {
            static const uint8_t size_group[10] = {
                3, 3, 3, 3, 2, 2, 2, 1, 1, 1
            };
            int sz = size_group[b->bs];

            b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                          s->prob.p.y_mode[sz]);
            b->mode[1] =
            b->mode[2] =
            b->mode[3] = b->mode[0];
            s->counts.y_mode[sz][b->mode[3]]++;
        }
        b->uvmode = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                     s->prob.p.uv_mode[b->mode[3]]);
        s->counts.uv_mode[b->mode[3]][b->uvmode]++;
    } else {
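        // maps an (above, left) mode-context pair to one of 7 contexts for
        // coding the inter mode; entries 0-9 are intra modes, 10-13 the four
        // inter modes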
        static const uint8_t inter_mode_ctx_lut[14][14] = {
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
        };
        if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) {
            av_assert2(s->s.h.segmentation.feat[b->seg_id].ref_val != 0);
            b->comp = 0;
            b->ref[0] = s->s.h.segmentation.feat[b->seg_id].ref_val - 1;
        } else {
            // read comp_pred flag
            if (s->s.h.comppredmode != PRED_SWITCHABLE) {
                b->comp = s->s.h.comppredmode == PRED_COMPREF;
            } else {
                int c;

                // FIXME add intra as ref=0xff (or -1) to make these easier?
                if (have_a) {
                    if (have_l) {
                        if (s->above_comp_ctx[col] && s->left_comp_ctx[row7]) {
                            c = 4;
                        } else if (s->above_comp_ctx[col]) {
                            c = 2 + (s->left_intra_ctx[row7] ||
                                     s->left_ref_ctx[row7] == s->s.h.fixcompref);
                        } else if (s->left_comp_ctx[row7]) {
                            c = 2 + (s->above_intra_ctx[col] ||
                                     s->above_ref_ctx[col] == s->s.h.fixcompref);
                        } else {
                            c = (!s->above_intra_ctx[col] &&
                                 s->above_ref_ctx[col] == s->s.h.fixcompref) ^
                                (!s->left_intra_ctx[row7] &&
                                 s->left_ref_ctx[row & 7] == s->s.h.fixcompref);
                        }
                    } else {
                        c = s->above_comp_ctx[col] ? 3 :
                            (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->s.h.fixcompref);
                    }
                } else if (have_l) {
                    c = s->left_comp_ctx[row7] ? 3 :
                        (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->s.h.fixcompref);
                } else {
                    c = 1;
                }
                b->comp = vp56_rac_get_prob(&s->c, s->prob.p.comp[c]);
                s->counts.comp[c][b->comp]++;
            }
            // read actual references
            // FIXME probably cache a few variables here to prevent repetitive
            // memory accesses below
            if (b->comp) { /* two references */
                int fix_idx = s->s.h.signbias[s->s.h.fixcompref], var_idx = !fix_idx, c, bit;

                b->ref[fix_idx] = s->s.h.fixcompref;
                // FIXME can this codeblob be replaced by some sort of LUT?
                if (have_a) {
                    if (have_l) {
                        if (s->above_intra_ctx[col]) {
                            if (s->left_intra_ctx[row7]) {
                                c = 2;
                            } else {
                                c = 1 + 2 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]);
                            }
                        } else if (s->left_intra_ctx[row7]) {
                            c = 1 + 2 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
                        } else {
                            int refl = s->left_ref_ctx[row7], refa = s->above_ref_ctx[col];

                            if (refl == refa && refa == s->s.h.varcompref[1]) {
                                c = 0;
                            } else if (!s->left_comp_ctx[row7] && !s->above_comp_ctx[col]) {
                                if ((refa == s->s.h.fixcompref && refl == s->s.h.varcompref[0]) ||
                                    (refl == s->s.h.fixcompref && refa == s->s.h.varcompref[0])) {
                                    c = 4;
                                } else {
                                    c = (refa == refl) ? 3 : 1;
                                }
                            } else if (!s->left_comp_ctx[row7]) {
                                if (refa == s->s.h.varcompref[1] && refl != s->s.h.varcompref[1]) {
                                    c = 1;
                                } else {
                                    c = (refl == s->s.h.varcompref[1] &&
                                         refa != s->s.h.varcompref[1]) ? 2 : 4;
                                }
                            } else if (!s->above_comp_ctx[col]) {
                                if (refl == s->s.h.varcompref[1] && refa != s->s.h.varcompref[1]) {
                                    c = 1;
                                } else {
                                    c = (refa == s->s.h.varcompref[1] &&
                                         refl != s->s.h.varcompref[1]) ? 2 : 4;
                                }
                            } else {
                                c = (refl == refa) ? 4 : 2;
                            }
                        }
                    } else {
                        if (s->above_intra_ctx[col]) {
                            c = 2;
                        } else if (s->above_comp_ctx[col]) {
                            c = 4 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
                        } else {
                            c = 3 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]);
                        }
                    }
                } else if (have_l) {
                    if (s->left_intra_ctx[row7]) {
                        c = 2;
                    } else if (s->left_comp_ctx[row7]) {
                        c = 4 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]);
                    } else {
                        c = 3 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]);
                    }
                } else {
                    c = 2;
                }
                bit = vp56_rac_get_prob(&s->c, s->prob.p.comp_ref[c]);
                b->ref[var_idx] = s->s.h.varcompref[bit];
                s->counts.comp_ref[c][bit]++;
            } else /* single reference */ {
                int bit, c;

                if (have_a && !s->above_intra_ctx[col]) {
                    if (have_l && !s->left_intra_ctx[row7]) {
                        if (s->left_comp_ctx[row7]) {
                            if (s->above_comp_ctx[col]) {
                                c = 1 + (!s->s.h.fixcompref || !s->left_ref_ctx[row7] ||
                                         !s->above_ref_ctx[col]);
                            } else {
                                c = (3 * !s->above_ref_ctx[col]) +
                                    (!s->s.h.fixcompref || !s->left_ref_ctx[row7]);
                            }
                        } else if (s->above_comp_ctx[col]) {
                            c = (3 * !s->left_ref_ctx[row7]) +
                                (!s->s.h.fixcompref || !s->above_ref_ctx[col]);
                        } else {
                            c = 2 * !s->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col];
                        }
                    } else if (s->above_intra_ctx[col]) {
                        c = 2;
                    } else if (s->above_comp_ctx[col]) {
                        c = 1 + (!s->s.h.fixcompref || !s->above_ref_ctx[col]);
                    } else {
                        c = 4 * (!s->above_ref_ctx[col]);
                    }
                } else if (have_l && !s->left_intra_ctx[row7]) {
                    if (s->left_intra_ctx[row7]) {
                        c = 2;
                    } else if (s->left_comp_ctx[row7]) {
                        c = 1 + (!s->s.h.fixcompref || !s->left_ref_ctx[row7]);
                    } else {
                        c = 4 * (!s->left_ref_ctx[row7]);
                    }
                } else {
                    c = 2;
                }
                bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][0]);
                s->counts.single_ref[c][0][bit]++;
                if (!bit) {
                    b->ref[0] = 0;
                } else {
                    // FIXME can this codeblob be replaced by some sort of LUT?
                    if (have_a) {
                        if (have_l) {
                            if (s->left_intra_ctx[row7]) {
                                if (s->above_intra_ctx[col]) {
                                    c = 2;
                                } else if (s->above_comp_ctx[col]) {
                                    c = 1 + 2 * (s->s.h.fixcompref == 1 ||
                                                 s->above_ref_ctx[col] == 1);
                                } else if (!s->above_ref_ctx[col]) {
                                    c = 3;
                                } else {
                                    c = 4 * (s->above_ref_ctx[col] == 1);
                                }
                            } else if (s->above_intra_ctx[col]) {
                                if (s->left_intra_ctx[row7]) {
                                    c = 2;
                                } else if (s->left_comp_ctx[row7]) {
                                    c = 1 + 2 * (s->s.h.fixcompref == 1 ||
                                                 s->left_ref_ctx[row7] == 1);
                                } else if (!s->left_ref_ctx[row7]) {
                                    c = 3;
                                } else {
                                    c = 4 * (s->left_ref_ctx[row7] == 1);
                                }
                            } else if (s->above_comp_ctx[col]) {
                                if (s->left_comp_ctx[row7]) {
                                    if (s->left_ref_ctx[row7] == s->above_ref_ctx[col]) {
                                        c = 3 * (s->s.h.fixcompref == 1 ||
                                                 s->left_ref_ctx[row7] == 1);
                                    } else {
                                        c = 2;
                                    }
                                } else if (!s->left_ref_ctx[row7]) {
                                    c = 1 + 2 * (s->s.h.fixcompref == 1 ||
                                                 s->above_ref_ctx[col] == 1);
                                } else {
                                    c = 3 * (s->left_ref_ctx[row7] == 1) +
                                        (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1);
                                }
                            } else if (s->left_comp_ctx[row7]) {
                                if (!s->above_ref_ctx[col]) {
                                    c = 1 + 2 * (s->s.h.fixcompref == 1 ||
                                                 s->left_ref_ctx[row7] == 1);
                                } else {
                                    c = 3 * (s->above_ref_ctx[col] == 1) +
                                        (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1);
                                }
                            } else if (!s->above_ref_ctx[col]) {
                                if (!s->left_ref_ctx[row7]) {
                                    c = 3;
                                } else {
                                    c = 4 * (s->left_ref_ctx[row7] == 1);
                                }
                            } else if (!s->left_ref_ctx[row7]) {
                                c = 4 * (s->above_ref_ctx[col] == 1);
                            } else {
                                c = 2 * (s->left_ref_ctx[row7] == 1) +
                                    2 * (s->above_ref_ctx[col] == 1);
                            }
                        } else {
                            if (s->above_intra_ctx[col] ||
                                (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) {
                                c = 2;
                            } else if (s->above_comp_ctx[col]) {
                                c = 3 * (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1);
                            } else {
                                c = 4 * (s->above_ref_ctx[col] == 1);
                            }
                        }
                    } else if (have_l) {
                        if (s->left_intra_ctx[row7] ||
                            (!s->left_comp_ctx[row7] && !s->left_ref_ctx[row7])) {
                            c = 2;
                        } else if (s->left_comp_ctx[row7]) {
                            c = 3 * (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1);
                        } else {
                            c = 4 * (s->left_ref_ctx[row7] == 1);
                        }
                    } else {
                        c = 2;
                    }
                    bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][1]);
                    s->counts.single_ref[c][1][bit]++;
                    b->ref[0] = 1 + bit;
                }
            }
        }
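        // inter b->mode[] values are NEARESTMV (10), NEARMV (11), ZEROMV (12)
        // or NEWMV (13), hence the "- 10" when indexing counts.mv_mode below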
        if (b->bs <= BS_8x8) {
            if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].skip_enabled) {
                b->mode[0] =
                b->mode[1] =
                b->mode[2] =
                b->mode[3] = ZEROMV;
            } else {
                static const uint8_t off[10] = {
                    3, 0, 0, 1, 0, 0, 0, 0, 0, 0
                };

                // FIXME this needs to use the LUT tables from find_ref_mvs
                // because not all are -1,0/0,-1
                int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]]
                                          [s->left_mode_ctx[row7 + off[b->bs]]];

                b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_inter_mode_tree,
                                              s->prob.p.mv_mode[c]);
                b->mode[1] =
                b->mode[2] =
                b->mode[3] = b->mode[0];
                s->counts.mv_mode[c][b->mode[0] - 10]++;
            }
        }

        if (s->s.h.filtermode == FILTER_SWITCHABLE) {
            int c;

            if (have_a && s->above_mode_ctx[col] >= NEARESTMV) {
                if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
                    c = s->above_filter_ctx[col] == s->left_filter_ctx[row7] ?
                        s->left_filter_ctx[row7] : 3;
                } else {
                    c = s->above_filter_ctx[col];
                }
            } else if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
                c = s->left_filter_ctx[row7];
            } else {
                c = 3;
            }

            filter_id = vp8_rac_get_tree(&s->c, ff_vp9_filter_tree,
                                         s->prob.p.filter[c]);
            s->counts.filter[c][filter_id]++;
            b->filter = ff_vp9_filter_lut[filter_id];
        } else {
            b->filter = s->s.h.filtermode;
        }
        if (b->bs > BS_8x8) {
            int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][s->left_mode_ctx[row7]];

            b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_inter_mode_tree,
                                          s->prob.p.mv_mode[c]);
            s->counts.mv_mode[c][b->mode[0] - 10]++;
            ff_vp9_fill_mv(s, b->mv[0], b->mode[0], 0);

            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, ff_vp9_inter_mode_tree,
                                              s->prob.p.mv_mode[c]);
                s->counts.mv_mode[c][b->mode[1] - 10]++;
                ff_vp9_fill_mv(s, b->mv[1], b->mode[1], 1);
            } else {
                b->mode[1] = b->mode[0];
                AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
                AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
            }

            if (b->bs != BS_4x8) {
                b->mode[2] = vp8_rac_get_tree(&s->c, ff_vp9_inter_mode_tree,
                                              s->prob.p.mv_mode[c]);
                s->counts.mv_mode[c][b->mode[2] - 10]++;
                ff_vp9_fill_mv(s, b->mv[2], b->mode[2], 2);

                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, ff_vp9_inter_mode_tree,
                                                  s->prob.p.mv_mode[c]);
                    s->counts.mv_mode[c][b->mode[3] - 10]++;
                    ff_vp9_fill_mv(s, b->mv[3], b->mode[3], 3);
                } else {
                    b->mode[3] = b->mode[2];
                    AV_COPY32(&b->mv[3][0], &b->mv[2][0]);
                    AV_COPY32(&b->mv[3][1], &b->mv[2][1]);
                }
            } else {
                b->mode[2] = b->mode[0];
                AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
                AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
                b->mode[3] = b->mode[1];
                AV_COPY32(&b->mv[3][0], &b->mv[1][0]);
                AV_COPY32(&b->mv[3][1], &b->mv[1][1]);
            }
        } else {
            ff_vp9_fill_mv(s, b->mv[0], b->mode[0], -1);
            AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
            AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
            AV_COPY32(&b->mv[3][0], &b->mv[0][0]);
            AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
            AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
            AV_COPY32(&b->mv[3][1], &b->mv[0][1]);
        }

        vref = b->ref[b->comp ? s->s.h.signbias[s->s.h.varcompref[0]] : 0];
    }
#if HAVE_FAST_64BIT
#define SPLAT_CTX(var, val, n) \
    switch (n) { \
    case 1:  var = val;                                    break; \
    case 2:  AV_WN16A(&var, val * 0x0101);                 break; \
    case 4:  AV_WN32A(&var, val * 0x01010101);             break; \
    case 8:  AV_WN64A(&var, val * 0x0101010101010101ULL);  break; \
    case 16: { \
        uint64_t v64 = val * 0x0101010101010101ULL; \
        AV_WN64A(              &var,     v64); \
        AV_WN64A(&((uint8_t *) &var)[8], v64); \
        break; \
    } \
    }
#else
#define SPLAT_CTX(var, val, n) \
    switch (n) { \
    case 1:  var = val;                         break; \
    case 2:  AV_WN16A(&var, val * 0x0101);      break; \
    case 4:  AV_WN32A(&var, val * 0x01010101);  break; \
    case 8: { \
        uint32_t v32 = val * 0x01010101; \
        AV_WN32A(              &var,     v32); \
        AV_WN32A(&((uint8_t *) &var)[4], v32); \
        break; \
    } \
    case 16: { \
        uint32_t v32 = val * 0x01010101; \
        AV_WN32A(              &var,      v32); \
        AV_WN32A(&((uint8_t *) &var)[4],  v32); \
        AV_WN32A(&((uint8_t *) &var)[8],  v32); \
        AV_WN32A(&((uint8_t *) &var)[12], v32); \
        break; \
    } \
    }
#endif
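    // SET_CTXS/SPLAT_CTX record this block's decisions (skip, tx size,
    // partition, and for inter frames mode/ref/filter) in the above/left
    // context arrays, replicated once per 8px unit of block width/height,
    // so the context derivation of subsequent blocks can read them back.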
    switch (bwh_tab[1][b->bs][0]) {
#define SET_CTXS(dir, off, n) \
    do { \
        SPLAT_CTX(s->dir##_skip_ctx[off],      b->skip,          n); \
        SPLAT_CTX(s->dir##_txfm_ctx[off],      b->tx,            n); \
        SPLAT_CTX(s->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
        if (!s->s.h.keyframe && !s->s.h.intraonly) { \
            SPLAT_CTX(s->dir##_intra_ctx[off], b->intra,   n); \
            SPLAT_CTX(s->dir##_comp_ctx[off],  b->comp,    n); \
            SPLAT_CTX(s->dir##_mode_ctx[off],  b->mode[3], n); \
            if (!b->intra) { \
                SPLAT_CTX(s->dir##_ref_ctx[off], vref, n); \
                if (s->s.h.filtermode == FILTER_SWITCHABLE) { \
                    SPLAT_CTX(s->dir##_filter_ctx[off], filter_id, n); \
                } \
            } \
        } \
    } while (0)
    case 1: SET_CTXS(above, col, 1); break;
    case 2: SET_CTXS(above, col, 2); break;
    case 4: SET_CTXS(above, col, 4); break;
    case 8: SET_CTXS(above, col, 8); break;
    }
    switch (bwh_tab[1][b->bs][1]) {
    case 1: SET_CTXS(left, row7, 1); break;
    case 2: SET_CTXS(left, row7, 2); break;
    case 4: SET_CTXS(left, row7, 4); break;
    case 8: SET_CTXS(left, row7, 8); break;
    }
#undef SPLAT_CTX
#undef SET_CTXS
    if (!s->s.h.keyframe && !s->s.h.intraonly) {
        if (b->bs > BS_8x8) {
            int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);

            AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][0], &b->mv[1][0]);
            AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][1], &b->mv[1][1]);
            AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][0], mv0);
            AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][1], mv1);
            AV_COPY32(&s->above_mv_ctx[col * 2 + 0][0], &b->mv[2][0]);
            AV_COPY32(&s->above_mv_ctx[col * 2 + 0][1], &b->mv[2][1]);
            AV_WN32A(&s->above_mv_ctx[col * 2 + 1][0], mv0);
            AV_WN32A(&s->above_mv_ctx[col * 2 + 1][1], mv1);
        } else {
            int n, mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);

            for (n = 0; n < w4 * 2; n++) {
                AV_WN32A(&s->above_mv_ctx[col * 2 + n][0], mv0);
                AV_WN32A(&s->above_mv_ctx[col * 2 + n][1], mv1);
            }
            for (n = 0; n < h4 * 2; n++) {
                AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][0], mv0);
                AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][1], mv1);
            }
        }
    }
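    // save the per-4x4 reference/MV grid of this block into the current
    // frame; the next frame reads it back for temporal MV prediction
    // (REF_FRAME_MVPAIR)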
    // FIXME kinda ugly
    for (y = 0; y < h4; y++) {
        int x, o = (row + y) * s->sb_cols * 8 + col;
        VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[o];

        if (b->intra) {
            for (x = 0; x < w4; x++) {
                mv[x].ref[0] =
                mv[x].ref[1] = -1;
            }
        } else if (b->comp) {
            for (x = 0; x < w4; x++) {
                mv[x].ref[0] = b->ref[0];
                mv[x].ref[1] = b->ref[1];
                AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
                AV_COPY32(&mv[x].mv[1], &b->mv[3][1]);
            }
        } else {
            for (x = 0; x < w4; x++) {
                mv[x].ref[0] = b->ref[0];
                mv[x].ref[1] = -1;
                AV_COPY32(&mv[x].mv[0], &b->mv[3][0]);
            }
        }
    }
}
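// Coefficient decoding for one transform block: scan positions are grouped
// into up to 6 "bands" (see band_counts in decode_coeffs below), each with
// its own probability set, and the nonzero context for position i is the
// rounded average of the two neighbouring token magnitudes cached in cache[]
// (via nb[]). Returns the number of coefficients decoded, i.e. the eob
// position (0 means the block is all-zero).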
// FIXME merge cnt/eob arguments?
static av_always_inline int
decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
                        int is_tx32x32, int is8bitsperpixel, int bpp, unsigned (*cnt)[6][3],
                        unsigned (*eob)[6][2], uint8_t (*p)[6][11],
                        int nnz, const int16_t *scan, const int16_t (*nb)[2],
                        const int16_t *band_counts, const int16_t *qmul)
{
    int i = 0, band = 0, band_left = band_counts[band];
    uint8_t *tp = p[0][nnz];
    uint8_t cache[1024];

    do {
        int val, rc;

        val = vp56_rac_get_prob_branchy(c, tp[0]); // eob
        eob[band][nnz][val]++;
        if (!val)
            break;

    skip_eob:
        if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
            cnt[band][nnz][0]++;
            if (!--band_left)
                band_left = band_counts[++band];
            cache[scan[i]] = 0;
            nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
            tp = p[band][nnz];
            if (++i == n_coeffs)
                break; // invalid input; blocks should end with EOB
            goto skip_eob;
        }

        rc = scan[i];
        if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one
            cnt[band][nnz][1]++;
            val = 1;
            cache[rc] = 1;
        } else {
            // fill in p[3-10] (model fill) - only once per frame for each pos
            if (!tp[3])
                memcpy(&tp[3], ff_vp9_model_pareto8[tp[2]], 8);

            cnt[band][nnz][2]++;
            if (!vp56_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
                if (!vp56_rac_get_prob_branchy(c, tp[4])) {
                    cache[rc] = val = 2;
                } else {
                    val = 3 + vp56_rac_get_prob(c, tp[5]);
                    cache[rc] = 3;
                }
            } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
                cache[rc] = 4;
                if (!vp56_rac_get_prob_branchy(c, tp[7])) {
                    val = vp56_rac_get_prob(c, 159) + 5;
                } else {
                    val  = (vp56_rac_get_prob(c, 165) << 1) + 7;
                    val +=  vp56_rac_get_prob(c, 145);
                }
            } else { // cat 3-6
                cache[rc] = 5;
                if (!vp56_rac_get_prob_branchy(c, tp[8])) {
                    if (!vp56_rac_get_prob_branchy(c, tp[9])) {
                        val  = 11 + (vp56_rac_get_prob(c, 173) << 2);
                        val +=      (vp56_rac_get_prob(c, 148) << 1);
                        val +=       vp56_rac_get_prob(c, 140);
                    } else {
                        val  = 19 + (vp56_rac_get_prob(c, 176) << 3);
                        val +=      (vp56_rac_get_prob(c, 155) << 2);
                        val +=      (vp56_rac_get_prob(c, 140) << 1);
                        val +=       vp56_rac_get_prob(c, 135);
                    }
                } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
                    val  = (vp56_rac_get_prob(c, 180) << 4) + 35;
                    val += (vp56_rac_get_prob(c, 157) << 3);
                    val += (vp56_rac_get_prob(c, 141) << 2);
                    val += (vp56_rac_get_prob(c, 134) << 1);
                    val +=  vp56_rac_get_prob(c, 130);
                } else {
                    val = 67;
                    if (!is8bitsperpixel) {
                        if (bpp == 12) {
                            val += vp56_rac_get_prob(c, 255) << 17;
                            val += vp56_rac_get_prob(c, 255) << 16;
                        }
                        val += (vp56_rac_get_prob(c, 255) << 15);
                        val += (vp56_rac_get_prob(c, 255) << 14);
                    }
                    val += (vp56_rac_get_prob(c, 254) << 13);
                    val += (vp56_rac_get_prob(c, 254) << 12);
                    val += (vp56_rac_get_prob(c, 254) << 11);
                    val += (vp56_rac_get_prob(c, 252) << 10);
                    val += (vp56_rac_get_prob(c, 249) << 9);
                    val += (vp56_rac_get_prob(c, 243) << 8);
                    val += (vp56_rac_get_prob(c, 230) << 7);
                    val += (vp56_rac_get_prob(c, 196) << 6);
                    val += (vp56_rac_get_prob(c, 177) << 5);
                    val += (vp56_rac_get_prob(c, 153) << 4);
                    val += (vp56_rac_get_prob(c, 140) << 3);
                    val += (vp56_rac_get_prob(c, 133) << 2);
                    val += (vp56_rac_get_prob(c, 130) << 1);
                    val +=  vp56_rac_get_prob(c, 129);
                }
            }
        }
#define STORE_COEF(c, i, v) do { \
    if (is8bitsperpixel) { \
        c[i] = v; \
    } else { \
        AV_WN32A(&c[i * 2], v); \
    } \
} while (0)
        if (!--band_left)
            band_left = band_counts[++band];
        if (is_tx32x32)
            STORE_COEF(coef, rc, ((vp8_rac_get(c) ? -val : val) * qmul[!!i]) / 2);
        else
            STORE_COEF(coef, rc, (vp8_rac_get(c) ? -val : val) * qmul[!!i]);
        nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
        tp = p[band][nnz];
    } while (++i < n_coeffs);

    return i;
}
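// The following thin wrappers pin the constant arguments so that, thanks to
// av_always_inline, the compiler emits specialized versions of
// decode_coeffs_b_generic() with the is_tx32x32/is8bitsperpixel branches
// folded away.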
static int decode_coeffs_b_8bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                const int16_t (*nb)[2], const int16_t *band_counts,
                                const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 0, 1, 8, cnt, eob, p,
                                   nnz, scan, nb, band_counts, qmul);
}

static int decode_coeffs_b32_8bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                  unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                  uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                  const int16_t (*nb)[2], const int16_t *band_counts,
                                  const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 1, 1, 8, cnt, eob, p,
                                   nnz, scan, nb, band_counts, qmul);
}

static int decode_coeffs_b_16bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                 unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                 uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                 const int16_t (*nb)[2], const int16_t *band_counts,
                                 const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 0, 0, s->s.h.bpp, cnt, eob, p,
                                   nnz, scan, nb, band_counts, qmul);
}

static int decode_coeffs_b32_16bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                   unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                   uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                   const int16_t (*nb)[2], const int16_t *band_counts,
                                   const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 1, 0, s->s.h.bpp, cnt, eob, p,
                                   nnz, scan, nb, band_counts, qmul);
}
static av_always_inline int decode_coeffs(AVCodecContext *avctx, int is8bitsperpixel)
{
    VP9Context *s = avctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col;
    uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
    unsigned (*c)[6][3] = s->counts.coef[b->tx][0 /* y */][!b->intra];
    unsigned (*e)[6][2] = s->counts.eob[b->tx][0 /* y */][!b->intra];
    int w4 = bwh_tab[1][b->bs][0] << 1, h4 = bwh_tab[1][b->bs][1] << 1;
    int end_x = FFMIN(2 * (s->cols - col), w4);
    int end_y = FFMIN(2 * (s->rows - row), h4);
    int n, pl, x, y, ret;
    int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul;
    int tx = 4 * s->s.h.lossless + b->tx;
    const int16_t * const *yscans = ff_vp9_scans[tx];
    const int16_t (* const *ynbs)[2] = ff_vp9_scans_nb[tx];
    const int16_t *uvscan = ff_vp9_scans[b->uvtx][DCT_DCT];
    const int16_t (*uvnb)[2] = ff_vp9_scans_nb[b->uvtx][DCT_DCT];
    uint8_t *a = &s->above_y_nnz_ctx[col * 2];
    uint8_t *l = &s->left_y_nnz_ctx[(row & 7) << 1];
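    // number of coefficients per band for each tx size; the 6 bands group
    // scan positions that share probabilities, and each row sums to the
    // coefficient count of its tx size (e.g. 4x4: 1+2+3+4+3+3 = 16)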
    static const int16_t band_counts[4][8] = {
        { 1, 2, 3, 4,  3,   16 - 13 },
        { 1, 2, 3, 4, 11,   64 - 21 },
        { 1, 2, 3, 4, 11,  256 - 21 },
        { 1, 2, 3, 4, 11, 1024 - 21 },
    };
    const int16_t *y_band_counts = band_counts[b->tx];
    const int16_t *uv_band_counts = band_counts[b->uvtx];
    int bytesperpixel = is8bitsperpixel ? 1 : 2;
    int total_coeff = 0;
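    // For tx sizes above 4x4, MERGE first collapses the per-4px nnz context
    // entries covering one transform block into a single nonzero flag, and
    // after decoding SPLAT copies the per-block result back over the covered
    // positions ("cond" selects the wide aligned-store path when the block
    // does not extend past the visible edge).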
#define MERGE(la, end, step, rd) \
    for (n = 0; n < end; n += step) \
        la[n] = !!rd(&la[n])
#define MERGE_CTX(step, rd) \
    do { \
        MERGE(l, end_y, step, rd); \
        MERGE(a, end_x, step, rd); \
    } while (0)

#define DECODE_Y_COEF_LOOP(step, mode_index, v) \
    for (n = 0, y = 0; y < end_y; y += step) { \
        for (x = 0; x < end_x; x += step, n += step * step) { \
            enum TxfmType txtp = ff_vp9_intra_txfm_type[b->mode[mode_index]]; \
            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                      (s, s->block + 16 * n * bytesperpixel, 16 * step * step, \
                       c, e, p, a[x] + l[y], yscans[txtp], \
                       ynbs[txtp], y_band_counts, qmul[0]); \
            a[x] = l[y] = !!ret; \
            total_coeff |= !!ret; \
            if (step >= 4) { \
                AV_WN16A(&s->eob[n], ret); \
            } else { \
                s->eob[n] = ret; \
            } \
        } \
    }

#define SPLAT(la, end, step, cond) \
    if (step == 2) { \
        for (n = 1; n < end; n += step) \
            la[n] = la[n - 1]; \
    } else if (step == 4) { \
        if (cond) { \
            for (n = 0; n < end; n += step) \
                AV_WN32A(&la[n], la[n] * 0x01010101); \
        } else { \
            for (n = 0; n < end; n += step) \
                memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
        } \
    } else /* step == 8 */ { \
        if (cond) { \
            if (HAVE_FAST_64BIT) { \
                for (n = 0; n < end; n += step) \
                    AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
            } else { \
                for (n = 0; n < end; n += step) { \
                    uint32_t v32 = la[n] * 0x01010101; \
                    AV_WN32A(&la[n],     v32); \
                    AV_WN32A(&la[n + 4], v32); \
                } \
            } \
        } else { \
            for (n = 0; n < end; n += step) \
                memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
        } \
    }
#define SPLAT_CTX(step) \
    do { \
        SPLAT(a, end_x, step, end_x == w4); \
        SPLAT(l, end_y, step, end_y == h4); \
    } while (0)

    switch (b->tx) {
    case TX_4X4:
        DECODE_Y_COEF_LOOP(1, b->bs > BS_8x8 ? n : 0,);
        break;
    case TX_8X8:
        MERGE_CTX(2, AV_RN16A);
        DECODE_Y_COEF_LOOP(2, 0,);
        SPLAT_CTX(2);
        break;
    case TX_16X16:
        MERGE_CTX(4, AV_RN32A);
        DECODE_Y_COEF_LOOP(4, 0,);
        SPLAT_CTX(4);
        break;
    case TX_32X32:
        MERGE_CTX(8, AV_RN64A);
        DECODE_Y_COEF_LOOP(8, 0, 32);
        SPLAT_CTX(8);
        break;
    }
#define DECODE_UV_COEF_LOOP(step, v) \
    for (n = 0, y = 0; y < end_y; y += step) { \
        for (x = 0; x < end_x; x += step, n += step * step) { \
            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                      (s, s->uvblock[pl] + 16 * n * bytesperpixel, \
                       16 * step * step, c, e, p, a[x] + l[y], \
                       uvscan, uvnb, uv_band_counts, qmul[1]); \
            a[x] = l[y] = !!ret; \
            total_coeff |= !!ret; \
            if (step >= 4) { \
                AV_WN16A(&s->uveob[pl][n], ret); \
            } else { \
                s->uveob[pl][n] = ret; \
            } \
        } \
    }

    p = s->prob.coef[b->uvtx][1 /* uv */][!b->intra];
    c = s->counts.coef[b->uvtx][1 /* uv */][!b->intra];
    e = s->counts.eob[b->uvtx][1 /* uv */][!b->intra];
    w4 >>= s->ss_h;
    end_x >>= s->ss_h;
    h4 >>= s->ss_v;
    end_y >>= s->ss_v;
    for (pl = 0; pl < 2; pl++) {
        a = &s->above_uv_nnz_ctx[pl][col << !s->ss_h];
        l = &s->left_uv_nnz_ctx[pl][(row & 7) << !s->ss_v];
        switch (b->uvtx) {
        case TX_4X4:
            DECODE_UV_COEF_LOOP(1,);
            break;
        case TX_8X8:
            MERGE_CTX(2, AV_RN16A);
            DECODE_UV_COEF_LOOP(2,);
            SPLAT_CTX(2);
            break;
        case TX_16X16:
            MERGE_CTX(4, AV_RN32A);
            DECODE_UV_COEF_LOOP(4,);
            SPLAT_CTX(4);
            break;
        case TX_32X32:
            MERGE_CTX(8, AV_RN64A);
            DECODE_UV_COEF_LOOP(8, 32);
            SPLAT_CTX(8);
            break;
        }
    }

    return total_coeff;
}

static int decode_coeffs_8bpp(AVCodecContext *avctx)
{
    return decode_coeffs(avctx, 1);
}

static int decode_coeffs_16bpp(AVCodecContext *avctx)
{
    return decode_coeffs(avctx, 0);
}
static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **a,
                                             uint8_t *dst_edge, ptrdiff_t stride_edge,
                                             uint8_t *dst_inner, ptrdiff_t stride_inner,
                                             uint8_t *l, int col, int x, int w,
                                             int row, int y, enum TxfmMode tx,
                                             int p, int ss_h, int ss_v, int bytesperpixel)
{
    int have_top = row > 0 || y > 0;
    int have_left = col > s->tile_col_start || x > 0;
    int have_right = x < w - 1;
    int bpp = s->s.h.bpp;
    static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
        [VERT_PRED]            = { { DC_127_PRED,          VERT_PRED },
                                   { DC_127_PRED,          VERT_PRED } },
        [HOR_PRED]             = { { DC_129_PRED,          DC_129_PRED },
                                   { HOR_PRED,             HOR_PRED } },
        [DC_PRED]              = { { DC_128_PRED,          TOP_DC_PRED },
                                   { LEFT_DC_PRED,         DC_PRED } },
        [DIAG_DOWN_LEFT_PRED]  = { { DC_127_PRED,          DIAG_DOWN_LEFT_PRED },
                                   { DC_127_PRED,          DIAG_DOWN_LEFT_PRED } },
        [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
                                   { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
        [VERT_RIGHT_PRED]      = { { VERT_RIGHT_PRED,      VERT_RIGHT_PRED },
                                   { VERT_RIGHT_PRED,      VERT_RIGHT_PRED } },
        [HOR_DOWN_PRED]        = { { HOR_DOWN_PRED,        HOR_DOWN_PRED },
                                   { HOR_DOWN_PRED,        HOR_DOWN_PRED } },
        [VERT_LEFT_PRED]       = { { DC_127_PRED,          VERT_LEFT_PRED },
                                   { DC_127_PRED,          VERT_LEFT_PRED } },
        [HOR_UP_PRED]          = { { DC_129_PRED,          DC_129_PRED },
                                   { HOR_UP_PRED,          HOR_UP_PRED } },
        [TM_VP8_PRED]          = { { DC_129_PRED,          VERT_PRED },
                                   { HOR_PRED,             TM_VP8_PRED } },
    };
    static const struct {
        uint8_t needs_left:1;
        uint8_t needs_top:1;
        uint8_t needs_topleft:1;
        uint8_t needs_topright:1;
        uint8_t invert_left:1;
    } edges[N_INTRA_PRED_MODES] = {
        [VERT_PRED]            = { .needs_top  = 1 },
        [HOR_PRED]             = { .needs_left = 1 },
        [DC_PRED]              = { .needs_top  = 1, .needs_left = 1 },
        [DIAG_DOWN_LEFT_PRED]  = { .needs_top  = 1, .needs_topright = 1 },
        [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [VERT_RIGHT_PRED]      = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [HOR_DOWN_PRED]        = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [VERT_LEFT_PRED]       = { .needs_top  = 1, .needs_topright = 1 },
        [HOR_UP_PRED]          = { .needs_left = 1, .invert_left = 1 },
        [TM_VP8_PRED]          = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [LEFT_DC_PRED]         = { .needs_left = 1 },
        [TOP_DC_PRED]          = { .needs_top  = 1 },
        [DC_128_PRED]          = { 0 },
        [DC_127_PRED]          = { 0 },
        [DC_129_PRED]          = { 0 }
    };
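    // mode_conv[] degrades a mode when its required edges are unavailable,
    // e.g. HOR_PRED without a left neighbour becomes DC_129_PRED, and DC_PRED
    // falls back to LEFT_DC/TOP_DC/DC_128 depending on which edges exist;
    // edges[] then lists what the (possibly converted) mode still needs.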
    av_assert2(mode >= 0 && mode < 10);
    mode = mode_conv[mode][have_left][have_top];
    if (edges[mode].needs_top) {
        uint8_t *top, *topleft;
        int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !ss_h) - x) * 4;
        int n_px_need_tr = 0;

        if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
            n_px_need_tr = 4;

        // if top of sb64-row, use s->intra_pred_data[] instead of
        // dst[-stride] for intra prediction (it contains pre- instead of
        // post-loopfilter data)
        if (have_top) {
            top = !(row & 7) && !y ?
                s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
            if (have_left)
                topleft = !(row & 7) && !y ?
                    s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                    y == 0 || x == 0 ? &dst_edge[-stride_edge] :
                    &dst_inner[-stride_inner];
        }

        if (have_top &&
            (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
            (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
            n_px_need + n_px_need_tr <= n_px_have) {
            *a = top;
        } else {
            if (have_top) {
                if (n_px_need <= n_px_have) {
                    memcpy(*a, top, n_px_need * bytesperpixel);
                } else {
#define memset_bpp(c, i1, v, i2, num) do { \
    if (bytesperpixel == 1) { \
        memset(&(c)[(i1)], (v)[(i2)], (num)); \
    } else { \
        int n, val = AV_RN16A(&(v)[(i2) * 2]); \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[((i1) + n) * 2], val); \
        } \
    } \
} while (0)
                    memcpy(*a, top, n_px_have * bytesperpixel);
                    memset_bpp(*a, n_px_have, (*a), n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
#define memset_val(c, val, num) do { \
    if (bytesperpixel == 1) { \
        memset((c), (val), (num)); \
    } else { \
        int n; \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[n * 2], (val)); \
        } \
    } \
} while (0)
                memset_val(*a, (128 << (bpp - 8)) - 1, n_px_need);
            }
            if (edges[mode].needs_topleft) {
                if (have_left && have_top) {
#define assign_bpp(c, i1, v, i2) do { \
    if (bytesperpixel == 1) { \
        (c)[(i1)] = (v)[(i2)]; \
    } else { \
        AV_COPY16(&(c)[(i1) * 2], &(v)[(i2) * 2]); \
    } \
} while (0)
                    assign_bpp(*a, -1, topleft, -1);
                } else {
#define assign_val(c, i, v) do { \
    if (bytesperpixel == 1) { \
        (c)[(i)] = (v); \
    } else { \
        AV_WN16A(&(c)[(i) * 2], (v)); \
    } \
} while (0)
                    assign_val((*a), -1, (128 << (bpp - 8)) + (have_top ? +1 : -1));
                }
            }
            if (tx == TX_4X4 && edges[mode].needs_topright) {
                if (have_top && have_right &&
                    n_px_need + n_px_need_tr <= n_px_have) {
                    memcpy(&(*a)[4 * bytesperpixel], &top[4 * bytesperpixel], 4 * bytesperpixel);
                } else {
                    memset_bpp(*a, 4, *a, 3, 4);
                }
            }
        }
    }
    if (edges[mode].needs_left) {
        if (have_left) {
            int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !ss_v) - y) * 4;
            uint8_t *dst = x == 0 ? dst_edge : dst_inner;
            ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;

            if (edges[mode].invert_left) {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                    memset_bpp(l, n_px_have, l, n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                    memset_bpp(l, 0, l, n_px_need - n_px_have, n_px_need - n_px_have);
                }
            }
        } else {
            memset_val(l, (128 << (bpp - 8)) + 1, 4 << tx);
        }
    }

    return mode;
}
static av_always_inline void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off,
                                         ptrdiff_t uv_off, int bytesperpixel)
{
    VP9Context *s = avctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col;
    int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
    int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
    int end_x = FFMIN(2 * (s->cols - col), w4);
    int end_y = FFMIN(2 * (s->rows - row), h4);
    int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
    int uvstep1d = 1 << b->uvtx, p;
    uint8_t *dst = s->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off;
    LOCAL_ALIGNED_32(uint8_t, a_buf, [96]);
    LOCAL_ALIGNED_32(uint8_t, l, [64]);

    for (n = 0, y = 0; y < end_y; y += step1d) {
        uint8_t *ptr = dst, *ptr_r = dst_r;
        for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d * bytesperpixel,
                               ptr_r += 4 * step1d * bytesperpixel, n += step) {
            int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
                               y * 2 + x : 0];
            uint8_t *a = &a_buf[32];
            enum TxfmType txtp = ff_vp9_intra_txfm_type[mode];
            int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];

            mode = check_intra_mode(s, mode, &a, ptr_r,
                                    s->s.frames[CUR_FRAME].tf.f->linesize[0],
                                    ptr, s->y_stride, l,
                                    col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel);
            s->dsp.intra_pred[b->tx][mode](ptr, s->y_stride, l, a);
            if (eob)
                s->dsp.itxfm_add[tx][txtp](ptr, s->y_stride,
                                           s->block + 16 * n * bytesperpixel, eob);
        }
        dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0];
        dst   += 4 * step1d * s->y_stride;
    }

    // U/V
    w4    >>= s->ss_h;
    end_x >>= s->ss_h;
    end_y >>= s->ss_v;
    step = 1 << (b->uvtx * 2);
    for (p = 0; p < 2; p++) {
        dst = s->dst[1 + p];
        dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
        for (n = 0, y = 0; y < end_y; y += uvstep1d) {
            uint8_t *ptr = dst, *ptr_r = dst_r;
            for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel,
                                   ptr_r += 4 * uvstep1d * bytesperpixel, n += step) {
                int mode = b->uvmode;
                uint8_t *a = &a_buf[32];
                int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];

                mode = check_intra_mode(s, mode, &a, ptr_r,
                                        s->s.frames[CUR_FRAME].tf.f->linesize[1],
                                        ptr, s->uv_stride, l, col, x, w4, row, y,
                                        b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel);
                s->dsp.intra_pred[b->uvtx][mode](ptr, s->uv_stride, l, a);
                if (eob)
                    s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
                                                    s->uvblock[p] + 16 * n * bytesperpixel, eob);
            }
            dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1];
            dst   += 4 * uvstep1d * s->uv_stride;
        }
    }
}

static void intra_recon_8bpp(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(avctx, y_off, uv_off, 1);
}

static void intra_recon_16bpp(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(avctx, y_off, uv_off, 2);
}
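// Motion compensation. VP9's 8-tap subpel filters read 3 pixels before and
// 4 pixels after the block in each direction that has a fractional MV
// component, so references too close to a frame edge go through
// vdsp.emulated_edge_mc() into edge_emu_buffer with replicated borders.
// The ff_thread_await_progress() calls translate the bottom-most reference
// row needed into 64px sbrow units (the granularity at which a reference
// frame's decoding progress is reported).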
static av_always_inline void mc_luma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
                                              uint8_t *dst, ptrdiff_t dst_stride,
                                              const uint8_t *ref, ptrdiff_t ref_stride,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                              int bw, int bh, int w, int h, int bytesperpixel)
{
    int mx = mv->x, my = mv->y, th;

    y += my >> 3;
    x += mx >> 3;
    ref += y * ref_stride + x * bytesperpixel;
    mx &= 7;
    my &= 7;
    // FIXME bilinear filter only needs 0/1 pixels, not 3/4
    // we use +7 because the last 7 pixels of each sbrow can be changed in
    // the longest loopfilter of the next sbrow
    th = (y + bh + 4 * !!my + 7) >> 6;
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    // The arm/aarch64 _hv filters read one more row than what actually is
    // needed, so switch to emulated edge one pixel sooner vertically
    // (!!my * 5) than horizontally (!!mx * 4).
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                 ref - !!my * 3 * ref_stride - !!mx * 3 * bytesperpixel,
                                 160, ref_stride,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        ref_stride = 160;
    }
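    // luma MVs are in 1/8-pel units; << 1 converts to the 1/16-pel index the
    // dsp put/avg functions expect (the chroma variant below derives 1/16-pel
    // values directly via the (1 << !ss) factor)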
    mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
}
static av_always_inline void mc_chroma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
                                                uint8_t *dst_u, uint8_t *dst_v,
                                                ptrdiff_t dst_stride,
                                                const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                                const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                                ThreadFrame *ref_frame,
                                                ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                                int bw, int bh, int w, int h, int bytesperpixel)
{
    int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th;

    y += my >> 4;
    x += mx >> 4;
    ref_u += y * src_stride_u + x * bytesperpixel;
    ref_v += y * src_stride_v + x * bytesperpixel;
    mx &= 15;
    my &= 15;
    // FIXME bilinear filter only needs 0/1 pixels, not 3/4
    // we use +7 because the last 7 pixels of each sbrow can be changed in
    // the longest loopfilter of the next sbrow
    th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    // The arm/aarch64 _hv filters read one more row than what actually is
    // needed, so switch to emulated edge one pixel sooner vertically
    // (!!my * 5) than horizontally (!!mx * 4).
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                 ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel,
                                 160, src_stride_u,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_u = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my);

        s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                 ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel,
                                 160, src_stride_v,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_v = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my);
    } else {
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
    }
}
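// vp9_mc_template.c builds the block-level inter prediction driver on top of
// the mc_luma_dir/mc_chroma_dir macros; it is included four times in this
// file to generate the 8/16bpp unscaled variants here and the scaled
// variants below (inter_pred_8bpp() etc., called from inter_recon()).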
#define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_unscaled(s, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                     mv, bw, bh, w, h, bytesperpixel)
#define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_unscaled(s, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                       row, col, mv, bw, bh, w, h, bytesperpixel)

#define FN(x) x##_8bpp
#define BYTES_PER_PIXEL 1
#include "vp9_mc_template.c"
#undef FN
#undef BYTES_PER_PIXEL
#define FN(x) x##_16bpp
#define BYTES_PER_PIXEL 2
#include "vp9_mc_template.c"
#undef mc_luma_dir
#undef mc_chroma_dir
#undef FN
#undef BYTES_PER_PIXEL
static av_always_inline void mc_luma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
                                            vp9_mc_func (*mc)[2],
                                            uint8_t *dst, ptrdiff_t dst_stride,
                                            const uint8_t *ref, ptrdiff_t ref_stride,
                                            ThreadFrame *ref_frame,
                                            ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                            int px, int py, int pw, int ph,
                                            int bw, int bh, int w, int h, int bytesperpixel,
                                            const uint16_t *scale, const uint8_t *step)
{
    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_luma_unscaled(s, mc, dst, dst_stride, ref, ref_stride, ref_frame,
                         y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
#define scale_mv(n, dim) (((int64_t)(n) * scale[dim]) >> 14)
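        // scale[] is a 14-bit fixed-point size ratio between the reference
        // and the current frame (roughly (ref_dim << 14) / cur_dim, set up at
        // the frame level), so scale_mv() maps a current-frame position into
        // reference-frame coordinates; step[] is the matching per-pixel
        // increment in 1/16-pel units used by the scaled (smc) filters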
        int refbw_m1, refbh_m1;
        int th, mx, my;
        VP56mv mv;

        mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
        mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
        // BUG libvpx seems to scale the two components separately. This introduces
        // rounding errors but we have to reproduce them to be exactly compatible
        // with the output from libvpx...
        mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);

        y = my >> 4;
        x = mx >> 4;
        ref += y * ref_stride + x * bytesperpixel;
        mx &= 15;
        my &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        // FIXME bilinear filter only needs 0/1 pixels, not 3/4
        // we use +7 because the last 7 pixels of each sbrow can be changed in
        // the longest loopfilter of the next sbrow
        th = (y + refbh_m1 + 4 + 7) >> 6;
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        // The arm/aarch64 _hv filters read one more row than what actually is
        // needed, so switch to emulated edge one pixel sooner vertically
        // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                     ref - 3 * ref_stride - 3 * bytesperpixel,
                                     288, ref_stride,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            ref_stride = 288;
        }
        smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
    }
}
static av_always_inline void mc_chroma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
                                              vp9_mc_func (*mc)[2],
                                              uint8_t *dst_u, uint8_t *dst_v,
                                              ptrdiff_t dst_stride,
                                              const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                              const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                              int px, int py, int pw, int ph,
                                              int bw, int bh, int w, int h, int bytesperpixel,
                                              const uint16_t *scale, const uint8_t *step)
{
    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_chroma_unscaled(s, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
                           ref_v, src_stride_v, ref_frame,
                           y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
        int refbw_m1, refbh_m1;
        int th, mx, my;
        VP56mv mv;

        if (s->ss_h) {
            // BUG https://code.google.com/p/webm/issues/detail?id=820
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 16, (s->cols * 4 - x + px + 3) * 16);
            mx = scale_mv(mv.x, 0) + (scale_mv(x * 16, 0) & ~15) + (scale_mv(x * 32, 0) & 15);
        } else {
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
            mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        }
        if (s->ss_v) {
            // BUG https://code.google.com/p/webm/issues/detail?id=820
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 16, (s->rows * 4 - y + py + 3) * 16);
            my = scale_mv(mv.y, 1) + (scale_mv(y * 16, 1) & ~15) + (scale_mv(y * 32, 1) & 15);
        } else {
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
            my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
        }

        y = my >> 4;
        x = mx >> 4;
        ref_u += y * src_stride_u + x * bytesperpixel;
        ref_v += y * src_stride_v + x * bytesperpixel;
        mx &= 15;
        my &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        // FIXME bilinear filter only needs 0/1 pixels, not 3/4
        // we use +7 because the last 7 pixels of each sbrow can be changed in
        // the longest loopfilter of the next sbrow
        th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        // The arm/aarch64 _hv filters read one more row than what actually is
        // needed, so switch to emulated edge one pixel sooner vertically
        // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                     ref_u - 3 * src_stride_u - 3 * bytesperpixel,
                                     288, src_stride_u,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_u = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_u, dst_stride, ref_u, 288, bh, mx, my, step[0], step[1]);

            s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                     ref_v - 3 * src_stride_v - 3 * bytesperpixel,
                                     288, src_stride_v,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_v = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_v, dst_stride, ref_v, 288, bh, mx, my, step[0], step[1]);
        } else {
            smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
            smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
        }
    }
}
#define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_scaled(s, s->dsp.s##mc, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                   mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                   s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
#define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_scaled(s, s->dsp.s##mc, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                     row, col, mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                     s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])

#define FN(x) x##_scaled_8bpp
#define BYTES_PER_PIXEL 1
#include "vp9_mc_template.c"
#undef FN
#undef BYTES_PER_PIXEL
#define FN(x) x##_scaled_16bpp
#define BYTES_PER_PIXEL 2
#include "vp9_mc_template.c"
#undef mc_luma_dir
#undef mc_chroma_dir
#undef FN
#undef BYTES_PER_PIXEL
static av_always_inline void inter_recon(AVCodecContext *avctx, int bytesperpixel)
{
    VP9Context *s = avctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col;

    if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
        if (bytesperpixel == 1) {
            inter_pred_scaled_8bpp(avctx);
        } else {
            inter_pred_scaled_16bpp(avctx);
        }
    } else {
        if (bytesperpixel == 1) {
            inter_pred_8bpp(avctx);
        } else {
            inter_pred_16bpp(avctx);
        }
    }

    if (!b->skip) {
        /* mostly copied intra_recon() */

        int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
        int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
        int end_x = FFMIN(2 * (s->cols - col), w4);
        int end_y = FFMIN(2 * (s->rows - row), h4);
        int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
        int uvstep1d = 1 << b->uvtx, p;
        uint8_t *dst = s->dst[0];

        // y itxfm add
        for (n = 0, y = 0; y < end_y; y += step1d) {
            uint8_t *ptr = dst;
            for (x = 0; x < end_x; x += step1d,
                                   ptr += 4 * step1d * bytesperpixel, n += step) {
                int eob = b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];

                if (eob)
                    s->dsp.itxfm_add[tx][DCT_DCT](ptr, s->y_stride,
                                                  s->block + 16 * n * bytesperpixel, eob);
            }
            dst += 4 * s->y_stride * step1d;
        }

        // uv itxfm add
        end_x >>= s->ss_h;
        end_y >>= s->ss_v;
        step = 1 << (b->uvtx * 2);
        for (p = 0; p < 2; p++) {
            dst = s->dst[p + 1];
            for (n = 0, y = 0; y < end_y; y += uvstep1d) {
                uint8_t *ptr = dst;
                for (x = 0; x < end_x; x += uvstep1d,
                                       ptr += 4 * uvstep1d * bytesperpixel, n += step) {
                    int eob = b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];

                    if (eob)
                        s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
                                                        s->uvblock[p] + 16 * n * bytesperpixel, eob);
                }
                dst += 4 * uvstep1d * s->uv_stride;
            }
        }
    }
}

static void inter_recon_8bpp(AVCodecContext *avctx)
{
    inter_recon(avctx, 1);
}

static void inter_recon_16bpp(AVCodecContext *avctx)
{
    inter_recon(avctx, 2);
}
static av_always_inline void mask_edges(uint8_t (*mask)[8][4], int ss_h, int ss_v,
                                        int row_and_7, int col_and_7,
                                        int w, int h, int col_end, int row_end,
                                        enum TxfmMode tx, int skip_inter)
{
    static const unsigned wide_filter_col_mask[2] = { 0x11, 0x01 };
    static const unsigned wide_filter_row_mask[2] = { 0x03, 0x07 };

    // FIXME I'm pretty sure all loops can be replaced by a single LUT if
    // we make VP9Filter.mask uint64_t (i.e. row/col all single variable)
    // and make the LUT 5-indexed (bl, bp, is_uv, tx and row/col), and then
    // use row_and_7/col_and_7 as shifts (1*col_and_7+8*row_and_7)

    // the intended behaviour of the vp9 loopfilter is to work on 8-pixel
    // edges. This means that for UV, we work on two subsampled blocks at
    // a time, and we only use the topleft block's mode information to set
    // things like block strength. Thus, for any block size smaller than
    // 16x16, ignore the odd portion of the block.
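    // Roughly: each mask[dir][y][f] entry is a bitmask over the 8 8px columns
    // of this superblock for row y, with dir 0 holding vertical (column)
    // edges and dir 1 horizontal (row) edges; f buckets the edge by filter
    // width (16/8/4px), with [3] carrying the extra inner 4px edges of
    // 4x4-transform blocks.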
    if (tx == TX_4X4 && (ss_v | ss_h)) {
        if (h == ss_v) {
            if (row_and_7 & 1)
                return;
            if (ss_v)
                h += 1;
        }
        if (w == ss_h) {
            if (col_and_7 & 1)
                return;
            if (ss_h)
                w += 1;
        }
    }

    if (tx == TX_4X4 && !skip_inter) {
        int t = 1 << col_and_7, m_col = (t << w) - t, y;
        // on 32-px edges, use the 8-px wide loopfilter; else, use 4-px wide
        int m_row_8 = m_col & wide_filter_col_mask[ss_h], m_row_4 = m_col - m_row_8;

        for (y = row_and_7; y < h + row_and_7; y++) {
            int col_mask_id = 2 - !(y & wide_filter_row_mask[ss_v]);

            mask[0][y][1] |= m_row_8;
            mask[0][y][2] |= m_row_4;
            // for odd lines, if the odd col is not being filtered,
            // skip odd row also:
            // .---. <-- a
            // |   |
            // |___| <-- b
            // ^   ^
            // c   d
            //
            // if a/c are even row/col and b/d are odd, and d is skipped,
            // e.g. right edge of size-66x66.webm, then skip b also (bug)
            if ((ss_h & ss_v) && (col_end & 1) && (y & 1)) {
                mask[1][y][col_mask_id] |= (t << (w - 1)) - t;
            } else {
                mask[1][y][col_mask_id] |= m_col;
            }
            if (!ss_h)
                mask[0][y][3] |= m_col;
            if (!ss_v) {
                if (ss_h && (col_end & 1))
                    mask[1][y][3] |= (t << (w - 1)) - t;
                else
                    mask[1][y][3] |= m_col;
            }
        }
    } else {
        int y, t = 1 << col_and_7, m_col = (t << w) - t;

        if (!skip_inter) {
            int mask_id = (tx == TX_8X8);
            int l2 = tx + ss_h - 1, step1d;
            static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
            int m_row = m_col & masks[l2];

            // at odd UV col/row edges tx16/tx32 loopfilter edges, force
            // 8wd loopfilter to prevent going off the visible edge.
            if (ss_h && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
                int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
                int m_row_8 = m_row - m_row_16;

                for (y = row_and_7; y < h + row_and_7; y++) {
                    mask[0][y][0] |= m_row_16;
                    mask[0][y][1] |= m_row_8;
                }
            } else {
                for (y = row_and_7; y < h + row_and_7; y++)
                    mask[0][y][mask_id] |= m_row;
            }

            l2 = tx + ss_v - 1;
            step1d = 1 << l2;
            if (ss_v && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
                for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
                    mask[1][y][0] |= m_col;
                if (y - row_and_7 == h - 1)
                    mask[1][y][1] |= m_col;
            } else {
                for (y = row_and_7; y < h + row_and_7; y += step1d)
                    mask[1][y][mask_id] |= m_col;
            }
        } else if (tx != TX_4X4) {
            int mask_id;

            mask_id = (tx == TX_8X8) || (h == ss_v);
            mask[1][row_and_7][mask_id] |= m_col;
            mask_id = (tx == TX_8X8) || (w == ss_h);
            for (y = row_and_7; y < h + row_and_7; y++)
                mask[0][y][mask_id] |= t;
        } else {
            int t8 = t & wide_filter_col_mask[ss_h], t4 = t - t8;

            for (y = row_and_7; y < h + row_and_7; y++) {
                mask[0][y][2] |= t4;
                mask[0][y][1] |= t8;
            }
            mask[1][row_and_7][2 - !(row_and_7 & wide_filter_row_mask[ss_v])] |= m_col;
        }
    }
}
void ff_vp9_decode_block(AVCodecContext *avctx, int row, int col,
                         VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
                         enum BlockLevel bl, enum BlockPartition bp)
{
    VP9Context *s = avctx->priv_data;
    VP9Block *b = s->b;
    enum BlockSize bs = bl * 3 + bp;
    int bytesperpixel = s->bytesperpixel;
    int w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl;
    int emu[2];
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;

    s->row = row;
    s->row7 = row & 7;
    s->col = col;
    s->col7 = col & 7;

    s->min_mv.x = -(128 + col * 64);
    s->min_mv.y = -(128 + row * 64);
    s->max_mv.x = 128 + (s->cols - col - w4) * 64;
    s->max_mv.y = 128 + (s->rows - row - h4) * 64;

    if (s->pass < 2) {
        b->bs = bs;
        b->bl = bl;
        b->bp = bp;
        decode_mode(avctx);
        b->uvtx = b->tx - ((s->ss_h && w4 * 2 == (1 << b->tx)) ||
                           (s->ss_v && h4 * 2 == (1 << b->tx)));
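        // note on uvtx above: chroma reuses the luma tx size, stepped down
        // once when it would overshoot the subsampled chroma block, e.g. an
        // 8x8 block with TX_8X8 in 4:2:0 covers only 4x4 chroma pixels, so
        // uvtx becomes TX_4X4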
        if (!b->skip) {
            int has_coeffs;

            if (bytesperpixel == 1) {
                has_coeffs = decode_coeffs_8bpp(avctx);
            } else {
                has_coeffs = decode_coeffs_16bpp(avctx);
            }
            if (!has_coeffs && b->bs <= BS_8x8 && !b->intra) {
                b->skip = 1;
                memset(&s->above_skip_ctx[col], 1, w4);
                memset(&s->left_skip_ctx[s->row7], 1, h4);
            }
        } else {
            int row7 = s->row7;

#define SPLAT_ZERO_CTX(v, n) \
    switch (n) { \
    case 1:  v = 0;          break; \
    case 2:  AV_ZERO16(&v);  break; \
    case 4:  AV_ZERO32(&v);  break; \
    case 8:  AV_ZERO64(&v);  break; \
    case 16: AV_ZERO128(&v); break; \
    }
#define SPLAT_ZERO_YUV(dir, var, off, n, dir2) \
    do { \
        SPLAT_ZERO_CTX(s->dir##_y_##var[off * 2], n * 2); \
        if (s->ss_##dir2) { \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off], n); \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off], n); \
        } else { \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off * 2], n * 2); \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off * 2], n * 2); \
        } \
    } while (0)

            switch (w4) {
            case 1: SPLAT_ZERO_YUV(above, nnz_ctx, col, 1, h); break;
            case 2: SPLAT_ZERO_YUV(above, nnz_ctx, col, 2, h); break;
            case 4: SPLAT_ZERO_YUV(above, nnz_ctx, col, 4, h); break;
            case 8: SPLAT_ZERO_YUV(above, nnz_ctx, col, 8, h); break;
            }
            switch (h4) {
            case 1: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 1, v); break;
            case 2: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 2, v); break;
            case 4: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 4, v); break;
            case 8: SPLAT_ZERO_YUV(left, nnz_ctx, row7, 8, v); break;
            }
        }

        if (s->pass == 1) {
            s->b++;
            s->block += w4 * h4 * 64 * bytesperpixel;
            s->uvblock[0] += w4 * h4 * 64 * bytesperpixel >> (s->ss_h + s->ss_v);
            s->uvblock[1] += w4 * h4 * 64 * bytesperpixel >> (s->ss_h + s->ss_v);
            s->eob += 4 * w4 * h4;
            s->uveob[0] += 4 * w4 * h4 >> (s->ss_h + s->ss_v);
            s->uveob[1] += 4 * w4 * h4 >> (s->ss_h + s->ss_v);

            return;
        }
    }
    // emulated overhangs if the stride of the target buffer can't hold. This
    // makes it possible to support emu-edge and so on even if we have large block
    // overhangs
    emu[0] = (col + w4) * 8 * bytesperpixel > f->linesize[0] ||
             (row + h4) > s->rows;
    emu[1] = ((col + w4) * 8 >> s->ss_h) * bytesperpixel > f->linesize[1] ||
             (row + h4) > s->rows;
    if (emu[0]) {
        s->dst[0] = s->tmp_y;
        s->y_stride = 128;
    } else {
        s->dst[0] = f->data[0] + yoff;
        s->y_stride = f->linesize[0];
    }
    if (emu[1]) {
        s->dst[1] = s->tmp_uv[0];
        s->dst[2] = s->tmp_uv[1];
        s->uv_stride = 128;
    } else {
        s->dst[1] = f->data[1] + uvoff;
        s->dst[2] = f->data[2] + uvoff;
        s->uv_stride = f->linesize[1];
    }
    if (b->intra) {
        if (s->s.h.bpp > 8) {
            intra_recon_16bpp(avctx, yoff, uvoff);
        } else {
            intra_recon_8bpp(avctx, yoff, uvoff);
        }
    } else {
        if (s->s.h.bpp > 8) {
            inter_recon_16bpp(avctx);
        } else {
            inter_recon_8bpp(avctx);
        }
    }
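    // If the block was reconstructed into the temporary overhang buffers,
    // copy the visible part back into the frame in power-of-2 wide slices:
    // dsp.mc[n][0][0][0][0] is the plain copy for 64>>n px wide blocks, so
    // e.g. a 40px wide area is written as one 32px and one 8px copy.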
    if (emu[0]) {
        int w = FFMIN(s->cols - col, w4) * 8, h = FFMIN(s->rows - row, h4) * 8, n, o = 0;

        for (n = 0; o < w; n++) {
            int bw = 64 >> n;

            av_assert2(n <= 4);
            if (w & bw) {
                s->dsp.mc[n][0][0][0][0](f->data[0] + yoff + o * bytesperpixel, f->linesize[0],
                                         s->tmp_y + o * bytesperpixel, 128, h, 0, 0);
                o += bw;
            }
        }
    }
    if (emu[1]) {
        int w = FFMIN(s->cols - col, w4) * 8 >> s->ss_h;
        int h = FFMIN(s->rows - row, h4) * 8 >> s->ss_v, n, o = 0;

        for (n = s->ss_h; o < w; n++) {
            int bw = 64 >> n;

            av_assert2(n <= 4);
            if (w & bw) {
                s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o * bytesperpixel, f->linesize[1],
                                         s->tmp_uv[0] + o * bytesperpixel, 128, h, 0, 0);
                s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o * bytesperpixel, f->linesize[2],
                                         s->tmp_uv[1] + o * bytesperpixel, 128, h, 0, 0);
                o += bw;
            }
        }
    }
    // pick filter level and find edges to apply filter to
    if (s->s.h.filter.level &&
        (lvl = s->s.h.segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1]
                                                        [b->mode[3] != ZEROMV]) > 0) {
        int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4);
        int skip_inter = !b->intra && b->skip, col7 = s->col7, row7 = s->row7;

        setctx_2d(&lflvl->level[row7 * 8 + col7], w4, h4, 8, lvl);
        mask_edges(lflvl->mask[0], 0, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
        if (s->ss_h || s->ss_v)
            mask_edges(lflvl->mask[1], s->ss_h, s->ss_v, row7, col7, x_end, y_end,
                       s->cols & 1 && col + w4 >= s->cols ? s->cols & 7 : 0,
                       s->rows & 1 && row + h4 >= s->rows ? s->rows & 7 : 0,
                       b->uvtx, skip_inter);
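        // lazily fill the loopfilter limit LUT for this level: limit derives
        // from the filter level, optionally narrowed by the sharpness
        // setting, and mblim = 2 * (lvl + 2) + limit (e.g. lvl 32,
        // sharpness 0 gives limit 32, mblim 100)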
        if (!s->filter_lut.lim_lut[lvl]) {
            int sharp = s->s.h.filter.sharpness;
            int limit = lvl;

            if (sharp > 0) {
                limit >>= (sharp + 3) >> 2;
                limit = FFMIN(limit, 9 - sharp);
            }
            limit = FFMAX(limit, 1);

            s->filter_lut.lim_lut[lvl] = limit;
            s->filter_lut.mblim_lut[lvl] = 2 * (lvl + 2) + limit;
        }
    }
    if (s->pass == 2) {
        s->b++;
        s->block += w4 * h4 * 64 * bytesperpixel;
        s->uvblock[0] += w4 * h4 * 64 * bytesperpixel >> (s->ss_v + s->ss_h);
        s->uvblock[1] += w4 * h4 * 64 * bytesperpixel >> (s->ss_v + s->ss_h);
        s->eob += 4 * w4 * h4;
        s->uveob[0] += 4 * w4 * h4 >> (s->ss_v + s->ss_h);
        s->uveob[1] += 4 * w4 * h4 >> (s->ss_v + s->ss_h);
    }
}