2 * VP9 compatible video decoder
4 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5 * Copyright (C) 2013 Clément Bœsch <u pkh me>
7 * This file is part of Libav.
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
/* Clamp both components of *src into the legal motion-vector range recorded
 * in the decoder context (s->min_mv / s->max_mv) and store the result in
 * *dst.  NOTE(review): the parameter-list tail and braces are elided in this
 * excerpt; a trailing `VP9Context *s` parameter is implied by the body —
 * confirm against the full source. */
29 static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
32 dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
33 dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
/*
 * Motion-vector prediction: derive the predicted MV (*pmv) for reference
 * frame index `ref` of the current block, scanning candidates in the VP9
 * order (sub-block MVs, above/left context, spatial neighbours, co-located
 * MV in the previous frame, then the same candidates with non-matching
 * reference frames).  `z` selects which MV of a compound pair; `idx` and
 * `sb` (sub-block index, or a special value for whole-block modes) are
 * presumably the near/nearest selector and sub-block position — confirm
 * against the caller ff_vp9_fill_mv() below.
 * NOTE(review): many original lines (macro bodies, braces, the scan-loop
 * headers) are elided in this excerpt; comments describe only what is
 * visible.
 */
36 static void find_ref_mvs(VP9Context *s,
37 VP56mv *pmv, int ref, int z, int idx, int sb)
    /* Per-block-size table of up to 8 candidate neighbour positions.  Each
     * entry is a { col, row } offset in 8x8-block units relative to the
     * current block position (see the `p[i][0] + col, p[i][1] + row`
     * indexing in the scan loops below). */
39 static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
40 [BS_64x64] = { { 3, -1 }, { -1, 3 }, { 4, -1 }, { -1, 4 },
41 { -1, -1 }, { 0, -1 }, { -1, 0 }, { 6, -1 } },
42 [BS_64x32] = { { 0, -1 }, { -1, 0 }, { 4, -1 }, { -1, 2 },
43 { -1, -1 }, { 0, -3 }, { -3, 0 }, { 2, -1 } },
44 [BS_32x64] = { { -1, 0 }, { 0, -1 }, { -1, 4 }, { 2, -1 },
45 { -1, -1 }, { -3, 0 }, { 0, -3 }, { -1, 2 } },
46 [BS_32x32] = { { 1, -1 }, { -1, 1 }, { 2, -1 }, { -1, 2 },
47 { -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
48 [BS_32x16] = { { 0, -1 }, { -1, 0 }, { 2, -1 }, { -1, -1 },
49 { -1, 1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
50 [BS_16x32] = { { -1, 0 }, { 0, -1 }, { -1, 2 }, { -1, -1 },
51 { 1, -1 }, { -3, 0 }, { 0, -3 }, { -3, -3 } },
52 [BS_16x16] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, 1 },
53 { -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
54 [BS_16x8] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, -1 },
55 { 0, -2 }, { -2, 0 }, { -2, -1 }, { -1, -2 } },
56 [BS_8x16] = { { -1, 0 }, { 0, -1 }, { -1, 1 }, { -1, -1 },
57 { -2, 0 }, { 0, -2 }, { -1, -2 }, { -2, -1 } },
58 [BS_8x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
59 { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
60 [BS_8x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
61 { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
62 [BS_4x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
63 { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
64 [BS_4x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
65 { -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
68 int row = b->row, col = b->col, row7 = b->row7;
69 const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
    /* Sentinel meaning "no candidate MV latched yet"; a valid packed MV
     * never takes this bit pattern. */
70 #define INVALID_MV 0x80008000U
71 uint32_t mem = INVALID_MV;
    /* Return an already-decoded sub-block MV verbatim (no clamping).  `mem`
     * latches the first candidate so a later, *different* candidate can be
     * told apart.  NOTE(review): macro body lines are elided here. */
74 #define RETURN_DIRECT_MV(mv) \
76 uint32_t m = AV_RN32A(&mv); \
80 } else if (mem == INVALID_MV) { \
82 } else if (m != mem) { \
    /* For sub-blocks after the first, prefer the MVs already decoded for
     * earlier sub-blocks of this same block. */
89 if (sb == 2 || sb == 1) {
90 RETURN_DIRECT_MV(b->mv[0][z]);
92 RETURN_DIRECT_MV(b->mv[2][z]);
93 RETURN_DIRECT_MV(b->mv[1][z]);
94 RETURN_DIRECT_MV(b->mv[0][z]);
    /* Like RETURN_DIRECT_MV, but clamp the candidate into the legal MV
     * range first.  NOTE(review): macro body lines are elided here. */
97 #define RETURN_MV(mv) \
102 clamp_mv(&tmp, &mv, s); \
103 m = AV_RN32A(&tmp); \
107 } else if (mem == INVALID_MV) { \
109 } else if (m != mem) { \
114 uint32_t m = AV_RN32A(&mv); \
116 clamp_mv(pmv, &mv, s); \
118 } else if (mem == INVALID_MV) { \
120 } else if (m != mem) { \
121 clamp_mv(pmv, &mv, s); \
    /* Candidate from the row of 8x8 blocks directly above (guard against
     * row 0 is elided in this excerpt — presumably `if (row > 0)`). */
128 VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
130 if (mv->ref[0] == ref)
131 RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
132 else if (mv->ref[1] == ref)
133 RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
    /* Candidate from the block to the left, only within the current tile
     * (tiles are independently decodable, so never cross the tile edge). */
135 if (col > s->tiling.tile_col_start) {
136 VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
138 if (mv->ref[0] == ref)
139 RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
140 else if (mv->ref[1] == ref)
141 RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
148 // previously coded MVs in the neighborhood, using same reference frame
150 int c = p[i][0] + col, r = p[i][1] + row;
    /* Only consider neighbours inside the frame and not left of the tile. */
152 if (c >= s->tiling.tile_col_start && c < s->cols &&
153 r >= 0 && r < s->rows) {
154 VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
156 if (mv->ref[0] == ref)
157 RETURN_MV(mv->mv[0]);
158 else if (mv->ref[1] == ref)
159 RETURN_MV(mv->mv[1]);
163 // MV at this position in previous frame, using same reference frame
164 if (s->use_last_frame_mvs) {
165 VP9MVRefPair *mv = &s->frames[LAST_FRAME].mv[row * s->sb_cols * 8 + col];
    /* With frame threading, wait until the previous frame's decode has
     * progressed past this superblock row before reading its MVs (skipped
     * when the last frame was decoded in 2-pass mode). */
167 if (!s->last_uses_2pass)
168 ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0);
170 if (mv->ref[0] == ref)
171 RETURN_MV(mv->mv[0]);
172 else if (mv->ref[1] == ref)
173 RETURN_MV(mv->mv[1]);
    /* Return a candidate taken from a *different* reference frame: when
     * `scale` is set (opposite sign bias) the MV direction is inverted
     * before being returned through RETURN_MV.  NOTE(review): the guard
     * lines of this macro are elided in this excerpt. */
176 #define RETURN_SCALE_MV(mv, scale) \
179 VP56mv mv_temp = { -mv.x, -mv.y }; \
180 RETURN_MV(mv_temp); \
186 // previously coded MVs in the neighborhood, using different reference frame
187 for (i = 0; i < 8; i++) {
188 int c = p[i][0] + col, r = p[i][1] + row;
190 if (c >= s->tiling.tile_col_start && c < s->cols &&
191 r >= 0 && r < s->rows) {
192 VP9MVRefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
194 if (mv->ref[0] != ref && mv->ref[0] >= 0)
195 RETURN_SCALE_MV(mv->mv[0],
196 s->signbias[mv->ref[0]] != s->signbias[ref]);
197 if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
198 // BUG - libvpx has this condition regardless of whether
199 // we used the first ref MV and pre-scaling
200 AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
201 RETURN_SCALE_MV(mv->mv[1],
202 s->signbias[mv->ref[1]] != s->signbias[ref]);
207 // MV at this position in previous frame, using different reference frame
208 if (s->use_last_frame_mvs) {
209 VP9MVRefPair *mv = &s->frames[LAST_FRAME].mv[row * s->sb_cols * 8 + col];
211 // no need to await_progress, because we already did that above
212 if (mv->ref[0] != ref && mv->ref[0] >= 0)
213 RETURN_SCALE_MV(mv->mv[0],
214 s->signbias[mv->ref[0]] != s->signbias[ref]);
215 if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
216 // BUG - libvpx has this condition regardless of whether
217 // we used the first ref MV and pre-scaling
218 AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
219 RETURN_SCALE_MV(mv->mv[1],
220 s->signbias[mv->ref[1]] != s->signbias[ref]);
227 #undef RETURN_SCALE_MV
/*
 * Decode one motion-vector component delta from the arithmetic coder.
 * `idx` selects the per-component probability/counter context (row vs.
 * column — see the two calls in ff_vp9_fill_mv() below); `hp` enables
 * reading the extra half-pel precision bit.  Adaptation counters in
 * s->counts are updated for every symbol read, mirroring libvpx.
 * Returns the signed component delta.
 * NOTE(review): some lines (the class-0/class-N branch headers and final
 * magnitude assembly) are elided in this excerpt.
 */
230 static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
    /* Sign bit, then the magnitude class (tree-coded). */
232 int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
233 int n, c = vp8_rac_get_tree(&s->c, ff_vp9_mv_class_tree,
234 s->prob.p.mv_comp[idx].classes);
236 s->counts.mv_comp[idx].sign[sign]++;
237 s->counts.mv_comp[idx].classes[c]++;
    /* Class > 0: read `c` raw integer bits of the magnitude, then the
     * fractional (fp) bits and, when enabled, the half-pel (hp) bit. */
241 for (n = 0, m = 0; m < c; m++) {
242 bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
244 s->counts.mv_comp[idx].bits[m][bit]++;
247 bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
248 s->prob.p.mv_comp[idx].fp);
250 s->counts.mv_comp[idx].fp[bit]++;
252 bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
253 s->counts.mv_comp[idx].hp[bit]++;
257 // bug in libvpx - we count for bw entropy purposes even if the
259 s->counts.mv_comp[idx].hp[1]++;
    /* Class 0: small magnitudes use dedicated class0 / class0_fp /
     * class0_hp contexts. */
263 n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
264 s->counts.mv_comp[idx].class0[n]++;
265 bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
266 s->prob.p.mv_comp[idx].class0_fp[n]);
267 s->counts.mv_comp[idx].class0_fp[n][bit]++;
268 n = (n << 3) | (bit << 1);
270 bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
271 s->counts.mv_comp[idx].class0_hp[bit]++;
275 // bug in libvpx - we count for bw entropy purposes even if the
277 s->counts.mv_comp[idx].class0_hp[1]++;
    /* Apply the sign; magnitude is stored offset by one. */
281 return sign ? -(n + 1) : (n + 1);
/*
 * Fill in the block's motion vector(s) according to its inter mode:
 * ZEROMV zeroes both MVs of the pair; otherwise the predictor is derived
 * via find_ref_mvs() and, for NEWMV, a decoded delta (joint + per-component
 * values) is added on top.  `sb` is the sub-block index forwarded to the
 * predictor search.
 * NOTE(review): interior lines and the end of this function are elided in
 * this excerpt; comments describe only what is visible.
 */
284 void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
288 if (mode == ZEROMV) {
    /* Zero both entries of the MV pair (mv[0] and mv[1]). */
289 memset(mv, 0, sizeof(*mv) * 2);
293 // FIXME cache this value and reuse for other subblocks
    /* Predict mv[0] from the first reference; NEWMV passes sb = -1. */
294 find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
295 mode == NEWMV ? -1 : sb);
296 // FIXME maybe move this code into find_ref_mvs()
    /* High-precision MV reading only when the stream allows it and the
     * predictor is small (|x|,|y| < 64); the branch body for the !hp case
     * is elided here — presumably it rounds the predictor.  TODO confirm. */
297 if ((mode == NEWMV || sb == -1) &&
298 !(hp = s->highprecisionmvs &&
299 abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
    /* NEWMV: decode the joint (which components are nonzero), then add
     * the per-component deltas (0 = row/y context, 1 = col/x context). */
314 enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
317 s->counts.mv_joint[j]++;
319 mv[0].y += read_mv_component(s, 0, hp);
321 mv[0].x += read_mv_component(s, 1, hp);
325 // FIXME cache this value and reuse for other subblocks
    /* Second MV of a compound-prediction pair, same procedure. */
326 find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
327 mode == NEWMV ? -1 : sb);
328 if ((mode == NEWMV || sb == -1) &&
329 !(hp = s->highprecisionmvs &&
330 abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
345 enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
348 s->counts.mv_joint[j]++;
350 mv[1].y += read_mv_component(s, 0, hp);
352 mv[1].x += read_mv_component(s, 1, hp);