/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "thread.h"
#include "vp56.h"
#include "vp9data.h"
#include "vp9dec.h"

static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
                                      VP9Context *s)
{
    dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
    dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
}
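
/*
 * Find the motion vector predictor for reference 'ref' of the current block.
 * Candidates are scanned in order: for sub-8x8 blocks (sb >= 0), the MVs of
 * already-decoded sub-blocks of the same block; then spatial neighbours
 * (using the offsets in mv_ref_blk_off); the co-located block in the
 * previous frame; and finally the same positions again for blocks coded with
 * a different reference frame, with sign-bias compensation. 'idx' selects
 * either the first (0) or the second distinct (1) candidate found; if not
 * enough candidates exist, a zero MV is used.
 */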
static void find_ref_mvs(VP9Context *s,
                         VP56mv *pmv, int ref, int z, int idx, int sb)
{
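    /* Neighbour positions to scan for MV candidates, per block size, as
     * (column, row) offsets in 8x8-block units relative to the current
     * block's top-left 8x8 block. */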
    static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
        [BS_64x64] = { {  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
                       { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 } },
        [BS_64x32] = { {  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
                       { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 } },
        [BS_32x64] = { { -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
                       { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 } },
        [BS_32x32] = { {  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
                       { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
        [BS_32x16] = { {  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
                       { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
        [BS_16x32] = { { -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
                       {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 } },
        [BS_16x16] = { {  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
                       { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
        [BS_16x8]  = { {  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
                       {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 } },
        [BS_8x16]  = { { -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
                       { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 } },
        [BS_8x8]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
        [BS_8x4]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
        [BS_4x8]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
        [BS_4x4]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
    };
    VP9Block *b = s->b;
    int row = s->row, col = s->col, row7 = s->row7;
    const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
#define INVALID_MV 0x80008000U
    uint32_t mem = INVALID_MV, mem_sub8x8 = INVALID_MV;
    int i;

    /* use the candidate directly when idx == 0, otherwise use the first
     * candidate that differs from the one already remembered in mem */
#define RETURN_DIRECT_MV(mv)                    \
    do {                                        \
        uint32_t m = AV_RN32A(&mv);             \
        if (!idx) {                             \
            AV_WN32A(pmv, m);                   \
            return;                             \
        } else if (mem == INVALID_MV) {         \
            mem = m;                            \
        } else if (m != mem) {                  \
            AV_WN32A(pmv, m);                   \
            return;                             \
        }                                       \
    } while (0)

    if (sb >= 0) {
        if (sb == 2 || sb == 1) {
            RETURN_DIRECT_MV(b->mv[0][z]);
        } else if (sb == 3) {
            RETURN_DIRECT_MV(b->mv[2][z]);
            RETURN_DIRECT_MV(b->mv[1][z]);
            RETURN_DIRECT_MV(b->mv[0][z]);
        }
#define RETURN_MV(mv)                                                  \
    do {                                                               \
        if (sb > 0) {                                                  \
            VP56mv tmp;                                                \
            uint32_t m;                                                \
            av_assert2(idx == 1);                                      \
            av_assert2(mem != INVALID_MV);                             \
            if (mem_sub8x8 == INVALID_MV) {                            \
                clamp_mv(&tmp, &mv, s);                                \
                m = AV_RN32A(&tmp);                                    \
                if (m != mem) {                                        \
                    AV_WN32A(pmv, m);                                  \
                    return;                                            \
                }                                                      \
                mem_sub8x8 = AV_RN32A(&mv);                            \
            } else if (mem_sub8x8 != AV_RN32A(&mv)) {                  \
                clamp_mv(&tmp, &mv, s);                                \
                m = AV_RN32A(&tmp);                                    \
                if (m != mem) {                                        \
                    AV_WN32A(pmv, m);                                  \
                } else {                                               \
                    /* BUG I'm pretty sure this isn't the intention */ \
                    AV_WN32A(pmv, 0);                                  \
                }                                                      \
                return;                                                \
            }                                                          \
        } else {                                                       \
            uint32_t m = AV_RN32A(&mv);                                \
            if (!idx) {                                                \
                clamp_mv(pmv, &mv, s);                                 \
                return;                                                \
            } else if (mem == INVALID_MV) {                            \
                mem = m;                                               \
            } else if (m != mem) {                                     \
                clamp_mv(pmv, &mv, s);                                 \
                return;                                                \
            }                                                          \
        }                                                              \
    } while (0)

        if (row > 0) {
            VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
            if (mv->ref[0] == ref)
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
            else if (mv->ref[1] == ref)
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
        }
        if (col > s->tile_col_start) {
            VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
            if (mv->ref[0] == ref)
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
            else if (mv->ref[1] == ref)
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
        }
        i = 2;
    } else {
        i = 0;
    }

    // previously coded MVs in this neighborhood, using same reference frame
    for (; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tile_col_start && c < s->cols &&
            r >= 0 && r < s->rows) {
            VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] == ref)
                RETURN_MV(mv->mv[0]);
            else if (mv->ref[1] == ref)
                RETURN_MV(mv->mv[1]);
        }
    }

    // MV at this position in previous frame, using same reference frame
    if (s->s.h.use_last_frame_mvs) {
        VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass)
            ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
        if (mv->ref[0] == ref)
            RETURN_MV(mv->mv[0]);
        else if (mv->ref[1] == ref)
            RETURN_MV(mv->mv[1]);
    }
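
    /* If a candidate's reference frame has the opposite sign bias to the
     * requested reference, the candidate points the other way in time and
     * is therefore negated before being used as a predictor. */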
#define RETURN_SCALE_MV(mv, scale)              \
    do {                                        \
        if (scale) {                            \
            VP56mv mv_temp = { -mv.x, -mv.y };  \
            RETURN_MV(mv_temp);                 \
        } else {                                \
            RETURN_MV(mv);                      \
        }                                       \
    } while (0)

    // previously coded MVs in this neighborhood, using different reference frame
    for (i = 0; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];

            if (mv->ref[0] != ref && mv->ref[0] >= 0)
                RETURN_SCALE_MV(mv->mv[0],
                                s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
            if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                // BUG - libvpx has this condition regardless of whether
                // we used the first ref MV and pre-scaling
                AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
                RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
            }
        }
    }

    // MV at this position in previous frame, using different reference frame
    if (s->s.h.use_last_frame_mvs) {
        VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];

        // no need to await_progress, because we already did that above
        if (mv->ref[0] != ref && mv->ref[0] >= 0)
            RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
        if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
            // BUG - libvpx has this condition regardless of whether
            // we used the first ref MV and pre-scaling
            AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
            RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]);
        }
    }

    // not enough distinct candidates found: fall back to a zero MV
    AV_ZERO32(pmv);
    clamp_mv(pmv, pmv, s);
#undef INVALID_MV
#undef RETURN_MV
#undef RETURN_SCALE_MV
}
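
/*
 * Read one motion vector component (row or column delta, in 1/8-pel units)
 * from the bitstream: a sign bit and a magnitude class, followed either by
 * the class-0 magnitude with its fractional and (optional) high-precision
 * bits, or by per-class integer offset bits plus the fractional and
 * high-precision bits. 'hp' indicates whether the high-precision bit is
 * actually coded for this block.
 */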
static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
{
    int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
    int n, c = vp8_rac_get_tree(&s->c, ff_vp9_mv_class_tree,
                                s->prob.p.mv_comp[idx].classes);

    s->counts.mv_comp[idx].sign[sign]++;
    s->counts.mv_comp[idx].classes[c]++;
    if (c) {
        int m;

        for (n = 0, m = 0; m < c; m++) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
            n |= bit << m;
            s->counts.mv_comp[idx].bits[m][bit]++;
        }
        n <<= 3;
        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
                               s->prob.p.mv_comp[idx].fp);
        n |= bit << 1;
        s->counts.mv_comp[idx].fp[bit]++;
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
            s->counts.mv_comp[idx].hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].hp[1]++;
        }
        n += 8 << c;
    } else {
        n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
        s->counts.mv_comp[idx].class0[n]++;
        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
                               s->prob.p.mv_comp[idx].class0_fp[n]);
        s->counts.mv_comp[idx].class0_fp[n][bit]++;
        n = (n << 3) | (bit << 1);
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
            s->counts.mv_comp[idx].class0_hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].class0_hp[1]++;
        }
    }

    return sign ? -(n + 1) : (n + 1);
}
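
/*
 * Compute the motion vector(s) for the current block or for sub-8x8 block
 * 'sb': ZEROMV sets them to zero, NEARESTMV/NEARMV copy the predictor found
 * by find_ref_mvs(), and NEWMV adds a decoded delta to that predictor.
 * mv[1] is only filled for compound (two-reference) prediction.
 */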
void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
{
    VP9Block *b = s->b;

    if (mode == ZEROMV) {
        AV_ZERO64(mv);
    } else {
        int hp;

        // FIXME cache this value and reuse for other subblocks
        find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
                     mode == NEWMV ? -1 : sb);
        // FIXME maybe move this code into find_ref_mvs()
        if ((mode == NEWMV || sb == -1) &&
            !(hp = s->s.h.highprecisionmvs &&
                   abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
            // round odd values towards zero when high precision is disallowed
            if (mv[0].y & 1) {
                if (mv[0].y < 0)
                    mv[0].y++;
                else
                    mv[0].y--;
            }
            if (mv[0].x & 1) {
                if (mv[0].x < 0)
                    mv[0].x++;
                else
                    mv[0].x--;
            }
        }
        if (mode == NEWMV) {
            enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
                                              s->prob.p.mv_joint);

            s->counts.mv_joint[j]++;
            if (j >= MV_JOINT_V)
                mv[0].y += read_mv_component(s, 0, hp);
            if (j & 1)
                mv[0].x += read_mv_component(s, 1, hp);
        }

        if (b->comp) {
            // FIXME cache this value and reuse for other subblocks
            find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
                         mode == NEWMV ? -1 : sb);
            if ((mode == NEWMV || sb == -1) &&
                !(hp = s->s.h.highprecisionmvs &&
                       abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
                if (mv[1].y & 1) {
                    if (mv[1].y < 0)
                        mv[1].y++;
                    else
                        mv[1].y--;
                }
                if (mv[1].x & 1) {
                    if (mv[1].x < 0)
                        mv[1].x++;
                    else
                        mv[1].x--;
                }
            }
            if (mode == NEWMV) {
                enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
                                                  s->prob.p.mv_joint);

                s->counts.mv_joint[j]++;
                if (j >= MV_JOINT_V)
                    mv[1].y += read_mv_component(s, 0, hp);
                if (j & 1)
                    mv[1].x += read_mv_component(s, 1, hp);
            }
        }
    }
}