/*
 * RV40 decoder motion compensation functions x86-optimised
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * RV40 decoder motion compensation functions x86-optimised
 * 2,0 and 0,2 have h264 equivalents.
 * 3,3 is bugged in the rv40 format and maps to the _xy2 version.
 */
#include "libavcodec/x86/dsputil_mmx.h"
#include "libavcodec/rv34dsp.h"
void ff_put_rv40_chroma_mc8_mmx  (uint8_t *dst, uint8_t *src,
                                  int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmx2 (uint8_t *dst, uint8_t *src,
                                  int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow(uint8_t *dst, uint8_t *src,
                                  int stride, int h, int x, int y);

void ff_put_rv40_chroma_mc4_mmx  (uint8_t *dst, uint8_t *src,
                                  int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmx2 (uint8_t *dst, uint8_t *src,
                                  int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow(uint8_t *dst, uint8_t *src,
                                  int stride, int h, int x, int y);
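/* In the chroma prototypes above, x and y are presumably the fractional
 * (subpel) components of the chroma motion vector, from which the assembly
 * derives its bilinear interpolation weights; h is the block height. */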
/* Weighted-prediction helpers in rounding (rnd) and non-rounding (nornd)
 * variants; w1/w2 are presumably the weights applied to the two source
 * blocks (the exact semantics live in the assembly). */
#define DECLARE_WEIGHT(opt) \
void ff_rv40_weight_func_rnd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_rnd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);

DECLARE_WEIGHT(mmx2)
DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3)
/**
 * Define one qpel function.
 * LOOPSIZE must already be set to the number of pixels processed per
 * iteration in the inner loop of the called functions.
 * COFF(x) must already be defined to give the offset into the coefficient
 * array used by the called function for qpel position x.
 */
#define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT)                           \
static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst,  \
                                                         uint8_t *src,  \
                                                         int stride)    \
{                                                                       \
    int i;                                                              \
    if (PH && PV) {                                                     \
        /* horizontal pass into a temporary buffer (with 5 extra rows   \
         * for the filter taps), then vertical pass into dst */         \
        DECLARE_ALIGNED(16, uint8_t, tmp)[SIZE * (SIZE + 5)];           \
        uint8_t *tmpptr = tmp + SIZE * 2;                               \
        src -= stride * 2;                                              \
                                                                        \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride,    \
                                     SIZE + 5, HCOFF(PH));              \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i,   \
                                         SIZE, SIZE, VCOFF(PV));        \
    } else if (PV) {                                                    \
        /* vertical-only interpolation */                               \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, VCOFF(PV));     \
    } else {                                                            \
        /* horizontal-only interpolation */                             \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, HCOFF(PH));     \
    }                                                                   \
}
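/* As a sketch (not literal generated code, and assuming constant folding of
 * the PH/PV tests), QPEL_FUNC_DECL(put_, 8, 1, 0, _ssse3) boils down to:
 *
 *     static void put_rv40_qpel8_mc10_ssse3(uint8_t *dst, uint8_t *src,
 *                                           int stride)
 *     {
 *         int i;
 *         for (i = 0; i < 8; i += LOOPSIZE)
 *             ff_put_rv40_qpel_h_ssse3(dst + i, stride, src + i,
 *                                      stride, 8, HCOFF(1));
 *     }
 *
 * i.e. a thin C wrapper that hands the assembly row filter the coefficient
 * offset for horizontal qpel position 1. */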
/** Declare functions for sizes 8 and 16 and given operations
 * and qpel position. */
#define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \
    QPEL_FUNC_DECL(OP, 8,  PH, PV, OPT)  \
    QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)
/** Declare all functions for all sizes and qpel positions */
#define QPEL_MC_DECL(OP, OPT)                                        \
void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
                                  const uint8_t *src,                \
                                  ptrdiff_t srcStride,               \
                                  int len, int m);                   \
void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
                                  const uint8_t *src,                \
                                  ptrdiff_t srcStride,               \
                                  int len, int m);                   \
QPEL_FUNCS_DECL(OP, 0, 1, OPT)                                       \
QPEL_FUNCS_DECL(OP, 0, 3, OPT)                                       \
QPEL_FUNCS_DECL(OP, 1, 0, OPT)                                       \
QPEL_FUNCS_DECL(OP, 1, 1, OPT)                                       \
QPEL_FUNCS_DECL(OP, 1, 2, OPT)                                       \
QPEL_FUNCS_DECL(OP, 1, 3, OPT)                                       \
QPEL_FUNCS_DECL(OP, 2, 1, OPT)                                       \
QPEL_FUNCS_DECL(OP, 2, 2, OPT)                                       \
QPEL_FUNCS_DECL(OP, 2, 3, OPT)                                       \
QPEL_FUNCS_DECL(OP, 3, 0, OPT)                                       \
QPEL_FUNCS_DECL(OP, 3, 1, OPT)                                       \
QPEL_FUNCS_DECL(OP, 3, 2, OPT)
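/* Positions (0,0), (2,0), (0,2) and (3,3) are deliberately missing from the
 * list above: (0,0) is a plain pixel copy, (2,0) and (0,2) have h264
 * equivalents, and (3,3) is bugged in the rv40 format and maps to the _xy2
 * version (see the comment at the top of this file). */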
#define LOOPSIZE  8
#define HCOFF(x)  (32 * (x - 1))
#define VCOFF(x)  (32 * (x - 1))
QPEL_MC_DECL(put_, _ssse3)
QPEL_MC_DECL(avg_, _ssse3)
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  8
#define HCOFF(x)  (64 * (x - 1))
#define VCOFF(x)  (64 * (x - 1))
QPEL_MC_DECL(put_, _sse2)
QPEL_MC_DECL(avg_, _sse2)
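/* The COFF() scale mirrors how the assembly lays out its coefficient
 * tables: presumably 32 bytes per qpel position for the SSSE3 filters
 * versus 64 for the SSE2/MMX ones, so HCOFF(2) is 32 above but 64 below. */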
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  4
#define HCOFF(x)  (64 * (x - 1))
#define VCOFF(x)  (64 * (x - 1))

QPEL_MC_DECL(put_, _mmx)
#define ff_put_rv40_qpel_h_mmx2 ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_mmx2 ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _mmx2)
#define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _3dnow)
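/* MMX2 and 3DNow! supply only their own avg_ helpers; the put_ row/column
 * filters are shared with plain MMX, so the aliases above let the
 * QPEL_FUNC_DECL expansions resolve to the MMX symbols. */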
/** Set one function */
#define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT)          \
    c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = \
        OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;
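/* The row is selected by block size (16 -> 2 - 16 / 8 == 0, 8 -> 1) and the
 * column by the qpel position: e.g. PH = 1, PV = 3 fills slot 4 * 3 + 1 == 13,
 * matching the _mc13 suffix of the function assigned to it. */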
/** Set functions put and avg for sizes 8 and 16 and a given qpel position */
#define QPEL_FUNCS_SET(OP, PH, PV, OPT) \
    QPEL_FUNC_SET(OP, 8,  PH, PV, OPT)  \
    QPEL_FUNC_SET(OP, 16, PH, PV, OPT)
/** Set all functions for all sizes and qpel positions */
#define QPEL_MC_SET(OP, OPT)   \
QPEL_FUNCS_SET (OP, 0, 1, OPT) \
QPEL_FUNCS_SET (OP, 0, 3, OPT) \
QPEL_FUNCS_SET (OP, 1, 0, OPT) \
QPEL_FUNCS_SET (OP, 1, 1, OPT) \
QPEL_FUNCS_SET (OP, 1, 2, OPT) \
QPEL_FUNCS_SET (OP, 1, 3, OPT) \
QPEL_FUNCS_SET (OP, 2, 1, OPT) \
QPEL_FUNCS_SET (OP, 2, 2, OPT) \
QPEL_FUNCS_SET (OP, 2, 3, OPT) \
QPEL_FUNCS_SET (OP, 3, 0, OPT) \
QPEL_FUNCS_SET (OP, 3, 1, OPT) \
QPEL_FUNCS_SET (OP, 3, 2, OPT)
void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx;
        c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx;
        c->put_pixels_tab[0][15]    = ff_put_rv40_qpel16_mc33_mmx;
        c->put_pixels_tab[1][15]    = ff_put_rv40_qpel8_mc33_mmx;
        c->avg_pixels_tab[0][15]    = ff_avg_rv40_qpel16_mc33_mmx;
        c->avg_pixels_tab[1][15]    = ff_avg_rv40_qpel8_mc33_mmx;
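        /* slot 15 == 4 * 3 + 3, i.e. the (3,3) position excluded from
         * QPEL_MC_SET; it gets these dedicated mc33 (_xy2-style) versions */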
        QPEL_MC_SET(put_, _mmx)
    }
    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->avg_chroma_pixels_tab[0]     = ff_avg_rv40_chroma_mc8_mmx2;
        c->avg_chroma_pixels_tab[1]     = ff_avg_rv40_chroma_mc4_mmx2;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmx2;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmx2;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmx2;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmx2;
        QPEL_MC_SET(avg_, _mmx2)
    } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
        QPEL_MC_SET(avg_, _3dnow)
    }
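    /* The SSE2/SSSE3 checks run after the MMX2 one on purpose: on capable
     * CPUs they overwrite the weight and qpel pointers set above with the
     * wider variants. */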
    if (mm_flags & AV_CPU_FLAG_SSE2) {
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
        QPEL_MC_SET(put_, _sse2)
        QPEL_MC_SET(avg_, _sse2)
    }
    if (mm_flags & AV_CPU_FLAG_SSSE3) {
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
        QPEL_MC_SET(put_, _ssse3)
        QPEL_MC_SET(avg_, _ssse3)
    }
}