/*
 * RV40 decoder motion compensation functions x86-optimised
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RV40 decoder motion compensation functions x86-optimised
 * 2,0 and 0,2 have h264 equivalents.
 * 3,3 is bugged in the rv40 format and maps to the _xy2 version.
 */
#include "libavcodec/rv34dsp.h"
#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "dsputil_mmx.h"

#if HAVE_YASM
void ff_put_rv40_chroma_mc8_mmx   (uint8_t *dst, uint8_t *src,
                                   int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmxext(uint8_t *dst, uint8_t *src,
                                   int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow (uint8_t *dst, uint8_t *src,
                                   int stride, int h, int x, int y);

void ff_put_rv40_chroma_mc4_mmx   (uint8_t *dst, uint8_t *src,
                                   int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmxext(uint8_t *dst, uint8_t *src,
                                   int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
                                   int stride, int h, int x, int y);
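
/*
 * Biweight prototypes for B-frame motion compensation: each function
 * writes a weighted average of src1 and src2 into dst using weights
 * w1/w2. The rnd/nornd split mirrors the C reference implementations:
 * the "rnd" variants round the two scaled sources before the final
 * shift, the "nornd" variants do not (exact rounding details live in
 * the C templates, not in this init file).
 */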
#define DECLARE_WEIGHT(opt) \
void ff_rv40_weight_func_rnd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_rnd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                      int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_16_##opt(uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride); \
void ff_rv40_weight_func_nornd_8_##opt (uint8_t *dst, uint8_t *src1, uint8_t *src2, \
                                        int w1, int w2, ptrdiff_t stride);
DECLARE_WEIGHT(mmxext)
DECLARE_WEIGHT(sse2)
DECLARE_WEIGHT(ssse3)
/** @{ */
/**
 * Define one qpel function.
 * LOOPSIZE must be already set to the number of pixels processed per
 * iteration in the inner loop of the called functions.
 * COFF(x) must be already defined so as to provide the offset into any
 * array of coeffs used by the called function for the qpel position x.
 */
#define QPEL_FUNC_DECL(OP, SIZE, PH, PV, OPT)                           \
static void OP ## rv40_qpel ##SIZE ##_mc ##PH ##PV ##OPT(uint8_t *dst, \
                                                         uint8_t *src, \
                                                         int stride)   \
{                                                                       \
    int i;                                                              \
    if (PH && PV) {                                                     \
        DECLARE_ALIGNED(16, uint8_t, tmp)[SIZE * (SIZE + 5)];           \
        uint8_t *tmpptr = tmp + SIZE * 2;                               \
        src -= stride * 2;                                              \
                                                                        \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_put_rv40_qpel_h ##OPT(tmp + i, SIZE, src + i, stride,    \
                                     SIZE + 5, HCOFF(PH));              \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ##OPT(dst + i, stride, tmpptr + i,   \
                                         SIZE, SIZE, VCOFF(PV));        \
    } else if (PV) {                                                    \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_v ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, VCOFF(PV));     \
    } else {                                                            \
        for (i = 0; i < SIZE; i += LOOPSIZE)                            \
            ff_ ##OP ##rv40_qpel_h ## OPT(dst + i, stride, src + i,     \
                                          stride, SIZE, HCOFF(PH));     \
    }                                                                   \
}
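
/*
 * Example expansion, for orientation: QPEL_FUNC_DECL(put_, 8, 1, 3, _ssse3)
 * defines put_rv40_qpel8_mc13_ssse3(), which filters horizontally into the
 * temporary buffer (SIZE + 5 rows, covering the vertical filter's margin)
 * and then vertically from tmp into dst. Purely horizontal (PV == 0) or
 * purely vertical (PH == 0) positions filter straight from src and skip
 * the temporary buffer.
 */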
/** Declare functions for sizes 8 and 16 and given operations
 * and qpel position. */
#define QPEL_FUNCS_DECL(OP, PH, PV, OPT) \
    QPEL_FUNC_DECL(OP,  8, PH, PV, OPT)  \
    QPEL_FUNC_DECL(OP, 16, PH, PV, OPT)
/** Declare all functions for all sizes and qpel positions */
#define QPEL_MC_DECL(OP, OPT)                                         \
void ff_ ##OP ##rv40_qpel_h ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
                                  const uint8_t *src,                 \
                                  ptrdiff_t srcStride,                \
                                  int len, int m);                    \
void ff_ ##OP ##rv40_qpel_v ##OPT(uint8_t *dst, ptrdiff_t dstStride, \
                                  const uint8_t *src,                 \
                                  ptrdiff_t srcStride,                \
                                  int len, int m);                    \
QPEL_FUNCS_DECL(OP, 0, 1, OPT) \
QPEL_FUNCS_DECL(OP, 0, 3, OPT) \
QPEL_FUNCS_DECL(OP, 1, 0, OPT) \
QPEL_FUNCS_DECL(OP, 1, 1, OPT) \
QPEL_FUNCS_DECL(OP, 1, 2, OPT) \
QPEL_FUNCS_DECL(OP, 1, 3, OPT) \
QPEL_FUNCS_DECL(OP, 2, 1, OPT) \
QPEL_FUNCS_DECL(OP, 2, 2, OPT) \
QPEL_FUNCS_DECL(OP, 2, 3, OPT) \
QPEL_FUNCS_DECL(OP, 3, 0, OPT) \
QPEL_FUNCS_DECL(OP, 3, 1, OPT) \
QPEL_FUNCS_DECL(OP, 3, 2, OPT)
/** @} */
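
/*
 * Instantiate the helpers per instruction set. LOOPSIZE is the number of
 * pixels each asm call advances per iteration; HCOFF/VCOFF translate a
 * qpel position into a byte offset inside the filter-coefficient tables
 * (32 bytes per position for the SSSE3 tables, 64 for SSE2/MMX -- an
 * inference from the offsets below; the tables themselves live in the
 * .asm file).
 */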
#define LOOPSIZE  8
#define HCOFF(x)  (32 * (x - 1))
#define VCOFF(x)  (32 * (x - 1))
QPEL_MC_DECL(put_, _ssse3)
QPEL_MC_DECL(avg_, _ssse3)
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  8
#define HCOFF(x)  (64 * (x - 1))
#define VCOFF(x)  (64 * (x - 1))
QPEL_MC_DECL(put_, _sse2)
QPEL_MC_DECL(avg_, _sse2)
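
/*
 * 32-bit only: MMX fallbacks working on 4 pixels per call, for CPUs
 * without SSE2.
 */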
#if ARCH_X86_32
#undef LOOPSIZE
#undef HCOFF
#undef VCOFF
#define LOOPSIZE  4
#define HCOFF(x)  (64 * (x - 1))
#define VCOFF(x)  (64 * (x - 1))

QPEL_MC_DECL(put_, _mmx)
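
/*
 * Only avg needs separate code per extension, presumably because only
 * the averaging step benefits from pavgb/pavgusb; put is unchanged, so
 * the MMXEXT/3DNow! instantiations reuse the MMX put helpers.
 */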
#define ff_put_rv40_qpel_h_mmxext ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_mmxext ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _mmxext)

#define ff_put_rv40_qpel_h_3dnow ff_put_rv40_qpel_h_mmx
#define ff_put_rv40_qpel_v_3dnow ff_put_rv40_qpel_v_mmx
QPEL_MC_DECL(avg_, _3dnow)
#endif /* ARCH_X86_32 */
/** @{ */
/** Set one function */
#define QPEL_FUNC_SET(OP, SIZE, PH, PV, OPT) \
    c-> OP ## pixels_tab[2 - SIZE / 8][4 * PV + PH] = OP ## rv40_qpel ##SIZE ## _mc ##PH ##PV ##OPT;
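
/*
 * Index math, worked through: the first index selects the block size
 * (16px -> 2 - 16/8 = 0, 8px -> 2 - 8/8 = 1) and the second packs the
 * qpel position as 4 * PV + PH, so e.g. mc33 lands at [0][15]/[1][15],
 * matching the inline-MMX assignments in ff_rv40dsp_init_x86() below.
 */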
/** Set functions put and avg for sizes 8 and 16 and a given qpel position */
#define QPEL_FUNCS_SET(OP, PH, PV, OPT) \
    QPEL_FUNC_SET(OP,  8, PH, PV, OPT)  \
    QPEL_FUNC_SET(OP, 16, PH, PV, OPT)
/** Set all functions for all sizes and qpel positions */
#define QPEL_MC_SET(OP, OPT)   \
QPEL_FUNCS_SET (OP, 0, 1, OPT) \
QPEL_FUNCS_SET (OP, 0, 3, OPT) \
QPEL_FUNCS_SET (OP, 1, 0, OPT) \
QPEL_FUNCS_SET (OP, 1, 1, OPT) \
QPEL_FUNCS_SET (OP, 1, 2, OPT) \
QPEL_FUNCS_SET (OP, 1, 3, OPT) \
QPEL_FUNCS_SET (OP, 2, 1, OPT) \
QPEL_FUNCS_SET (OP, 2, 2, OPT) \
QPEL_FUNCS_SET (OP, 2, 3, OPT) \
QPEL_FUNCS_SET (OP, 3, 0, OPT) \
QPEL_FUNCS_SET (OP, 3, 1, OPT) \
QPEL_FUNCS_SET (OP, 3, 2, OPT)
/** @} */
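
/*
 * Positions (0,0), (2,0), (0,2) and (3,3) are deliberately missing from
 * the list above: (0,0) is a plain copy, 2,0 and 0,2 have h264
 * equivalents, and 3,3 is bugged in the rv40 format and maps to the
 * _xy2 version (see the @file comment), so those slots are filled
 * elsewhere.
 */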
#endif /* HAVE_YASM */

void ff_rv40dsp_init_x86(RV34DSPContext *c, DSPContext *dsp)
{
#if HAVE_YASM
    int mm_flags = av_get_cpu_flags();
    if (EXTERNAL_MMX(mm_flags)) {
        c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_mmx;
        c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_mmx;
#if HAVE_MMX_INLINE
        c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_mmx;
        c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_mmx;
        c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_mmx;
        c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_mmx;
#endif /* HAVE_MMX_INLINE */
#if ARCH_X86_32
        QPEL_MC_SET(put_, _mmx)
#endif
    }
    if (EXTERNAL_MMXEXT(mm_flags)) {
        c->avg_chroma_pixels_tab[0]     = ff_avg_rv40_chroma_mc8_mmxext;
        c->avg_chroma_pixels_tab[1]     = ff_avg_rv40_chroma_mc4_mmxext;
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_mmxext;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_mmxext;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_mmxext;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_mmxext;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _mmxext)
#endif
    } else if (EXTERNAL_AMD3DNOW(mm_flags)) {
        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_3dnow;
        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_3dnow;
#if ARCH_X86_32
        QPEL_MC_SET(avg_, _3dnow)
#endif
    }
    if (EXTERNAL_SSE2(mm_flags)) {
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_sse2;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_sse2;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_sse2;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_sse2;
        QPEL_MC_SET(put_, _sse2)
        QPEL_MC_SET(avg_, _sse2)
    }
    if (EXTERNAL_SSSE3(mm_flags)) {
        c->rv40_weight_pixels_tab[0][0] = ff_rv40_weight_func_rnd_16_ssse3;
        c->rv40_weight_pixels_tab[0][1] = ff_rv40_weight_func_rnd_8_ssse3;
        c->rv40_weight_pixels_tab[1][0] = ff_rv40_weight_func_nornd_16_ssse3;
        c->rv40_weight_pixels_tab[1][1] = ff_rv40_weight_func_nornd_8_ssse3;
        QPEL_MC_SET(put_, _ssse3)
        QPEL_MC_SET(avg_, _ssse3)
    }
#endif /* HAVE_YASM */
}