/*
 * RV40 decoder motion compensation functions
 * Copyright (c) 2008 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * RV40 decoder motion compensation functions
 */

#include <assert.h>

#include "libavutil/common.h"
#include "avcodec.h"
#include "dsputil.h"
#include "rv34dsp.h"
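/**
 * Core 6-tap low-pass filter used for luma quarter-pel interpolation.
 * Each output sample is computed as
 *   (s[-2] + s[3] - 5*(s[-1] + s[2]) + C1*s[0] + C2*s[1] + (1<<(SHIFT-1))) >> SHIFT
 * where C1/C2 select the sub-pel phase: 52/20 with SHIFT 6 for the 1/4
 * position, 20/52 for 3/4, and 20/20 with SHIFT 5 for 1/2 (the weights
 * sum to 64 and 32 respectively, so SHIFT renormalizes the result).
 */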
#define RV40_LOWPASS(OPNAME, OP) \
static av_unused void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                                     const int h, const int C1, const int C2, const int SHIFT){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < h; i++)\
    {\
        OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst += dstStride;\
        src += srcStride;\
    }\
}\
\
static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                           const int w, const int C1, const int C2, const int SHIFT){\
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < w; i++)\
    {\
        const int srcB  = src[-2*srcStride];\
        const int srcA  = src[-1*srcStride];\
        const int src0  = src[0 *srcStride];\
        const int src1  = src[1 *srcStride];\
        const int src2  = src[2 *srcStride];\
        const int src3  = src[3 *srcStride];\
        const int src4  = src[4 *srcStride];\
        const int src5  = src[5 *srcStride];\
        const int src6  = src[6 *srcStride];\
        const int src7  = src[7 *srcStride];\
        const int src8  = src[8 *srcStride];\
        const int src9  = src[9 *srcStride];\
        const int src10 = src[10*srcStride];\
        OP(dst[0*dstStride], (srcB + src3  - 5*(srcA+src2) + src0*C1 + src1*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1*dstStride], (srcA + src4  - 5*(src0+src3) + src1*C1 + src2*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2*dstStride], (src0 + src5  - 5*(src1+src4) + src2*C1 + src3*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3*dstStride], (src1 + src6  - 5*(src2+src5) + src3*C1 + src4*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4*dstStride], (src2 + src7  - 5*(src3+src6) + src4*C1 + src5*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5*dstStride], (src3 + src8  - 5*(src4+src7) + src5*C1 + src6*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6*dstStride], (src4 + src9  - 5*(src5+src8) + src6*C1 + src7*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7*dstStride], (src5 + src10 - 5*(src6+src9) + src7*C1 + src8*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                            const int w, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, w-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
}\
\
static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,\
                                            const int h, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, h-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
}
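
/**
 * Instantiates one motion compensation function per quarter-pel position.
 * _mcXY_c interpolates at horizontal offset X/4 and vertical offset Y/4;
 * diagonal positions first filter horizontally into a temporary buffer
 * with five extra rows of vertical context (two above, three below),
 * then filter that buffer vertically.
 */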
#define RV40_MC(OPNAME, SIZE) \
static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}
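
/* The qpel OP macros store through ff_cropTbl, which clamps the filtered
 * value into the 0..255 pixel range; op_avg additionally rounds the
 * average with the pixel already present in dst. */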
#define op_avg(a, b)  a = (((a)+cm[b]+1)>>1)
#define op_put(a, b)  a = cm[b]

RV40_LOWPASS(put_   , op_put)
RV40_LOWPASS(avg_   , op_avg)

#undef op_avg
#undef op_put

RV40_MC(put_, 8)
RV40_MC(put_, 16)
RV40_MC(avg_, 8)
RV40_MC(avg_, 16)
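
/**
 * Rounding bias for chroma motion compensation, chosen by the upper bits
 * of the fractional motion vector as rv40_bias[y>>1][x>>1].
 */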
static const int rv40_bias[4][4] = {
    {  0, 16, 32, 16 },
    { 32, 28, 32, 28 },
    {  0, 32, 16, 32 },
    { 32, 28, 32, 28 }
};
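
/**
 * Bilinear chroma interpolation with the RV40-specific bias. The four
 * weights A, B, C and D always sum to 64; when the motion vector is
 * purely horizontal or vertical (D == 0) the filter collapses into a
 * cheaper two-tap form with weight E = B + C.
 */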
#define RV40_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## rv40_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}\
\
static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const int step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
            OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
            OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
            OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}
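
/* The chroma OP macros shift the 64-weight bilinear sum back down (>>6);
 * op_avg then averages with the existing destination pixel, rounding up. */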
#define op_avg(a, b) a = (((a)+((b)>>6)+1)>>1)
#define op_put(a, b) a = ((b)>>6)

RV40_CHROMA_MC(put_, op_put)
RV40_CHROMA_MC(avg_, op_avg)
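
/* Weighted averaging of the two B-frame predictions. The "rnd" variant
 * pre-shifts each weighted term by 9 bits before the final shift,
 * presumably to keep intermediates within 16 bits for the SIMD
 * implementations; "nornd" keeps full precision until the final shift. */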
#define RV40_WEIGHT_FUNC(size) \
static void rv40_weight_func_rnd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (((w2 * src1[i]) >> 9) + ((w1 * src2[i]) >> 9) + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}\
\
static void rv40_weight_func_nornd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (w2 * src1[i] + w1 * src2[i] + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}

RV40_WEIGHT_FUNC(16)
RV40_WEIGHT_FUNC(8)
/**
 * dither values for deblocking filter - left/top values
 */
static const uint8_t rv40_dither_l[16] = {
    0x40, 0x50, 0x20, 0x60, 0x30, 0x50, 0x40, 0x30,
    0x50, 0x40, 0x50, 0x30, 0x60, 0x20, 0x50, 0x40
};
/**
 * dither values for deblocking filter - right/bottom values
 */
static const uint8_t rv40_dither_r[16] = {
    0x40, 0x30, 0x60, 0x20, 0x50, 0x30, 0x30, 0x40,
    0x40, 0x40, 0x50, 0x30, 0x20, 0x60, 0x30, 0x40
};
#define CLIP_SYMM(a, b) av_clip(a, -(b), b)

/**
 * weaker deblocking very similar to the one described in 4.4.2 of JVT-A003r1
 */
static av_always_inline void rv40_weak_loop_filter(uint8_t *src,
                                                   const int step,
                                                   const ptrdiff_t stride,
                                                   const int filter_p1, const int filter_q1,
                                                   const int alpha, const int beta,
                                                   const int lim_p0q0, const int lim_q1,
                                                   const int lim_p1)
{
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int i, t, u, diff;

    for (i = 0; i < 4; i++, src += stride) {
        int diff_p1p0 = src[-2*step] - src[-1*step];
        int diff_q1q0 = src[ 1*step] - src[ 0*step];
        int diff_p1p2 = src[-2*step] - src[-3*step];
        int diff_q1q2 = src[ 1*step] - src[ 2*step];

        t = src[0*step] - src[-1*step];
        if (!t)
            continue;

        u = (alpha * FFABS(t)) >> 7;
        if (u > 3 - (filter_p1 && filter_q1))
            continue;

        t <<= 2;
        if (filter_p1 && filter_q1)
            t += src[-2*step] - src[1*step];

        diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0);
        src[-1*step] = cm[src[-1*step] + diff];
        src[ 0*step] = cm[src[ 0*step] - diff];

        if (filter_p1 && FFABS(diff_p1p2) <= beta) {
            t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
            src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
        }

        if (filter_q1 && FFABS(diff_q1q2) <= beta) {
            t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
            src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
        }
    }
}
static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, stride, 1, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, 1, stride, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
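
/**
 * Strong deblocking, used on sufficiently flat edges: p1..q1 are replaced
 * by dithered 5-tap smoothed values (using rv40_dither_l/r) and, unless
 * the edge is maximally flat (sflag == 0), clipped to +/-lims around the
 * original samples. For luma, p2 and q2 are smoothed as well.
 */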
static av_always_inline void rv40_strong_loop_filter(uint8_t *src,
                                                     const int step,
                                                     const ptrdiff_t stride,
                                                     const int alpha, const int lims,
                                                     const int dmode, const int chroma)
{
    int i;

    for(i = 0; i < 4; i++, src += stride){
        int sflag, p0, q0, p1, q1;
        int t = src[0*step] - src[-1*step];

        if (!t)
            continue;

        sflag = (alpha * FFABS(t)) >> 7;
        if (sflag > 1)
            continue;

        p0 = (25*src[-3*step] + 26*src[-2*step] + 26*src[-1*step] +
              26*src[ 0*step] + 25*src[ 1*step] +
              rv40_dither_l[dmode + i]) >> 7;

        q0 = (25*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] +
              26*src[ 1*step] + 25*src[ 2*step] +
              rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
            q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
        }

        p1 = (25*src[-4*step] + 26*src[-3*step] + 26*src[-2*step] + 26*p0 +
              25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
        q1 = (25*src[-1*step] + 26*q0 + 26*src[ 1*step] + 26*src[ 2*step] +
              25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
            q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
        }

        src[-2*step] = p1;
        src[-1*step] = p0;
        src[ 0*step] = q0;
        src[ 1*step] = q1;

        if (!chroma) {
            src[-3*step] = (25*src[-1*step] + 26*src[-2*step] +
                            51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
            src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] +
                            51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
        }
    }
}
static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
}
static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, 1, stride, alpha, lims, dmode, chroma);
}
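
/**
 * Edge analysis for the loop filter: reports through p1/q1 whether the
 * second pixel on each side may be filtered, and returns 1 when both
 * sides are smooth enough on an edge for the strong filter to be used.
 */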
static av_always_inline int rv40_loop_filter_strength(uint8_t *src,
                                                      int step, ptrdiff_t stride,
                                                      int beta, int beta2,
                                                      int edge,
                                                      int *p1, int *q1)
{
    int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
    int strong0 = 0, strong1 = 0;
    uint8_t *ptr;
    int i;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p0 += ptr[-2*step] - ptr[-1*step];
        sum_q1q0 += ptr[ 1*step] - ptr[ 0*step];
    }

    *p1 = FFABS(sum_p1p0) < (beta << 2);
    *q1 = FFABS(sum_q1q0) < (beta << 2);

    if (!*p1 && !*q1)
        return 0;

    if (!edge)
        return 0;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p2 += ptr[-2*step] - ptr[-3*step];
        sum_q1q2 += ptr[ 1*step] - ptr[ 2*step];
    }

    strong0 = *p1 && (FFABS(sum_p1p2) < beta2);
    strong1 = *q1 && (FFABS(sum_q1q2) < beta2);

    return strong0 && strong1;
}
static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1);
}
static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, 1, stride, beta, beta2, edge, p1, q1);
}
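
/**
 * The qpel tables are indexed by quarter-pel position (index = y*4 + x);
 * [0] holds 16x16 and [1] 8x8 functions. Positions 0, 2 and 8 reuse the
 * H.264 qpel functions, since RV40's half-pel filter (1, -5, 20, 20, -5, 1)
 * coincides with the H.264 one; position 15 uses the dedicated
 * ff_*_rv40_qpel*_mc33_c functions declared elsewhere.
 */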
av_cold void ff_rv40dsp_init(RV34DSPContext *c, DSPContext* dsp) {

    ff_rv34dsp_init(c, dsp);

    c->put_pixels_tab[0][ 0] = dsp->put_h264_qpel_pixels_tab[0][0];
    c->put_pixels_tab[0][ 1] = put_rv40_qpel16_mc10_c;
    c->put_pixels_tab[0][ 2] = dsp->put_h264_qpel_pixels_tab[0][2];
    c->put_pixels_tab[0][ 3] = put_rv40_qpel16_mc30_c;
    c->put_pixels_tab[0][ 4] = put_rv40_qpel16_mc01_c;
    c->put_pixels_tab[0][ 5] = put_rv40_qpel16_mc11_c;
    c->put_pixels_tab[0][ 6] = put_rv40_qpel16_mc21_c;
    c->put_pixels_tab[0][ 7] = put_rv40_qpel16_mc31_c;
    c->put_pixels_tab[0][ 8] = dsp->put_h264_qpel_pixels_tab[0][8];
    c->put_pixels_tab[0][ 9] = put_rv40_qpel16_mc12_c;
    c->put_pixels_tab[0][10] = put_rv40_qpel16_mc22_c;
    c->put_pixels_tab[0][11] = put_rv40_qpel16_mc32_c;
    c->put_pixels_tab[0][12] = put_rv40_qpel16_mc03_c;
    c->put_pixels_tab[0][13] = put_rv40_qpel16_mc13_c;
    c->put_pixels_tab[0][14] = put_rv40_qpel16_mc23_c;
    c->put_pixels_tab[0][15] = ff_put_rv40_qpel16_mc33_c;
    c->avg_pixels_tab[0][ 0] = dsp->avg_h264_qpel_pixels_tab[0][0];
    c->avg_pixels_tab[0][ 1] = avg_rv40_qpel16_mc10_c;
    c->avg_pixels_tab[0][ 2] = dsp->avg_h264_qpel_pixels_tab[0][2];
    c->avg_pixels_tab[0][ 3] = avg_rv40_qpel16_mc30_c;
    c->avg_pixels_tab[0][ 4] = avg_rv40_qpel16_mc01_c;
    c->avg_pixels_tab[0][ 5] = avg_rv40_qpel16_mc11_c;
    c->avg_pixels_tab[0][ 6] = avg_rv40_qpel16_mc21_c;
    c->avg_pixels_tab[0][ 7] = avg_rv40_qpel16_mc31_c;
    c->avg_pixels_tab[0][ 8] = dsp->avg_h264_qpel_pixels_tab[0][8];
    c->avg_pixels_tab[0][ 9] = avg_rv40_qpel16_mc12_c;
    c->avg_pixels_tab[0][10] = avg_rv40_qpel16_mc22_c;
    c->avg_pixels_tab[0][11] = avg_rv40_qpel16_mc32_c;
    c->avg_pixels_tab[0][12] = avg_rv40_qpel16_mc03_c;
    c->avg_pixels_tab[0][13] = avg_rv40_qpel16_mc13_c;
    c->avg_pixels_tab[0][14] = avg_rv40_qpel16_mc23_c;
    c->avg_pixels_tab[0][15] = ff_avg_rv40_qpel16_mc33_c;
    c->put_pixels_tab[1][ 0] = dsp->put_h264_qpel_pixels_tab[1][0];
    c->put_pixels_tab[1][ 1] = put_rv40_qpel8_mc10_c;
    c->put_pixels_tab[1][ 2] = dsp->put_h264_qpel_pixels_tab[1][2];
    c->put_pixels_tab[1][ 3] = put_rv40_qpel8_mc30_c;
    c->put_pixels_tab[1][ 4] = put_rv40_qpel8_mc01_c;
    c->put_pixels_tab[1][ 5] = put_rv40_qpel8_mc11_c;
    c->put_pixels_tab[1][ 6] = put_rv40_qpel8_mc21_c;
    c->put_pixels_tab[1][ 7] = put_rv40_qpel8_mc31_c;
    c->put_pixels_tab[1][ 8] = dsp->put_h264_qpel_pixels_tab[1][8];
    c->put_pixels_tab[1][ 9] = put_rv40_qpel8_mc12_c;
    c->put_pixels_tab[1][10] = put_rv40_qpel8_mc22_c;
    c->put_pixels_tab[1][11] = put_rv40_qpel8_mc32_c;
    c->put_pixels_tab[1][12] = put_rv40_qpel8_mc03_c;
    c->put_pixels_tab[1][13] = put_rv40_qpel8_mc13_c;
    c->put_pixels_tab[1][14] = put_rv40_qpel8_mc23_c;
    c->put_pixels_tab[1][15] = ff_put_rv40_qpel8_mc33_c;
    c->avg_pixels_tab[1][ 0] = dsp->avg_h264_qpel_pixels_tab[1][0];
    c->avg_pixels_tab[1][ 1] = avg_rv40_qpel8_mc10_c;
    c->avg_pixels_tab[1][ 2] = dsp->avg_h264_qpel_pixels_tab[1][2];
    c->avg_pixels_tab[1][ 3] = avg_rv40_qpel8_mc30_c;
    c->avg_pixels_tab[1][ 4] = avg_rv40_qpel8_mc01_c;
    c->avg_pixels_tab[1][ 5] = avg_rv40_qpel8_mc11_c;
    c->avg_pixels_tab[1][ 6] = avg_rv40_qpel8_mc21_c;
    c->avg_pixels_tab[1][ 7] = avg_rv40_qpel8_mc31_c;
    c->avg_pixels_tab[1][ 8] = dsp->avg_h264_qpel_pixels_tab[1][8];
    c->avg_pixels_tab[1][ 9] = avg_rv40_qpel8_mc12_c;
    c->avg_pixels_tab[1][10] = avg_rv40_qpel8_mc22_c;
    c->avg_pixels_tab[1][11] = avg_rv40_qpel8_mc32_c;
    c->avg_pixels_tab[1][12] = avg_rv40_qpel8_mc03_c;
    c->avg_pixels_tab[1][13] = avg_rv40_qpel8_mc13_c;
    c->avg_pixels_tab[1][14] = avg_rv40_qpel8_mc23_c;
    c->avg_pixels_tab[1][15] = ff_avg_rv40_qpel8_mc33_c;

    c->put_chroma_pixels_tab[0] = put_rv40_chroma_mc8_c;
    c->put_chroma_pixels_tab[1] = put_rv40_chroma_mc4_c;
    c->avg_chroma_pixels_tab[0] = avg_rv40_chroma_mc8_c;
    c->avg_chroma_pixels_tab[1] = avg_rv40_chroma_mc4_c;

    c->rv40_weight_pixels_tab[0][0] = rv40_weight_func_rnd_16;
    c->rv40_weight_pixels_tab[0][1] = rv40_weight_func_rnd_8;
    c->rv40_weight_pixels_tab[1][0] = rv40_weight_func_nornd_16;
    c->rv40_weight_pixels_tab[1][1] = rv40_weight_func_nornd_8;

    c->rv40_weak_loop_filter[0]     = rv40_h_weak_loop_filter;
    c->rv40_weak_loop_filter[1]     = rv40_v_weak_loop_filter;
    c->rv40_strong_loop_filter[0]   = rv40_h_strong_loop_filter;
    c->rv40_strong_loop_filter[1]   = rv40_v_strong_loop_filter;
    c->rv40_loop_filter_strength[0] = rv40_h_loop_filter_strength;
    c->rv40_loop_filter_strength[1] = rv40_v_loop_filter_strength;

    if (HAVE_MMX)
        ff_rv40dsp_init_x86(c, dsp);
    if (HAVE_NEON)
        ff_rv40dsp_init_neon(c, dsp);
}