*
* Ported from arm/hevcdsp_idct_neon.S by
* Copyright (c) 2020 Reimar Döffinger
+ * Copyright (c) 2020 Josh Dekker
*
* This file is part of FFmpeg.
*
// Instantiate the full 16x16 IDCT for 8- and 10-bit input
// (idct_16x16 macro definition is not visible in this chunk).
idct_16x16 8
idct_16x16 10
+
+// void ff_hevc_idct_NxN_dc_DEPTH_neon(int16_t *coeffs)
+//
+// DC-only inverse transform: every sample of the NxN block is set to
+//   dc = (((coeffs[0] + 1) >> 1) + (1 << (13 - bitdepth))) >> (14 - bitdepth)
+// matching the C reference idct_dc. Rounding is done with SRSHR, whose
+// round-and-shift happens at full internal precision, so it cannot wrap
+// a 16-bit lane the way an explicit 'add' of the rounding constant can
+// when coeffs[0] is near INT16_MAX.
+//
+// In:       x0 = coeffs (int16_t *)
+// Clobbers: x2, x12, x13, v0-v4 (all caller-saved under AAPCS64)
+.macro idct_dc size, bitdepth
+function ff_hevc_idct_\size\()x\size\()_dc_\bitdepth\()_neon, export=1
+        ld1r            {v4.8h}, [x0]                    // broadcast coeffs[0] to all lanes
+        srshr           v4.8h, v4.8h, #1                 // (c + 1) >> 1, overflow-safe
+        srshr           v0.8h, v4.8h, #(14 - \bitdepth)  // + (1 << (13 - bd)), >> (14 - bd)
+        srshr           v1.8h, v4.8h, #(14 - \bitdepth)
+.if \size > 4
+        srshr           v2.8h, v4.8h, #(14 - \bitdepth)
+        srshr           v3.8h, v4.8h, #(14 - \bitdepth)
+.if \size > 16 /* dc 32x32 */
+        mov             x2, #4                           // 4 loop iterations cover 32x32
+1:
+        subs            x2, x2, #1
+.endif
+        add             x12, x0, #64                     // second store pointer, interleaved
+        mov             x13, #128                        // advance both pointers by 128 bytes
+.if \size > 8 /* dc 16x16 */
+        st1             {v0.8h-v3.8h}, [x0], x13
+        st1             {v0.8h-v3.8h}, [x12], x13
+        st1             {v0.8h-v3.8h}, [x0], x13
+        st1             {v0.8h-v3.8h}, [x12], x13
+        st1             {v0.8h-v3.8h}, [x0], x13
+        st1             {v0.8h-v3.8h}, [x12], x13
+.endif /* dc 8x8 */
+        st1             {v0.8h-v3.8h}, [x0], x13
+        st1             {v0.8h-v3.8h}, [x12], x13
+.if \size > 16 /* dc 32x32 */
+        bne             1b
+.endif
+.else /* dc 4x4: 16 samples = 32 bytes = two q registers */
+        st1             {v0.8h-v1.8h}, [x0]
+.endif
+        ret
+endfunc
+.endm
+
+// Instantiate the DC-only IDCT for every transform size and bit depth.
+idct_dc 4, 8
+idct_dc 4, 10
+
+idct_dc 8, 8
+idct_dc 8, 10
+
+idct_dc 16, 8
+idct_dc 16, 10
+
+idct_dc 32, 8
+idct_dc 32, 10
void ff_hevc_idct_8x8_10_neon(int16_t *coeffs, int col_limit);
void ff_hevc_idct_16x16_8_neon(int16_t *coeffs, int col_limit);
void ff_hevc_idct_16x16_10_neon(int16_t *coeffs, int col_limit);
+/* NxN DC-only inverse transforms (assembly idct_dc macro instantiations) */
+void ff_hevc_idct_4x4_dc_8_neon(int16_t *coeffs);
+void ff_hevc_idct_8x8_dc_8_neon(int16_t *coeffs);
+void ff_hevc_idct_16x16_dc_8_neon(int16_t *coeffs);
+void ff_hevc_idct_32x32_dc_8_neon(int16_t *coeffs);
+void ff_hevc_idct_4x4_dc_10_neon(int16_t *coeffs);
+void ff_hevc_idct_8x8_dc_10_neon(int16_t *coeffs);
+void ff_hevc_idct_16x16_dc_10_neon(int16_t *coeffs);
+void ff_hevc_idct_32x32_dc_10_neon(int16_t *coeffs);
av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
{
 c->add_residual[3] = ff_hevc_add_residual_32x32_8_neon;
 c->idct[1] = ff_hevc_idct_8x8_8_neon;
 c->idct[2] = ff_hevc_idct_16x16_8_neon;
+ /* idct_dc[i] handles (4 << i) x (4 << i) blocks whose only nonzero
+ * coefficient is the DC term (8-bit output path). */
+ c->idct_dc[0] = ff_hevc_idct_4x4_dc_8_neon;
+ c->idct_dc[1] = ff_hevc_idct_8x8_dc_8_neon;
+ c->idct_dc[2] = ff_hevc_idct_16x16_dc_8_neon;
+ c->idct_dc[3] = ff_hevc_idct_32x32_dc_8_neon;
 }
 if (bit_depth == 10) {
 c->add_residual[0] = ff_hevc_add_residual_4x4_10_neon;
 c->add_residual[3] = ff_hevc_add_residual_32x32_10_neon;
 c->idct[1] = ff_hevc_idct_8x8_10_neon;
 c->idct[2] = ff_hevc_idct_16x16_10_neon;
+ /* same DC-only fast paths, 10-bit variants */
+ c->idct_dc[0] = ff_hevc_idct_4x4_dc_10_neon;
+ c->idct_dc[1] = ff_hevc_idct_8x8_dc_10_neon;
+ c->idct_dc[2] = ff_hevc_idct_16x16_dc_10_neon;
+ c->idct_dc[3] = ff_hevc_idct_32x32_dc_10_neon;
 }
}