avcodec: loongson3 optimized h264dsp weighted mc with mmi
author     Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
           Wed, 27 May 2015 03:11:29 +0000 (11:11 +0800)
committer  Michael Niedermayer <michaelni@gmx.at>
           Wed, 27 May 2015 11:01:54 +0000 (13:01 +0200)
Signed-off-by: ZhouXiaoyong <zhouxiaoyong@loongson.cn>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
libavcodec/mips/Makefile
libavcodec/mips/h264dsp_init_mips.c
libavcodec/mips/h264dsp_mips.h
libavcodec/mips/h264dsp_mmi.c [new file with mode: 0644]

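For reference, the new kernels implement 8-bit H.264 explicit weighted prediction. A minimal scalar sketch of the same computation (mirroring FFmpeg's generic C path; weight_pixels_c and the inline clip are illustrative names, not part of the commit):

    /* Scalar sketch of what ff_h264_weight_pixels{16,8,4}_8_mmi compute:
     * scale each pixel, add the pre-folded rounding offset, shift, clip. */
    static void weight_pixels_c(uint8_t *block, int stride, int width,
                                int height, int log2_denom, int weight,
                                int offset)
    {
        offset <<= log2_denom;
        if (log2_denom)
            offset += 1 << (log2_denom - 1);   /* rounding term folded in */

        for (int y = 0; y < height; y++, block += stride)
            for (int x = 0; x < width; x++) {
                int v = (block[x] * weight + offset) >> log2_denom;
                block[x] = v < 0 ? 0 : v > 255 ? 255 : v;
            }
    }

The MMI versions in the diff below do the same arithmetic 8 (or 16) pixels at a time: punpck{l,h}bh widens bytes to halfwords, pmullh/paddsh/psrah perform the multiply-add-shift in 16-bit lanes, and packushb clips and repacks to bytes.
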
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index eaedd7f8f52c6ea75d5c6647344aed68b099a2bf..25813e7adfd10e19bc6346370213bf3cf8bf4d4b 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -22,3 +22,4 @@ OBJS-$(CONFIG_HEVC_DECODER)               += mips/hevcdsp_init_mips.o
 OBJS-$(CONFIG_H264DSP)                    += mips/h264dsp_init_mips.o
 MSA-OBJS-$(CONFIG_HEVC_DECODER)           += mips/hevcdsp_msa.o
 MSA-OBJS-$(CONFIG_H264DSP)                += mips/h264dsp_msa.o
+LOONGSON3-OBJS-$(CONFIG_H264DSP)          += mips/h264dsp_mmi.o
diff --git a/libavcodec/mips/h264dsp_init_mips.c b/libavcodec/mips/h264dsp_init_mips.c
index 8d3d76085f6a9d4fa262800d9e658178bf9b917f..d9182f28a5ed5a28c5d117017cc5b1a56e35436b 100644
--- a/libavcodec/mips/h264dsp_init_mips.c
+++ b/libavcodec/mips/h264dsp_init_mips.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2015 Parag Salasakar (Parag.Salasakar@imgtec.com)
+ * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
  *
  * This file is part of FFmpeg.
  *
@@ -65,10 +66,30 @@ static av_cold void h264dsp_init_msa(H264DSPContext *c,
 }
 #endif  // #if HAVE_MSA
 
+#if HAVE_LOONGSON3
+static av_cold void h264dsp_init_mmi(H264DSPContext *c,
+                                     const int bit_depth,
+                                     const int chroma_format_idc)
+{
+    if (bit_depth == 8) {
+        c->weight_h264_pixels_tab[0] = ff_h264_weight_pixels16_8_mmi;
+        c->weight_h264_pixels_tab[1] = ff_h264_weight_pixels8_8_mmi;
+        c->weight_h264_pixels_tab[2] = ff_h264_weight_pixels4_8_mmi;
+
+        c->biweight_h264_pixels_tab[0] = ff_h264_biweight_pixels16_8_mmi;
+        c->biweight_h264_pixels_tab[1] = ff_h264_biweight_pixels8_8_mmi;
+        c->biweight_h264_pixels_tab[2] = ff_h264_biweight_pixels4_8_mmi;
+    }
+}
+#endif /* HAVE_LOONGSON3 */
+
 av_cold void ff_h264dsp_init_mips(H264DSPContext *c, const int bit_depth,
                                   const int chroma_format_idc)
 {
 #if HAVE_MSA
     h264dsp_init_msa(c, bit_depth, chroma_format_idc);
 #endif  // #if HAVE_MSA
+#if HAVE_LOONGSON3
+    h264dsp_init_mmi(c, bit_depth, chroma_format_idc);
+#endif /* HAVE_LOONGSON3 */
 }
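
For context, this MIPS initializer is reached from the generic H.264 DSP setup. A rough, abridged sketch of the call site in libavcodec/h264dsp.c (from memory, not part of this diff):

    av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
    {
        /* ... generic C implementations are installed first ... */
        if (ARCH_MIPS)
            ff_h264dsp_init_mips(c, bit_depth, chroma_format_idc);
    }

So the MMI pointers only overwrite the C (or MSA) defaults when FFmpeg was configured for Loongson-3 and the stream is 8-bit.
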
diff --git a/libavcodec/mips/h264dsp_mips.h b/libavcodec/mips/h264dsp_mips.h
index df9b0b29d7345d6f46989894ab08ee0ae19fce8d..319f6d3bbd51b822a5ea3971c6a97409e5a6b42f 100644
--- a/libavcodec/mips/h264dsp_mips.h
+++ b/libavcodec/mips/h264dsp_mips.h
@@ -68,4 +68,20 @@ void ff_weight_h264_pixels8_8_msa(uint8_t *src, int stride, int height,
 void ff_weight_h264_pixels4_8_msa(uint8_t *src, int stride, int height,
                                   int log2_denom, int weight, int offset);
 
+void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, int height,
+        int log2_denom, int weight, int offset);
+void ff_h264_biweight_pixels16_8_mmi(uint8_t *dst, uint8_t *src,
+        int stride, int height, int log2_denom, int weightd, int weights,
+        int offset);
+void ff_h264_weight_pixels8_8_mmi(uint8_t *block, int stride, int height,
+        int log2_denom, int weight, int offset);
+void ff_h264_biweight_pixels8_8_mmi(uint8_t *dst, uint8_t *src,
+        int stride, int height, int log2_denom, int weightd, int weights,
+        int offset);
+void ff_h264_weight_pixels4_8_mmi(uint8_t *block, int stride, int height,
+        int log2_denom, int weight, int offset);
+void ff_h264_biweight_pixels4_8_mmi(uint8_t *dst, uint8_t *src,
+        int stride, int height, int log2_denom, int weightd, int weights,
+        int offset);
+
 #endif  // #ifndef H264_DSP_MIPS_H
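
The biweight prototypes blend a motion-compensated src block into dst with per-reference weights. A scalar sketch of the computation (again mirroring the generic C template; biweight_pixels_c is an illustrative name, not part of the commit):

    /* Scalar sketch of ff_h264_biweight_pixels{16,8,4}_8_mmi: the
     * ((offset + 1) | 1) << log2_denom constant pre-folds the spec's
     * rounding, so one add and one shift by log2_denom + 1 suffice. */
    static void biweight_pixels_c(uint8_t *dst, uint8_t *src, int stride,
                                  int width, int height, int log2_denom,
                                  int weightd, int weights, int offset)
    {
        offset = ((offset + 1) | 1) << log2_denom;

        for (int y = 0; y < height; y++, dst += stride, src += stride)
            for (int x = 0; x < width; x++) {
                int v = (dst[x] * weightd + src[x] * weights + offset)
                        >> (log2_denom + 1);
                dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
            }
    }
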
diff --git a/libavcodec/mips/h264dsp_mmi.c b/libavcodec/mips/h264dsp_mmi.c
new file mode 100644 (file)
index 0000000..641cd2f
--- /dev/null
+++ b/libavcodec/mips/h264dsp_mmi.c
@@ -0,0 +1,278 @@
+/*
+ * Loongson SIMD optimized h264dsp
+ *
+ * Copyright (c) 2015 Loongson Technology Corporation Limited
+ * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
+ *                    Zhang Shuangshuang <zhangshuangshuang@ict.ac.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/bit_depth_template.c"
+#include "h264dsp_mips.h"
+
+void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride,
+        int height, int log2_denom, int weight, int offset)
+{
+    int y;
+
+    offset <<= log2_denom;
+
+    if (log2_denom)
+        offset += 1 << (log2_denom - 1);
+
+    for (y=0; y<height; y++, block+=stride) {
+        __asm__ volatile (
+            "ldc1 $f2, %0                   \r\n"
+            "ldc1 $f4, %1                   \r\n"
+            "dmtc1 $0, $f20                 \r\n"
+            "mtc1 %2, $f6                   \r\n"
+            "mtc1 %3, $f8                   \r\n"
+            "mtc1 %4, $f10                  \r\n"
+            "pshufh $f6, $f6, $f20          \r\n"
+            "pshufh $f8, $f8, $f20          \r\n"
+            "punpckhbh $f14, $f2, $f20      \r\n"
+            "punpckhbh $f16, $f4, $f20      \r\n"
+            "punpcklbh $f2, $f2, $f20       \r\n"
+            "punpcklbh $f4, $f4, $f20       \r\n"
+            "pmullh $f14, $f14, $f6         \r\n"
+            "pmullh $f16, $f16, $f6         \r\n"
+            "pmullh $f2, $f2, $f6           \r\n"
+            "pmullh $f4, $f4, $f6           \r\n"
+            "paddsh $f14, $f14, $f8         \r\n"
+            "paddsh $f16, $f16, $f8         \r\n"
+            "paddsh $f2, $f2, $f8           \r\n"
+            "paddsh $f4, $f4, $f8           \r\n"
+            "psrah $f14, $f14, $f10         \r\n"
+            "psrah $f16, $f16, $f10         \r\n"
+            "psrah $f2, $f2, $f10           \r\n"
+            "psrah $f4, $f4, $f10           \r\n"
+            "packushb $f2, $f2, $f14        \r\n"
+            "packushb $f4, $f4, $f16        \r\n"
+            "sdc1 $f2, %0                   \r\n"
+            "sdc1 $f4, %1                   \r\n"
+            : "=m"(*block),"=m"(*(block + 8))
+            : "r"(weight),"r"(offset),"r"(log2_denom)
+        );
+    }
+}
+
+void ff_h264_biweight_pixels16_8_mmi(uint8_t *dst, uint8_t *src,
+        int stride, int height, int log2_denom, int weightd, int weights,
+        int offset)
+{
+    int y;
+
+    offset = ((offset + 1) | 1) << log2_denom;
+
+    for (y=0; y<height; y++, dst+=stride, src+=stride) {
+        __asm__ volatile (
+            "ldc1 $f2, %2                   \r\n"
+            "ldc1 $f4, %3                   \r\n"
+            "dmtc1 $0, $f20                 \r\n"
+            "mtc1 %6, $f6                   \r\n"
+            "mtc1 %7, $f8                   \r\n"
+            "mtc1 %8, $f10                  \r\n"
+            "mtc1 %9, $f12                  \r\n"
+            "pshufh $f6, $f6, $f20          \r\n"
+            "pshufh $f8, $f8, $f20          \r\n"
+            "pshufh $f10, $f10, $f20        \r\n"
+            "punpckhbh $f14, $f2, $f20      \r\n"
+            "punpckhbh $f16, $f4, $f20      \r\n"
+            "punpcklbh $f2, $f2, $f20       \r\n"
+            "punpcklbh $f4, $f4, $f20       \r\n"
+            "pmullh $f14, $f14, $f6         \r\n"
+            "pmullh $f16, $f16, $f8         \r\n"
+            "pmullh $f2, $f2, $f6           \r\n"
+            "pmullh $f4, $f4, $f8           \r\n"
+            "paddsh $f14, $f14, $f10        \r\n"
+            "paddsh $f2, $f2, $f10          \r\n"
+            "paddsh $f14, $f14, $f16        \r\n"
+            "paddsh $f2, $f2, $f4           \r\n"
+            "psrah $f14, $f14, $f12         \r\n"
+            "psrah $f2, $f2, $f12           \r\n"
+            "packushb $f2, $f2, $f14        \r\n"
+            "sdc1 $f2, %0                   \r\n"
+            "ldc1 $f2, %4                   \r\n"
+            "ldc1 $f4, %5                   \r\n"
+            "punpckhbh $f14, $f2, $f20      \r\n"
+            "punpckhbh $f16, $f4, $f20      \r\n"
+            "punpcklbh $f2, $f2, $f20       \r\n"
+            "punpcklbh $f4, $f4, $f20       \r\n"
+            "pmullh $f14, $f14, $f6         \r\n"
+            "pmullh $f16, $f16, $f8         \r\n"
+            "pmullh $f2, $f2, $f6           \r\n"
+            "pmullh $f4, $f4, $f8           \r\n"
+            "paddsh $f14, $f14, $f10        \r\n"
+            "paddsh $f2, $f2, $f10          \r\n"
+            "paddsh $f14, $f14, $f16        \r\n"
+            "paddsh $f2, $f2, $f4           \r\n"
+            "psrah $f14, $f14, $f12         \r\n"
+            "psrah $f2, $f2, $f12           \r\n"
+            "packushb $f2, $f2, $f14        \r\n"
+            "sdc1 $f2, %1                   \r\n"
+            : "=m"(*dst),"=m"(*(dst+8))
+            : "m"(*src),"m"(*dst),"m"(*(src+8)),"m"(*(dst+8)),
+              "r"(weights),"r"(weightd),"r"(offset),"r"(log2_denom+1)
+        );
+    }
+}
+
+void ff_h264_weight_pixels8_8_mmi(uint8_t *block, int stride, int height,
+        int log2_denom, int weight, int offset)
+{
+    int y;
+
+    offset <<= log2_denom;
+
+    if (log2_denom)
+        offset += 1 << (log2_denom - 1);
+
+    for (y=0; y<height; y++, block+=stride) {
+        __asm__ volatile (
+            "ldc1 $f2, %0                   \r\n"
+            "mtc1 %1, $f6                   \r\n"
+            "mtc1 %2, $f8                   \r\n"
+            "mtc1 %3, $f10                  \r\n"
+            "dmtc1 $0, $f20                 \r\n"
+            "pshufh $f6, $f6, $f20          \r\n"
+            "pshufh $f8, $f8, $f20          \r\n"
+            "punpckhbh $f14, $f2, $f20      \r\n"
+            "punpcklbh $f2, $f2, $f20       \r\n"
+            "pmullh $f14, $f14, $f6         \r\n"
+            "pmullh $f2, $f2, $f6           \r\n"
+            "paddsh $f14, $f14, $f8         \r\n"
+            "paddsh $f2, $f2, $f8           \r\n"
+            "psrah $f14, $f14, $f10         \r\n"
+            "psrah $f2, $f2, $f10           \r\n"
+            "packushb $f2, $f2, $f14        \r\n"
+            "sdc1 $f2, %0                   \r\n"
+            : "=m"(*block)
+            : "r"(weight),"r"(offset),"r"(log2_denom)
+        );
+    }
+}
+
+void ff_h264_biweight_pixels8_8_mmi(uint8_t *dst, uint8_t *src,
+        int stride, int height, int log2_denom, int weightd, int weights,
+        int offset)
+{
+    int y;
+
+    offset = ((offset + 1) | 1) << log2_denom;
+
+    for (y=0; y<height; y++, dst+=stride, src+=stride) {
+        __asm__ volatile (
+            "ldc1 $f2, %1                   \r\n"
+            "ldc1 $f4, %2                   \r\n"
+            "dmtc1 $0, $f20                 \r\n"
+            "mtc1 %3, $f6                   \r\n"
+            "mtc1 %4, $f8                   \r\n"
+            "mtc1 %5, $f10                  \r\n"
+            "mtc1 %6, $f12                  \r\n"
+            "pshufh $f6, $f6, $f20          \r\n"
+            "pshufh $f8, $f8, $f20          \r\n"
+            "pshufh $f10, $f10, $f20        \r\n"
+            "punpckhbh $f14, $f2, $f20      \r\n"
+            "punpckhbh $f16, $f4, $f20      \r\n"
+            "punpcklbh $f2, $f2, $f20       \r\n"
+            "punpcklbh $f4, $f4, $f20       \r\n"
+            "pmullh $f14, $f14, $f6         \r\n"
+            "pmullh $f16, $f16, $f8         \r\n"
+            "pmullh $f2, $f2, $f6           \r\n"
+            "pmullh $f4, $f4, $f8           \r\n"
+            "paddsh $f14, $f14, $f10        \r\n"
+            "paddsh $f2, $f2, $f10          \r\n"
+            "paddsh $f14, $f14, $f16        \r\n"
+            "paddsh $f2, $f2, $f4           \r\n"
+            "psrah $f14, $f14, $f12         \r\n"
+            "psrah $f2, $f2, $f12           \r\n"
+            "packushb $f2, $f2, $f14        \r\n"
+            "sdc1 $f2, %0                   \r\n"
+            : "=m"(*dst)
+            : "m"(*src),"m"(*dst),"r"(weights),
+              "r"(weightd),"r"(offset),"r"(log2_denom+1)
+        );
+    }
+}
+
+void ff_h264_weight_pixels4_8_mmi(uint8_t *block, int stride, int height,
+        int log2_denom, int weight, int offset)
+{
+    int y;
+
+    offset <<= log2_denom;
+
+    if (log2_denom)
+        offset += 1 << (log2_denom - 1);
+
+    for (y=0; y<height; y++, block+=stride) {
+        __asm__ volatile (
+            "lwc1 $f2, %0                   \r\n"
+            "mtc1 %1, $f6                   \r\n"
+            "mtc1 %2, $f8                   \r\n"
+            "mtc1 %3, $f10                  \r\n"
+            "dmtc1 $0, $f20                 \r\n"
+            "pshufh $f6, $f6, $f20          \r\n"
+            "pshufh $f8, $f8, $f20          \r\n"
+            "punpcklbh $f2, $f2, $f20       \r\n"
+            "pmullh $f2, $f2, $f6           \r\n"
+            "paddsh $f2, $f2, $f8           \r\n"
+            "psrah $f2, $f2, $f10           \r\n"
+            "packushb $f2, $f2, $f20        \r\n"
+            "swc1 $f2, %0                   \r\n"
+            : "=m"(*block)
+            : "r"(weight),"r"(offset),"r"(log2_denom)
+        );
+    }
+}
+
+void ff_h264_biweight_pixels4_8_mmi(uint8_t *dst, uint8_t *src,
+        int stride, int height, int log2_denom, int weightd, int weights,
+        int offset)
+{
+    int y;
+
+    offset = ((offset + 1) | 1) << log2_denom;
+
+    for (y=0; y<height; y++, dst+=stride, src+=stride) {
+        __asm__ volatile (
+            "lwc1 $f2, %1                   \r\n"
+            "lwc1 $f4, %2                   \r\n"
+            "dmtc1 $0, $f20                 \r\n"
+            "mtc1 %3, $f6                   \r\n"
+            "mtc1 %4, $f8                   \r\n"
+            "mtc1 %5, $f10                  \r\n"
+            "mtc1 %6, $f12                  \r\n"
+            "pshufh $f6, $f6, $f20          \r\n"
+            "pshufh $f8, $f8, $f20          \r\n"
+            "pshufh $f10, $f10, $f20        \r\n"
+            "punpcklbh $f2, $f2, $f20       \r\n"
+            "punpcklbh $f4, $f4, $f20       \r\n"
+            "pmullh $f2, $f2, $f6           \r\n"
+            "pmullh $f4, $f4, $f8           \r\n"
+            "paddsh $f2, $f2, $f10          \r\n"
+            "paddsh $f2, $f2, $f4           \r\n"
+            "psrah $f2, $f2, $f12           \r\n"
+            "packushb $f2, $f2, $f20        \r\n"
+            "swc1 $f2, %0                   \r\n"
+            : "=m"(*dst)
+            : "m"(*src),"m"(*dst),"r"(weights),
+              "r"(weightd),"r"(offset),"r"(log2_denom+1)
+        );
+    }
+}
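
A quick way to sanity-check the new kernels on Loongson-3 hardware is to compare one block against the scalar formula. A hypothetical standalone harness (clip8 and the test values are mine, not from the commit; assumes it is linked against the new object with h264dsp_mips.h on the include path):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include "h264dsp_mips.h"

    static uint8_t clip8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    int main(void)
    {
        uint8_t mmi[8 * 4], ref[8 * 4];
        int log2_denom = 5, weight = 33, offset = 4; /* arbitrary legal values */

        for (int i = 0; i < 8 * 4; i++)
            mmi[i] = ref[i] = (uint8_t)(i * 7 + 3);

        ff_h264_weight_pixels8_8_mmi(mmi, 8, 4, log2_denom, weight, offset);

        int off = (offset << log2_denom) + (1 << (log2_denom - 1));
        for (int i = 0; i < 8 * 4; i++)
            ref[i] = clip8((ref[i] * weight + off) >> log2_denom);

        puts(memcmp(mmi, ref, sizeof ref) ? "MISMATCH" : "OK");
        return 0;
    }

Note the MMI path works in saturating 16-bit lanes (paddsh), so inputs near the int16_t limit could legitimately diverge from the plain C formula; the values above stay well inside the safe range.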