From: Song Gao
Subject: [PATCH v2 16/46] target/loongarch: Implement xvmadd/xvmsub/xvmaddw{ev/od}
Date: Fri, 30 Jun 2023 15:58:34 +0800

This patch includes:
- XVMADD.{B/H/W/D};
- XVMSUB.{B/H/W/D};
- XVMADDW{EV/OD}.{H.B/W.H/D.W/Q.D}[U];
- XVMADDW{EV/OD}.{H.BU.B/W.HU.H/D.WU.W/Q.DU.D}.
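
For reference, the per-element semantics can be sketched in plain C as
below (an illustrative sketch over one lane size only, with hypothetical
sketch_* names; the actual helpers are macro-generated over all element
widths):

    #include <stdint.h>

    /* xvmadd.w: Vd[i] += Vj[i] * Vk[i] for each 32-bit lane */
    static void sketch_xvmadd_w(int32_t *vd, const int32_t *vj,
                                const int32_t *vk, int n)
    {
        for (int i = 0; i < n; i++) {
            vd[i] += vj[i] * vk[i];
        }
    }

    /* xvmaddwev.h.b: widen the even-indexed byte lanes and
     * accumulate into the halfword lanes:
     * Vd[i] += (int16_t)Vj[2 * i] * (int16_t)Vk[2 * i] */
    static void sketch_xvmaddwev_h_b(int16_t *vd, const int8_t *vj,
                                     const int8_t *vk, int n)
    {
        for (int i = 0; i < n; i++) {
            vd[i] += (int16_t)vj[2 * i] * (int16_t)vk[2 * i];
        }
    }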

Signed-off-by: Song Gao <gaosong@loongson.cn>
---
 target/loongarch/disas.c                     | 34 ++++++++++
 target/loongarch/insn_trans/trans_lasx.c.inc | 69 ++++++++++++++++++++
 target/loongarch/insn_trans/trans_lsx.c.inc  |  2 +
 target/loongarch/insns.decode                | 34 ++++++++++
 target/loongarch/vec.h                       |  3 +
 target/loongarch/vec_helper.c                | 33 +++++-----
 6 files changed, 160 insertions(+), 15 deletions(-)
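
A note on the Q-form translations (xvmaddwev/od.q.d, .q.du, .q.du.d)
below: they form the full 128-bit product with
tcg_gen_muls2/mulu2/mulus2_i64, which return the low and high 64-bit
halves, and accumulate it with tcg_gen_add2_i64. A minimal C sketch of
that double-word accumulate step, assuming the value is kept as two
unsigned 64-bit halves (illustrative only, not the TCG code):

    #include <stdint.h>

    /* (rl, rh) += (tl, th), propagating the carry out of the
     * low half into the high half, as tcg_gen_add2_i64 does. */
    static void sketch_add2_u64(uint64_t *rl, uint64_t *rh,
                                uint64_t tl, uint64_t th)
    {
        uint64_t lo = *rl + tl;
        *rh += th + (lo < tl);  /* carry if the low add wrapped */
        *rl = lo;
    }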

diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index e5f9a6bcdf..b115fe8315 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -1928,6 +1928,40 @@ INSN_LASX(xvmulwod_w_hu_h,   vvv)
 INSN_LASX(xvmulwod_d_wu_w,   vvv)
 INSN_LASX(xvmulwod_q_du_d,   vvv)
 
+INSN_LASX(xvmadd_b,          vvv)
+INSN_LASX(xvmadd_h,          vvv)
+INSN_LASX(xvmadd_w,          vvv)
+INSN_LASX(xvmadd_d,          vvv)
+INSN_LASX(xvmsub_b,          vvv)
+INSN_LASX(xvmsub_h,          vvv)
+INSN_LASX(xvmsub_w,          vvv)
+INSN_LASX(xvmsub_d,          vvv)
+
+INSN_LASX(xvmaddwev_h_b,     vvv)
+INSN_LASX(xvmaddwev_w_h,     vvv)
+INSN_LASX(xvmaddwev_d_w,     vvv)
+INSN_LASX(xvmaddwev_q_d,     vvv)
+INSN_LASX(xvmaddwod_h_b,     vvv)
+INSN_LASX(xvmaddwod_w_h,     vvv)
+INSN_LASX(xvmaddwod_d_w,     vvv)
+INSN_LASX(xvmaddwod_q_d,     vvv)
+INSN_LASX(xvmaddwev_h_bu,    vvv)
+INSN_LASX(xvmaddwev_w_hu,    vvv)
+INSN_LASX(xvmaddwev_d_wu,    vvv)
+INSN_LASX(xvmaddwev_q_du,    vvv)
+INSN_LASX(xvmaddwod_h_bu,    vvv)
+INSN_LASX(xvmaddwod_w_hu,    vvv)
+INSN_LASX(xvmaddwod_d_wu,    vvv)
+INSN_LASX(xvmaddwod_q_du,    vvv)
+INSN_LASX(xvmaddwev_h_bu_b,  vvv)
+INSN_LASX(xvmaddwev_w_hu_h,  vvv)
+INSN_LASX(xvmaddwev_d_wu_w,  vvv)
+INSN_LASX(xvmaddwev_q_du_d,  vvv)
+INSN_LASX(xvmaddwod_h_bu_b,  vvv)
+INSN_LASX(xvmaddwod_w_hu_h,  vvv)
+INSN_LASX(xvmaddwod_d_wu_w,  vvv)
+INSN_LASX(xvmaddwod_q_du_d,  vvv)
+
 INSN_LASX(xvreplgr2vr_b,     vr)
 INSN_LASX(xvreplgr2vr_h,     vr)
 INSN_LASX(xvreplgr2vr_w,     vr)
diff --git a/target/loongarch/insn_trans/trans_lasx.c.inc b/target/loongarch/insn_trans/trans_lasx.c.inc
index 1b07d3ce3a..2c2fae91b9 100644
--- a/target/loongarch/insn_trans/trans_lasx.c.inc
+++ b/target/loongarch/insn_trans/trans_lasx.c.inc
@@ -275,6 +275,75 @@ TRANS(xvmulwod_h_bu_b, gvec_vvv, 32, MO_8, do_vmulwod_u_s)
 TRANS(xvmulwod_w_hu_h, gvec_vvv, 32, MO_16, do_vmulwod_u_s)
 TRANS(xvmulwod_d_wu_w, gvec_vvv, 32, MO_32, do_vmulwod_u_s)
 
+TRANS(xvmadd_b, gvec_vvv, 32, MO_8, do_vmadd)
+TRANS(xvmadd_h, gvec_vvv, 32, MO_16, do_vmadd)
+TRANS(xvmadd_w, gvec_vvv, 32, MO_32, do_vmadd)
+TRANS(xvmadd_d, gvec_vvv, 32, MO_64, do_vmadd)
+TRANS(xvmsub_b, gvec_vvv, 32, MO_8, do_vmsub)
+TRANS(xvmsub_h, gvec_vvv, 32, MO_16, do_vmsub)
+TRANS(xvmsub_w, gvec_vvv, 32, MO_32, do_vmsub)
+TRANS(xvmsub_d, gvec_vvv, 32, MO_64, do_vmsub)
+
+TRANS(xvmaddwev_h_b, gvec_vvv, 32, MO_8, do_vmaddwev_s)
+TRANS(xvmaddwev_w_h, gvec_vvv, 32, MO_16, do_vmaddwev_s)
+TRANS(xvmaddwev_d_w, gvec_vvv, 32, MO_32, do_vmaddwev_s)
+
+#define XVMADD_Q(NAME, FN, idx1, idx2)                    \
+static bool trans_## NAME(DisasContext *ctx, arg_vvv * a) \
+{                                                         \
+    TCGv_i64 rh, rl, arg1, arg2, th, tl;                  \
+    int i;                                                \
+                                                          \
+    CHECK_VEC;                                            \
+                                                          \
+    rh = tcg_temp_new_i64();                              \
+    rl = tcg_temp_new_i64();                              \
+    arg1 = tcg_temp_new_i64();                            \
+    arg2 = tcg_temp_new_i64();                            \
+    th = tcg_temp_new_i64();                              \
+    tl = tcg_temp_new_i64();                              \
+                                                          \
+    for (i = 0; i < 2; i++) {                             \
+        get_vreg64(arg1, a->vj, idx1 + i * 2);            \
+        get_vreg64(arg2, a->vk, idx2 + i * 2);            \
+        get_vreg64(rh, a->vd, 1 + i * 2);                 \
+        get_vreg64(rl, a->vd, 0 + i * 2);                 \
+                                                          \
+        tcg_gen_## FN ##_i64(tl, th, arg1, arg2);         \
+        tcg_gen_add2_i64(rl, rh, rl, rh, tl, th);         \
+                                                          \
+        set_vreg64(rh, a->vd, 1 + i * 2);                 \
+        set_vreg64(rl, a->vd, 0 + i * 2);                 \
+    }                                                     \
+                                                          \
+    return true;                                          \
+}
+
+XVMADD_Q(xvmaddwev_q_d, muls2, 0, 0)
+XVMADD_Q(xvmaddwod_q_d, muls2, 1, 1)
+XVMADD_Q(xvmaddwev_q_du, mulu2, 0, 0)
+XVMADD_Q(xvmaddwod_q_du, mulu2, 1, 1)
+XVMADD_Q(xvmaddwev_q_du_d, mulus2, 0, 0)
+XVMADD_Q(xvmaddwod_q_du_d, mulus2, 1, 1)
+
+TRANS(xvmaddwod_h_b, gvec_vvv, 32, MO_8, do_vmaddwod_s)
+TRANS(xvmaddwod_w_h, gvec_vvv, 32, MO_16, do_vmaddwod_s)
+TRANS(xvmaddwod_d_w, gvec_vvv, 32, MO_32, do_vmaddwod_s)
+
+TRANS(xvmaddwev_h_bu, gvec_vvv, 32, MO_8, do_vmaddwev_u)
+TRANS(xvmaddwev_w_hu, gvec_vvv, 32, MO_16, do_vmaddwev_u)
+TRANS(xvmaddwev_d_wu, gvec_vvv, 32, MO_32, do_vmaddwev_u)
+TRANS(xvmaddwod_h_bu, gvec_vvv, 32, MO_8, do_vmaddwod_u)
+TRANS(xvmaddwod_w_hu, gvec_vvv, 32, MO_16, do_vmaddwod_u)
+TRANS(xvmaddwod_d_wu, gvec_vvv, 32, MO_32, do_vmaddwod_u)
+
+TRANS(xvmaddwev_h_bu_b, gvec_vvv, 32, MO_8, do_vmaddwev_u_s)
+TRANS(xvmaddwev_w_hu_h, gvec_vvv, 32, MO_16, do_vmaddwev_u_s)
+TRANS(xvmaddwev_d_wu_w, gvec_vvv, 32, MO_32, do_vmaddwev_u_s)
+TRANS(xvmaddwod_h_bu_b, gvec_vvv, 32, MO_8, do_vmaddwod_u_s)
+TRANS(xvmaddwod_w_hu_h, gvec_vvv, 32, MO_16, do_vmaddwod_u_s)
+TRANS(xvmaddwod_d_wu_w, gvec_vvv, 32, MO_32, do_vmaddwod_u_s)
+
 TRANS(xvreplgr2vr_b, gvec_dup, 32, MO_8)
 TRANS(xvreplgr2vr_h, gvec_dup, 32, MO_16)
 TRANS(xvreplgr2vr_w, gvec_dup, 32, MO_32)
diff --git a/target/loongarch/insn_trans/trans_lsx.c.inc b/target/loongarch/insn_trans/trans_lsx.c.inc
index cc97866ef9..f2f7c7a9aa 100644
--- a/target/loongarch/insn_trans/trans_lsx.c.inc
+++ b/target/loongarch/insn_trans/trans_lsx.c.inc
@@ -2333,6 +2333,8 @@ static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
 {                                                         \
     TCGv_i64 rh, rl, arg1, arg2, th, tl;                  \
                                                           \
+    CHECK_VEC;                                            \
+                                                          \
     rh = tcg_temp_new_i64();                              \
     rl = tcg_temp_new_i64();                              \
     arg1 = tcg_temp_new_i64();                            \
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index 0f9ebe641f..d6fb51ae64 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -1511,6 +1511,40 @@ xvmulwod_w_hu_h  0111 01001010 00101 ..... ..... .....    @vvv
 xvmulwod_d_wu_w  0111 01001010 00110 ..... ..... .....    @vvv
 xvmulwod_q_du_d  0111 01001010 00111 ..... ..... .....    @vvv
 
+xvmadd_b         0111 01001010 10000 ..... ..... .....    @vvv
+xvmadd_h         0111 01001010 10001 ..... ..... .....    @vvv
+xvmadd_w         0111 01001010 10010 ..... ..... .....    @vvv
+xvmadd_d         0111 01001010 10011 ..... ..... .....    @vvv
+xvmsub_b         0111 01001010 10100 ..... ..... .....    @vvv
+xvmsub_h         0111 01001010 10101 ..... ..... .....    @vvv
+xvmsub_w         0111 01001010 10110 ..... ..... .....    @vvv
+xvmsub_d         0111 01001010 10111 ..... ..... .....    @vvv
+
+xvmaddwev_h_b    0111 01001010 11000 ..... ..... .....    @vvv
+xvmaddwev_w_h    0111 01001010 11001 ..... ..... .....    @vvv
+xvmaddwev_d_w    0111 01001010 11010 ..... ..... .....    @vvv
+xvmaddwev_q_d    0111 01001010 11011 ..... ..... .....    @vvv
+xvmaddwod_h_b    0111 01001010 11100 ..... ..... .....    @vvv
+xvmaddwod_w_h    0111 01001010 11101 ..... ..... .....    @vvv
+xvmaddwod_d_w    0111 01001010 11110 ..... ..... .....    @vvv
+xvmaddwod_q_d    0111 01001010 11111 ..... ..... .....    @vvv
+xvmaddwev_h_bu   0111 01001011 01000 ..... ..... .....    @vvv
+xvmaddwev_w_hu   0111 01001011 01001 ..... ..... .....    @vvv
+xvmaddwev_d_wu   0111 01001011 01010 ..... ..... .....    @vvv
+xvmaddwev_q_du   0111 01001011 01011 ..... ..... .....    @vvv
+xvmaddwod_h_bu   0111 01001011 01100 ..... ..... .....    @vvv
+xvmaddwod_w_hu   0111 01001011 01101 ..... ..... .....    @vvv
+xvmaddwod_d_wu   0111 01001011 01110 ..... ..... .....    @vvv
+xvmaddwod_q_du   0111 01001011 01111 ..... ..... .....    @vvv
+xvmaddwev_h_bu_b 0111 01001011 11000 ..... ..... .....    @vvv
+xvmaddwev_w_hu_h 0111 01001011 11001 ..... ..... .....    @vvv
+xvmaddwev_d_wu_w 0111 01001011 11010 ..... ..... .....    @vvv
+xvmaddwev_q_du_d 0111 01001011 11011 ..... ..... .....    @vvv
+xvmaddwod_h_bu_b 0111 01001011 11100 ..... ..... .....    @vvv
+xvmaddwod_w_hu_h 0111 01001011 11101 ..... ..... .....    @vvv
+xvmaddwod_d_wu_w 0111 01001011 11110 ..... ..... .....    @vvv
+xvmaddwod_q_du_d 0111 01001011 11111 ..... ..... .....    @vvv
+
 xvreplgr2vr_b    0111 01101001 11110 00000 ..... .....    @vr
 xvreplgr2vr_h    0111 01101001 11110 00001 ..... .....    @vr
 xvreplgr2vr_w    0111 01101001 11110 00010 ..... .....    @vr
diff --git a/target/loongarch/vec.h b/target/loongarch/vec.h
index c371a59a2e..1abc6a3da0 100644
--- a/target/loongarch/vec.h
+++ b/target/loongarch/vec.h
@@ -62,4 +62,7 @@
 
 #define DO_MUL(a, b)    (a * b)
 
+#define DO_MADD(a, b, c)  (a + b * c)
+#define DO_MSUB(a, b, c)  (a - b * c)
+
 #endif /* LOONGARCH_VEC_H */
diff --git a/target/loongarch/vec_helper.c b/target/loongarch/vec_helper.c
index 804fbc6969..367b794853 100644
--- a/target/loongarch/vec_helper.c
+++ b/target/loongarch/vec_helper.c
@@ -508,17 +508,16 @@ DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
 DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
 DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
 
-#define DO_MADD(a, b, c)  (a + b * c)
-#define DO_MSUB(a, b, c)  (a - b * c)
-
 #define VMADDSUB(NAME, BIT, E, DO_OP)                       \
 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
 {                                                           \
-    int i;                                                  \
+    int i, len;                                             \
     VReg *Vd = (VReg *)vd;                                  \
     VReg *Vj = (VReg *)vj;                                  \
     VReg *Vk = (VReg *)vk;                                  \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
+                                                            \
+    len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;       \
+    for (i = 0; i < len / BIT; i++) {                       \
         Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i) ,Vk->E(i));     \
     }                                                       \
 }
@@ -535,13 +534,14 @@ VMADDSUB(vmsub_d, 64, D, DO_MSUB)
 #define VMADDWEV(NAME, BIT, E1, E2, DO_OP)                        \
 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)       \
 {                                                                 \
-    int i;                                                        \
+    int i, len;                                                   \
     VReg *Vd = (VReg *)vd;                                        \
     VReg *Vj = (VReg *)vj;                                        \
     VReg *Vk = (VReg *)vk;                                        \
     typedef __typeof(Vd->E1(0)) TD;                               \
                                                                   \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                           \
+    len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;             \
+    for (i = 0; i < len / BIT; i++) {                             \
         Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
     }                                                             \
 }
@@ -556,13 +556,14 @@ VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)
 #define VMADDWOD(NAME, BIT, E1, E2, DO_OP)                  \
 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
 {                                                           \
-    int i;                                                  \
+    int i, len;                                             \
     VReg *Vd = (VReg *)vd;                                  \
     VReg *Vj = (VReg *)vj;                                  \
     VReg *Vk = (VReg *)vk;                                  \
     typedef __typeof(Vd->E1(0)) TD;                         \
                                                             \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
+    len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;       \
+    for (i = 0; i < len / BIT; i++) {                       \
         Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1),           \
                            (TD)Vk->E2(2 * i + 1));          \
     }                                                       \
@@ -578,14 +579,15 @@ VMADDWOD(vmaddwod_d_wu, 64,  UD, UW, DO_MUL)
 #define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)  \
 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
 {                                                           \
-    int i;                                                  \
+    int i, len;                                             \
     VReg *Vd = (VReg *)vd;                                  \
     VReg *Vj = (VReg *)vj;                                  \
     VReg *Vk = (VReg *)vk;                                  \
     typedef __typeof(Vd->ES1(0)) TS1;                       \
     typedef __typeof(Vd->EU1(0)) TU1;                       \
                                                             \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
+    len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;       \
+    for (i = 0; i < len / BIT; i++) {                       \
         Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i),            \
                             (TS1)Vk->ES2(2 * i));           \
     }                                                       \
@@ -598,16 +600,17 @@ VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)
 #define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)  \
 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
 {                                                           \
-    int i;                                                  \
+    int i, len;                                             \
     VReg *Vd = (VReg *)vd;                                  \
     VReg *Vj = (VReg *)vj;                                  \
     VReg *Vk = (VReg *)vk;                                  \
     typedef __typeof(Vd->ES1(0)) TS1;                       \
     typedef __typeof(Vd->EU1(0)) TU1;                       \
                                                             \
-    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
-        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1),         \
-                            (TS1)Vk->ES2(2 * i + 1));        \
+    len = (simd_oprsz(v) == 16) ? LSX_LEN : LASX_LEN;       \
+    for (i = 0; i < len / BIT; i++) {                       \
+        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1),        \
+                            (TS1)Vk->ES2(2 * i + 1));       \
     }                                                       \
 }
 
-- 
2.39.1