Subject: [PATCH v4 24/60] target/riscv: vector single-width averaging add and subtract
From: LIU Zhiwei
Date: Wed, 11 Mar 2020 13:06:43 +0800
Signed-off-by: LIU Zhiwei <address@hidden>
---
 target/riscv/helper.h                   |  17 ++++
 target/riscv/insn32.decode              |   5 +
 target/riscv/insn_trans/trans_rvv.inc.c |   7 ++
 target/riscv/vector_helper.c            | 129 ++++++++++++++++++++++++
 4 files changed, 158 insertions(+)
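For reference, the averaging instructions keep one extra low bit in the wide sum and round it off according to vxrm. Below is a minimal standalone sketch of that semantics (names and structure are illustrative only, not part of the patch; the patch's actual roundoff helper is get_round(), and 64-bit overflow is handled separately):

/*
 * Illustrative sketch of averaging add under the four vxrm modes.
 * Assumes the sum does not overflow (the real helpers widen, or special
 * case 64-bit).
 */
#include <stdint.h>
#include <stdio.h>

enum { RNU = 0, RNE = 1, RDN = 2, ROD = 3 };  /* vxrm encodings */

static int64_t avg_add(int64_t a, int64_t b, int vxrm)
{
    int64_t sum = a + b;     /* 2*SEW-wide in the real helpers */
    int64_t q = sum >> 1;    /* truncated result */
    int r = (int)(sum & 1);  /* discarded bit */

    switch (vxrm) {
    case RNU:
        return q + r;                 /* round-to-nearest-up */
    case RNE:
        return q + (r & (q & 1));     /* round-to-nearest-even */
    case ROD:
        return q | r;                 /* round-to-odd ("jam") */
    default:
        return q;                     /* RDN: round-down (truncate) */
    }
}

int main(void)
{
    printf("avg(3, 4): RNU=%lld RNE=%lld RDN=%lld ROD=%lld\n",
           (long long)avg_add(3, 4, RNU), (long long)avg_add(3, 4, RNE),
           (long long)avg_add(3, 4, RDN), (long long)avg_add(3, 4, ROD));
    return 0;
}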
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 95da00d365..d3837d2ca4 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -707,3 +707,20 @@ DEF_HELPER_6(vssub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vssub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vssub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(vssub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vaadd_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vasub_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vaadd_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 44baadf582..0227a16b16 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -412,6 +412,11 @@ vssubu_vv       100010 . ..... ..... 000 ..... 1010111 @r_vm
 vssubu_vx       100010 . ..... ..... 100 ..... 1010111 @r_vm
 vssub_vv        100011 . ..... ..... 000 ..... 1010111 @r_vm
 vssub_vx        100011 . ..... ..... 100 ..... 1010111 @r_vm
+vaadd_vv        100100 . ..... ..... 000 ..... 1010111 @r_vm
+vaadd_vx        100100 . ..... ..... 100 ..... 1010111 @r_vm
+vaadd_vi        100100 . ..... ..... 011 ..... 1010111 @r_vm
+vasub_vv        100110 . ..... ..... 000 ..... 1010111 @r_vm
+vasub_vx        100110 . ..... ..... 100 ..... 1010111 @r_vm
 vsetvli         0 ........... ..... 111 ..... 1010111 @r2_zimm
 vsetvl          1000000 ..... ..... 111 ..... 1010111 @r
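The new rows reuse the OP-V major opcode (1010111); funct6 selects vaadd (100100) or vasub (100110), and funct3 picks the operand form (000 OPIVV, 100 OPIVX, 011 OPIVI). A small self-contained encoder for cross-checking the vaadd.vv row above (field layout per the RVV draft spec; the function name is my own, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Assemble vaadd.vv: funct6[31:26] vm[25] vs2[24:20] vs1[19:15]
 * funct3[14:12] vd[11:7] opcode[6:0]. */
static uint32_t encode_vaadd_vv(unsigned vd, unsigned vs1, unsigned vs2,
                                unsigned vm)
{
    return (0x24u << 26)          /* funct6 = 100100 (vaadd) */
         | ((vm & 1u) << 25)      /* vm = 1 means unmasked */
         | ((vs2 & 0x1fu) << 20)
         | ((vs1 & 0x1fu) << 15)
         | (0u << 12)             /* funct3 = 000 (OPIVV) */
         | ((vd & 0x1fu) << 7)
         | 0x57u;                 /* opcode = 1010111 (OP-V) */
}

int main(void)
{
    printf("vaadd.vv v1, v2, v3: 0x%08x\n", encode_vaadd_vv(1, 3, 2, 1));
    return 0;
}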
diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
index ad55766b98..9988fad2fe 100644
--- a/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/target/riscv/insn_trans/trans_rvv.inc.c
@@ -1521,3 +1521,10 @@ GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
 GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check)
 GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check)
+
+/* Vector Single-Width Averaging Add and Subtract */
+GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
+GEN_OPIVV_TRANS(vasub_vv, opivv_check)
+GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
+GEN_OPIVX_TRANS(vasub_vx, opivx_check)
+GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
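vaadd_vi passes 0 as the second GEN_OPIVI_TRANS argument, matching vsadd_vi above; this is assumed to select a sign-extended 5-bit immediate, while vsaddu_vi passes 1 for zero-extension. A standalone sketch of that extension step (function and parameter names are illustrative, not QEMU's):

#include <stdint.h>
#include <stdio.h>

/* Extract the 5-bit immediate from instruction bits [19:15], either
 * zero- or sign-extended. */
static int64_t imm5(uint32_t insn, int zero_extend)
{
    uint32_t raw = (insn >> 15) & 0x1f;
    if (zero_extend) {
        return raw;
    }
    /* sign-extend from 5 bits */
    return ((int32_t)(raw << 27)) >> 27;
}

int main(void)
{
    uint32_t insn = 0x1fu << 15;    /* immediate field = 0b11111 */
    printf("zero-extended: %lld  sign-extended: %lld\n",
           (long long)imm5(insn, 1), (long long)imm5(insn, 0));
    return 0;
}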
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index c7b8c1bff4..b0a7a3b6e4 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -2291,3 +2291,132 @@ GEN_VEXT_VX_ENV(vssub_vx_b, 1, 1, clearb)
 GEN_VEXT_VX_ENV(vssub_vx_h, 2, 2, clearh)
 GEN_VEXT_VX_ENV(vssub_vx_w, 4, 4, clearl)
 GEN_VEXT_VX_ENV(vssub_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Averaging Add and Subtract */
+static inline uint8_t get_round(CPURISCVState *env, uint64_t v, uint8_t shift)
+{
+    uint8_t d, d1;
+    uint64_t D1, D2;
+    int mod = env->vxrm;
+
+    if (shift == 0 || shift > 64) {
+        return 0;
+    }
+
+    d = extract64(v, shift, 1);
+    d1 = extract64(v, shift - 1, 1);
+    D1 = extract64(v, 0, shift);
+    if (mod == 0) { /* round-to-nearest-up (add +0.5 LSB) */
+        return d1;
+    } else if (mod == 1) { /* round-to-nearest-even */
+        if (shift > 1) {
+            D2 = extract64(v, 0, shift - 1);
+            return d1 & ((D2 != 0) | d);
+        } else {
+            return d1 & d;
+        }
+    } else if (mod == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */
+        return !d & (D1 != 0);
+    }
+    return 0; /* round-down (truncate) */
+}
+
+static inline int8_t aadd8(CPURISCVState *env, int8_t a, int8_t b)
+{
+    int16_t res = (int16_t)a + (int16_t)b;
+    uint8_t round = get_round(env, res, 1);
+    res = (res >> 1) + round;
+    return res;
+}
+static inline int16_t aadd16(CPURISCVState *env, int16_t a, int16_t b)
+{
+    int32_t res = (int32_t)a + (int32_t)b;
+    uint8_t round = get_round(env, res, 1);
+    res = (res >> 1) + round;
+    return res;
+}
+static inline int32_t aadd32(CPURISCVState *env, int32_t a, int32_t b)
+{
+    int64_t res = (int64_t)a + (int64_t)b;
+    uint8_t round = get_round(env, res, 1);
+    res = (res >> 1) + round;
+    return res;
+}
+static inline int64_t aadd64(CPURISCVState *env, int64_t a, int64_t b)
+{
+    int64_t res = (int64_t)a + (int64_t)b;
+    uint8_t round = get_round(env, res, 1); /* get_round only needs v[shift:0] */
+    if (((res ^ a) & (res ^ b)) >> 63 == -1LL) { /* overflow */
+        res = ((res >> 1) ^ INT64_MIN) + round;
+    } else {
+        res = (res >> 1) + round;
+    }
+    return res;
+}
+RVVCALL(OPIVV2_ENV, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd8)
+RVVCALL(OPIVV2_ENV, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd16)
+RVVCALL(OPIVV2_ENV, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
+RVVCALL(OPIVV2_ENV, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
+GEN_VEXT_VV_ENV(vaadd_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_ENV(vaadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vaadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vaadd_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_ENV, vaadd_vx_b, OP_SSS_B, H1, H1, aadd8)
+RVVCALL(OPIVX2_ENV, vaadd_vx_h, OP_SSS_H, H2, H2, aadd16)
+RVVCALL(OPIVX2_ENV, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
+RVVCALL(OPIVX2_ENV, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
+GEN_VEXT_VX_ENV(vaadd_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_ENV(vaadd_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_ENV(vaadd_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_ENV(vaadd_vx_d, 8, 8, clearq)
+
+static inline int8_t asub8(CPURISCVState *env, int8_t a, int8_t b)
+{
+    int16_t res = (int16_t)a - (int16_t)b;
+    uint8_t round = get_round(env, res, 1);
+    res = (res >> 1) + round;
+    return res;
+}
+static inline int16_t asub16(CPURISCVState *env, int16_t a, int16_t b)
+{
+    int32_t res = (int32_t)a - (int32_t)b;
+    uint8_t round = get_round(env, res, 1);
+    res = (res >> 1) + round;
+    return res;
+}
+static inline int32_t asub32(CPURISCVState *env, int32_t a, int32_t b)
+{
+    int64_t res = (int64_t)a - (int64_t)b;
+    uint8_t round = get_round(env, res, 1);
+    res = (res >> 1) + round;
+    return res;
+}
+static inline int64_t asub64(CPURISCVState *env, int64_t a, int64_t b)
+{
+    int64_t res = (int64_t)a - (int64_t)b;
+    uint8_t round = get_round(env, res, 1); /* get_round only needs v[shift:0] */
+    if (((res ^ a) & (a ^ b)) >> 63 == -1LL) { /* overflow */
+        res = ((res >> 1) ^ INT64_MIN) + round;
+    } else {
+        res = (res >> 1) + round;
+    }
+    return res;
+}
+RVVCALL(OPIVV2_ENV, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub8)
+RVVCALL(OPIVV2_ENV, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub16)
+RVVCALL(OPIVV2_ENV, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
+RVVCALL(OPIVV2_ENV, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
+GEN_VEXT_VV_ENV(vasub_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_ENV(vasub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_ENV(vasub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_ENV(vasub_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_ENV, vasub_vx_b, OP_SSS_B, H1, H1, asub8)
+RVVCALL(OPIVX2_ENV, vasub_vx_h, OP_SSS_H, H2, H2, asub16)
+RVVCALL(OPIVX2_ENV, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
+RVVCALL(OPIVX2_ENV, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
+GEN_VEXT_VX_ENV(vasub_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_ENV(vasub_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_ENV(vasub_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_ENV(vasub_vx_d, 8, 8, clearq)
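aadd64 and asub64 cannot widen to a larger type, so they rely on the fact that on signed overflow the true bit 64 of the result is the inverse of bit 63 of the wrapped value; XOR-ing INT64_MIN into res >> 1 therefore restores the correct average. A standalone self-check of that trick (not part of the patch; assumes GCC/Clang __int128 as the reference, wraps via unsigned arithmetic to avoid C signed-overflow UB, and fixes rounding to vxrm=0 for brevity):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the aadd64 overflow path above, rounding mode RNU only. */
static int64_t avg_add64(int64_t a, int64_t b)
{
    int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);  /* may wrap */
    int round = (int)(res & 1);                          /* RNU roundoff */

    if (((res ^ a) & (res ^ b)) < 0) {                   /* signed overflow */
        return ((res >> 1) ^ INT64_MIN) + round;
    }
    return (res >> 1) + round;
}

int main(void)
{
    int64_t cases[] = { INT64_MIN, INT64_MIN + 1, -1, 0, 42, INT64_MAX };
    int n = sizeof(cases) / sizeof(cases[0]);

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            /* RNU average of S is (S + 1) >> 1 with arithmetic shift */
            __int128 ref = ((__int128)cases[i] + cases[j] + 1) >> 1;
            assert(avg_add64(cases[i], cases[j]) == (int64_t)ref);
        }
    }
    printf("all cases match\n");
    return 0;
}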
--
2.23.0