From: Richard Henderson
Subject: [PATCH v2 099/100] target/arm: Implement SVE2 bitwise shift immediate
Date: Wed, 17 Jun 2020 21:26:43 -0700
From: Stephen Long <steplong@quicinc.com>
Implement the SVE2 predicated shifts by immediate: SQSHL/UQSHL, SRSHR/URSHR, and SQSHLU.
Signed-off-by: Stephen Long <steplong@quicinc.com>
Message-Id: <20200430194159.24064-1-steplong@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
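Reviewer aside, not part of the commit: below is a minimal standalone sketch of
the semantics the new immediate-shift helpers compute, byte case only. The
names ref_srshr and ref_sqshlu are made up for this note; it assumes an
arithmetic right shift of signed values (which QEMU relies on throughout) and
in-range shift counts as the decode guarantees (right shifts in [1, 8], left
shifts in [0, 7] for bytes).

#include <stdint.h>
#include <stdio.h>

/* Rounding shift right (SRSHR/URSHR): add back the last bit shifted
 * out, i.e. (x + (1 << (sh - 1))) >> sh without the intermediate
 * overflow.  This is what DO_RSHR computes. */
static int8_t ref_srshr(int8_t x, unsigned sh)
{
    return (x >> sh) + ((x >> (sh - 1)) & 1);
}

/* Saturating left shift of a signed value to an unsigned result
 * (SQSHLU): negative inputs clamp to 0, overflow clamps to UINT8_MAX. */
static uint8_t ref_sqshlu(int8_t x, unsigned sh)
{
    int32_t r;

    if (x < 0) {
        return 0;
    }
    r = (int32_t)x << sh;
    return r > UINT8_MAX ? UINT8_MAX : r;
}

int main(void)
{
    printf("srshr(-5, 1) = %d\n", ref_srshr(-5, 1));     /* -2, not the truncating -3 */
    printf("sqshlu(-3, 2) = %d\n", ref_sqshlu(-3, 2));   /* 0 */
    printf("sqshlu(100, 2) = %d\n", ref_sqshlu(100, 2)); /* 255 */
    return 0;
}

The helpers in sve_helper.c reach the same results through DO_RSHR and
do_suqrshl_bhs/do_suqrshl_d, discarding the saturation flag that the Neon
paths use to set QC.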
target/arm/helper-sve.h | 33 +++++++++++++++++++++
target/arm/sve.decode | 5 ++++
target/arm/sve_helper.c | 39 +++++++++++++++++++++++--
target/arm/translate-sve.c | 60 ++++++++++++++++++++++++++++++++++++++
4 files changed, 135 insertions(+), 2 deletions(-)
diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h
index a00d1904b7..cb609b5daa 100644
--- a/target/arm/helper-sve.h
+++ b/target/arm/helper-sve.h
@@ -2250,6 +2250,39 @@ DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_h, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_idx_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshl_zpzi_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_uqshl_zpzi_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqshlu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqshlu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(sve2_fcvtnt_sh, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_fcvtnt_ds, TCG_CALL_NO_RWG,
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 4e21274dc4..d2f33d96f3 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -342,6 +342,11 @@ ASR_zpzi 00000100 .. 000 000 100 ... .. ... ..... @rdn_pg_tszimm_shr
LSR_zpzi 00000100 .. 000 001 100 ... .. ... ..... @rdn_pg_tszimm_shr
LSL_zpzi 00000100 .. 000 011 100 ... .. ... ..... @rdn_pg_tszimm_shl
ASRD 00000100 .. 000 100 100 ... .. ... ..... @rdn_pg_tszimm_shr
+SQSHL_zpzi 00000100 .. 000 110 100 ... .. ... ..... @rdn_pg_tszimm_shl
+UQSHL_zpzi 00000100 .. 000 111 100 ... .. ... ..... @rdn_pg_tszimm_shl
+SRSHR 00000100 .. 001 100 100 ... .. ... ..... @rdn_pg_tszimm_shr
+URSHR 00000100 .. 001 101 100 ... .. ... ..... @rdn_pg_tszimm_shr
+SQSHLU 00000100 .. 001 111 100 ... .. ... ..... @rdn_pg_tszimm_shl
# SVE bitwise shift by vector (predicated)
ASR_zpzz 00000100 .. 010 000 100 ... ..... ..... @rdn_pg_rm
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index b37fb60b7d..fe79e22bb8 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -2231,6 +2231,43 @@ DO_ZPZI(sve_asrd_h, int16_t, H1_2, DO_ASRD)
DO_ZPZI(sve_asrd_s, int32_t, H1_4, DO_ASRD)
DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD)
+#define DO_RSHR(x, sh) ((x >> sh) + ((x >> (sh - 1)) & 1))
+
+/* SVE2 bitwise shift by immediate */
+DO_ZPZI(sve2_sqshl_zpzi_b, int8_t, H1, do_sqshl_b)
+DO_ZPZI(sve2_sqshl_zpzi_h, int16_t, H1_2, do_sqshl_h)
+DO_ZPZI(sve2_sqshl_zpzi_s, int32_t, H1_4, do_sqshl_s)
+DO_ZPZI_D(sve2_sqshl_zpzi_d, int64_t, do_sqshl_d)
+
+DO_ZPZI(sve2_uqshl_zpzi_b, uint8_t, H1, do_uqshl_b)
+DO_ZPZI(sve2_uqshl_zpzi_h, uint16_t, H1_2, do_uqshl_h)
+DO_ZPZI(sve2_uqshl_zpzi_s, uint32_t, H1_4, do_uqshl_s)
+DO_ZPZI_D(sve2_uqshl_zpzi_d, uint64_t, do_uqshl_d)
+
+DO_ZPZI(sve2_srshr_b, int8_t, H1, DO_RSHR)
+DO_ZPZI(sve2_srshr_h, int16_t, H1_2, DO_RSHR)
+DO_ZPZI(sve2_srshr_s, int32_t, H1_4, DO_RSHR)
+DO_ZPZI_D(sve2_srshr_d, int64_t, DO_RSHR)
+
+DO_ZPZI(sve2_urshr_b, uint8_t, H1, DO_RSHR)
+DO_ZPZI(sve2_urshr_h, uint16_t, H1_2, DO_RSHR)
+DO_ZPZI(sve2_urshr_s, uint32_t, H1_4, DO_RSHR)
+DO_ZPZI_D(sve2_urshr_d, uint64_t, DO_RSHR)
+
+#define do_sqshlu_b(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, (int8_t)m, 8, false, &discard); })
+#define do_sqshlu_h(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, (int16_t)m, 16, false, &discard); })
+#define do_sqshlu_s(n, m) \
+ ({ uint32_t discard; do_suqrshl_bhs(n, m, 32, false, &discard); })
+#define do_sqshlu_d(n, m) \
+ ({ uint32_t discard; do_suqrshl_d(n, m, false, &discard); })
+
+DO_ZPZI(sve2_sqshlu_b, int8_t, H1, do_sqshlu_b)
+DO_ZPZI(sve2_sqshlu_h, int16_t, H1_2, do_sqshlu_h)
+DO_ZPZI(sve2_sqshlu_s, int32_t, H1_4, do_sqshlu_s)
+DO_ZPZI_D(sve2_sqshlu_d, int64_t, do_sqshlu_d)
+
#undef DO_ASRD
#undef DO_ZPZI
#undef DO_ZPZI_D
@@ -2265,8 +2302,6 @@ DO_SHRNT(sve2_shrnt_h, uint16_t, uint8_t, H1_2, H1, DO_SHR)
DO_SHRNT(sve2_shrnt_s, uint32_t, uint16_t, H1_4, H1_2, DO_SHR)
DO_SHRNT(sve2_shrnt_d, uint64_t, uint32_t, , H1_4, DO_SHR)
-#define DO_RSHR(x, sh) ((x >> sh) + ((x >> (sh - 1)) & 1))
-
DO_SHRNB(sve2_rshrnb_h, uint16_t, uint8_t, DO_RSHR)
DO_SHRNB(sve2_rshrnb_s, uint32_t, uint16_t, DO_RSHR)
DO_SHRNB(sve2_rshrnb_d, uint64_t, uint32_t, DO_RSHR)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 1dd67ef538..2b2e186988 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -1044,6 +1044,66 @@ static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a)
}
}
+static bool trans_SQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
+ gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_UQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
+ gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SRSHR(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
+ gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_URSHR(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
+ gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
+static bool trans_SQSHLU(DisasContext *s, arg_rpri_esz *a)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
+ gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
+ };
+ if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ return do_zpzi_ool(s, a, fns[a->esz]);
+}
+
/*
*** SVE Bitwise Shift - Predicated Group
*/
--
2.25.1