[PULL 36/57] target/arm: Add gen_mte_checkN
From: Peter Maydell
Subject: [PULL 36/57] target/arm: Add gen_mte_checkN
Date: Fri, 26 Jun 2020 16:14:03 +0100
From: Richard Henderson <richard.henderson@linaro.org>
Replace existing uses of check_data_tbi in translate-a64.c that
perform multiple logical memory accesses. Leave the helper blank
for now to reduce the patch size.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-25-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
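Reviewer's note, not part of the commit: the heart of the new
gen_mte_checkN() below is packing everything the out-of-line helper
will need into a single 32-bit descriptor.  The standalone program
that follows imitates that packing so it can be compiled and run on
its own.  The field offsets and widths are placeholders chosen for
illustration (the real layout is the MTEDESC FIELD() definitions in
target/arm/internals.h), and deposit32() here mimics the bit-deposit
primitive underneath QEMU's FIELD_DP32() macro:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only; QEMU's real MTEDESC may differ. */
enum {
    MIDX_SHIFT  = 0,  MIDX_LEN  = 4,   /* MMU/memory index */
    TBI_SHIFT   = 4,  TBI_LEN   = 2,   /* top-byte-ignore enables */
    TCMA_SHIFT  = 6,  TCMA_LEN  = 2,   /* tag-check-match-all enables */
    WRITE_SHIFT = 8,  WRITE_LEN = 1,   /* store vs load */
    ESIZE_SHIFT = 9,  ESIZE_LEN = 5,   /* bytes per logical element */
    TSIZE_SHIFT = 14, TSIZE_LEN = 10,  /* total bytes, mte_checkN only */
};

/* Deposit VAL into the LEN-bit field of DESC that starts at SHIFT. */
static uint32_t deposit32(uint32_t desc, int shift, int len, uint32_t val)
{
    uint32_t mask = ((1u << len) - 1) << shift;
    assert(((val << shift) & ~mask) == 0);  /* value must fit the field */
    return (desc & ~mask) | (val << shift);
}

int main(void)
{
    /* e.g. an LDP of two X registers: log2_esize == 3, 16 bytes total. */
    int log2_esize = 3, total_size = 2 << log2_esize;
    uint32_t desc = 0;

    desc = deposit32(desc, MIDX_SHIFT, MIDX_LEN, 1);    /* example index */
    desc = deposit32(desc, WRITE_SHIFT, WRITE_LEN, 0);  /* it is a load */
    desc = deposit32(desc, ESIZE_SHIFT, ESIZE_LEN, 1u << log2_esize);
    desc = deposit32(desc, TSIZE_SHIFT, TSIZE_LEN, total_size);

    printf("desc = 0x%08x (esize %u, tsize %d)\n",
           desc, 1u << log2_esize, total_size);
    return 0;
}

Every field is a translation-time constant, so the descriptor folds
into one constant i32 and the generated call passes the helper only
cpu_env, that constant, and the address.
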
 target/arm/helper-a64.h    |  1 +
 target/arm/translate-a64.h |  2 ++
 target/arm/mte_helper.c    |  8 +++++
 target/arm/translate-a64.c | 71 +++++++++++++++++++++++++++++---------
 4 files changed, 66 insertions(+), 16 deletions(-)
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index 2faa49d0a33..005af678c77 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -105,6 +105,7 @@ DEF_HELPER_FLAGS_2(xpaci, TCG_CALL_NO_RWG_SE, i64, env, i64)
 DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
 
 DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
+DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
 DEF_HELPER_FLAGS_3(ldg, TCG_CALL_NO_WG, i64, env, i64, i64)
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index daab6a96665..781c4413999 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -42,6 +42,8 @@ bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
 bool sve_access_check(DisasContext *s);
 TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                         bool tag_checked, int log2_size);
+TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
+                        bool tag_checked, int log2_esize, int total_size);
 
 /* We should have at some point before trying to access an FP register
  * done the necessary access check, so assert that
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index ec12768dfc3..907a12b3664 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -366,3 +366,11 @@ uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
     return ptr;
 }
+
+/*
+ * Perform an MTE checked access for multiple logical accesses.
+ */
+uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    return ptr;
+}
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 4d0453c8956..52be0400d75 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -284,6 +284,34 @@ TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write,
                                  false, get_mem_index(s));
 }
 
+/*
+ * For MTE, check multiple logical sequential accesses.
+ */
+TCGv_i64 gen_mte_checkN(DisasContext *s, TCGv_i64 addr, bool is_write,
+                        bool tag_checked, int log2_esize, int total_size)
+{
+    if (tag_checked && s->mte_active[0] && total_size != (1 << log2_esize)) {
+        TCGv_i32 tcg_desc;
+        TCGv_i64 ret;
+        int desc = 0;
+
+        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
+        desc = FIELD_DP32(desc, MTEDESC, ESIZE, 1 << log2_esize);
+        desc = FIELD_DP32(desc, MTEDESC, TSIZE, total_size);
+        tcg_desc = tcg_const_i32(desc);
+
+        ret = new_tmp_a64(s);
+        gen_helper_mte_checkN(ret, cpu_env, tcg_desc, addr);
+        tcg_temp_free_i32(tcg_desc);
+
+        return ret;
+    }
+    return gen_mte_check1(s, addr, is_write, tag_checked, log2_esize);
+}
+
 typedef struct DisasCompare64 {
     TCGCond cond;
     TCGv_i64 value;
@@ -2848,7 +2876,10 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
         }
     }
 
-    clean_addr = clean_data_tbi(s, dirty_addr);
+    clean_addr = gen_mte_checkN(s, dirty_addr, !is_load,
+                                (wback || rn != 31) && !set_tag,
+                                size, 2 << size);
+
     if (is_vector) {
         if (is_load) {
             do_fp_ld(s, rt, clean_addr, size);
@@ -3514,7 +3545,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
     MemOp endian = s->be_data;
-    int ebytes;     /* bytes per element */
+    int total;      /* total bytes */
     int elements;   /* elements per vector */
     int rpt;        /* num iterations */
     int selem;      /* structure elements */
@@ -3584,19 +3615,26 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
         endian = MO_LE;
     }
 
-    /* Consecutive little-endian elements from a single register
+    total = rpt * selem * (is_q ? 16 : 8);
+    tcg_rn = cpu_reg_sp(s, rn);
+
+    /*
+     * Issue the MTE check vs the logical repeat count, before we
+     * promote consecutive little-endian elements below.
+     */
+    clean_addr = gen_mte_checkN(s, tcg_rn, is_store, is_postidx || rn != 31,
+                                size, total);
+
+    /*
+     * Consecutive little-endian elements from a single register
      * can be promoted to a larger little-endian operation.
      */
     if (selem == 1 && endian == MO_LE) {
         size = 3;
     }
-    ebytes = 1 << size;
-    elements = (is_q ? 16 : 8) / ebytes;
-
-    tcg_rn = cpu_reg_sp(s, rn);
-    clean_addr = clean_data_tbi(s, tcg_rn);
-    tcg_ebytes = tcg_const_i64(ebytes);
+    elements = (is_q ? 16 : 8) >> size;
+    tcg_ebytes = tcg_const_i64(1 << size);
 
     for (r = 0; r < rpt; r++) {
         int e;
         for (e = 0; e < elements; e++) {
@@ -3630,7 +3668,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
 
     if (is_postidx) {
         if (rm == 31) {
-            tcg_gen_addi_i64(tcg_rn, tcg_rn, rpt * elements * selem * ebytes);
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
         } else {
             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
         }
@@ -3676,7 +3714,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
     int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
     bool replicate = false;
     int index = is_q << 3 | S << 2 | size;
-    int ebytes, xs;
+    int xs, total;
     TCGv_i64 clean_addr, tcg_rn, tcg_ebytes;
 
     if (extract32(insn, 31, 1)) {
@@ -3730,16 +3768,17 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
         return;
     }
 
-    ebytes = 1 << scale;
-
     if (rn == 31) {
         gen_check_sp_alignment(s);
     }
 
+    total = selem << scale;
     tcg_rn = cpu_reg_sp(s, rn);
-    clean_addr = clean_data_tbi(s, tcg_rn);
-    tcg_ebytes = tcg_const_i64(ebytes);
+    clean_addr = gen_mte_checkN(s, tcg_rn, !is_load, is_postidx || rn != 31,
+                                scale, total);
+
+    tcg_ebytes = tcg_const_i64(1 << scale);
 
     for (xs = 0; xs < selem; xs++) {
         if (replicate) {
             /* Load and replicate to all elements */
@@ -3766,7 +3805,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
 
     if (is_postidx) {
         if (rm == 31) {
-            tcg_gen_addi_i64(tcg_rn, tcg_rn, selem * ebytes);
+            tcg_gen_addi_i64(tcg_rn, tcg_rn, total);
         } else {
             tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
         }
--
2.20.1
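
Reviewer's note, not part of the patch: a standalone sanity check of
the total-size arithmetic at the three converted call sites; the
example instruction encodings are only illustrative.

#include <stdio.h>

int main(void)
{
    /* disas_ldst_pair, e.g. LDP of two X registers:
     * size == 3, so the pair covers 2 << 3 == 16 bytes. */
    int size = 3;
    printf("ldst_pair:            %d bytes\n", 2 << size);

    /* disas_ldst_multiple_struct, e.g. LD4 {v0.4s-v3.4s}:
     * rpt == 1, selem == 4, is_q == 1, so 1 * 4 * 16 == 64 bytes. */
    int rpt = 1, selem = 4, is_q = 1;
    printf("ldst_multiple_struct: %d bytes\n", rpt * selem * (is_q ? 16 : 8));

    /* disas_ldst_single_struct, e.g. LD1 {v0.d}[1]:
     * selem == 1, scale == 3, so 1 << 3 == 8 bytes. */
    int selem1 = 1, scale = 3;
    printf("ldst_single_struct:   %d bytes\n", selem1 << scale);
    return 0;
}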