[PULL 39/57] target/arm: Add helper_mte_check_zva
From: Peter Maydell
Subject: [PULL 39/57] target/arm: Add helper_mte_check_zva
Date: Fri, 26 Jun 2020 16:14:06 +0100

From: Richard Henderson <richard.henderson@linaro.org>
Use a special helper for DC_ZVA, rather than the more
general mte_checkN.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-28-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
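Note on the approach: because DC ZVA always operates on a naturally aligned block, all of the allocation tags covering the block can be checked with a single wide comparison: the 4-bit logical tag from the pointer is replicated into every nibble by multiplying with 0x11...1 and compared against the packed tag bytes read from tag memory in little-endian order. Below is a minimal standalone sketch of that comparison, assuming the common 64-byte block (four 16-byte granules, two packed tag bytes); the function is purely illustrative and not part of the tree.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration only: check all four granule tags of one naturally
 * aligned 64-byte ZVA block with a single 16-bit comparison.
 * packed_mem_tags holds the two tag bytes in little-endian order,
 * one 4-bit tag per nibble, lowest nibble = lowest granule.
 */
static bool tags_match_zva64(uint8_t ptr_tag, uint16_t packed_mem_tags)
{
    /* 0x1111 replicates the 4-bit tag into all four nibbles. */
    uint16_t expected = (uint16_t)((ptr_tag & 0xf) * 0x1111u);

    return packed_mem_tags == expected;
}

Larger blocks extend the same idea to 32-bit and 64-bit compares, and 512-byte and larger blocks loop over 64-bit chunks, as in the default case of the helper below.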
 target/arm/helper-a64.h    |   1 +
 target/arm/mte_helper.c    | 106 +++++++++++++++++++++++++++++++++++++
 target/arm/translate-a64.c |  16 +++++-
 3 files changed, 122 insertions(+), 1 deletion(-)
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index 005af678c77..5b0b699a50a 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -106,6 +106,7 @@ DEF_HELPER_FLAGS_2(xpacd, TCG_CALL_NO_RWG_SE, i64, env, i64)
 
 DEF_HELPER_FLAGS_3(mte_check1, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(mte_checkN, TCG_CALL_NO_WG, i64, env, i32, i64)
+DEF_HELPER_FLAGS_3(mte_check_zva, TCG_CALL_NO_WG, i64, env, i32, i64)
 DEF_HELPER_FLAGS_3(irg, TCG_CALL_NO_RWG, i64, env, i64, i64)
 DEF_HELPER_FLAGS_4(addsubg, TCG_CALL_NO_RWG_SE, i64, env, i64, s32, i32)
 DEF_HELPER_FLAGS_3(ldg, TCG_CALL_NO_WG, i64, env, i64, i64)
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index abe6af6b795..4f9bd3add3d 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -667,3 +667,109 @@ uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
     return mte_checkN(env, desc, ptr, GETPC());
 }
+
+/*
+ * Perform an MTE checked access for DC_ZVA.
+ */
+uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+    uintptr_t ra = GETPC();
+    int log2_dcz_bytes, log2_tag_bytes;
+    int mmu_idx, bit55;
+    intptr_t dcz_bytes, tag_bytes, i;
+    void *mem;
+    uint64_t ptr_tag, mem_tag, align_ptr;
+
+    bit55 = extract64(ptr, 55, 1);
+
+    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
+    if (unlikely(!tbi_check(desc, bit55))) {
+        return ptr;
+    }
+
+    ptr_tag = allocation_tag_from_addr(ptr);
+
+    if (tcma_check(desc, bit55, ptr_tag)) {
+        goto done;
+    }
+
+    /*
+     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
+     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
+     * sure that we can access one complete tag byte here.
+     */
+    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
+    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
+    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
+    tag_bytes = (intptr_t)1 << log2_tag_bytes;
+    align_ptr = ptr & -dcz_bytes;
+
+    /*
+     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
+     * the original pointer for an invalid page.  But watchpoints require
+     * that we probe the actual space.  So do both.
+     */
+    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+    (void) probe_write(env, ptr, 1, mmu_idx, ra);
+    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
+                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
+    if (!mem) {
+        goto done;
+    }
+
+    /*
+     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
+     * it is quite easy to perform all of the comparisons at once without
+     * any extra masking.
+     *
+     * The most common zva block size is 64; some of the thunderx cpus use
+     * a block size of 128.  For user-only, aarch64_max_initfn will set the
+     * block size to 512.  Fill out the other cases for future-proofing.
+     *
+     * In order to be able to find the first miscompare later, we want the
+     * tag bytes to be in little-endian order.
+     */
+    switch (log2_tag_bytes) {
+    case 0: /* zva_blocksize 32 */
+        mem_tag = *(uint8_t *)mem;
+        ptr_tag *= 0x11u;
+        break;
+    case 1: /* zva_blocksize 64 */
+        mem_tag = cpu_to_le16(*(uint16_t *)mem);
+        ptr_tag *= 0x1111u;
+        break;
+    case 2: /* zva_blocksize 128 */
+        mem_tag = cpu_to_le32(*(uint32_t *)mem);
+        ptr_tag *= 0x11111111u;
+        break;
+    case 3: /* zva_blocksize 256 */
+        mem_tag = cpu_to_le64(*(uint64_t *)mem);
+        ptr_tag *= 0x1111111111111111ull;
+        break;
+
+    default: /* zva_blocksize 512, 1024, 2048 */
+        ptr_tag *= 0x1111111111111111ull;
+        i = 0;
+        do {
+            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
+            if (unlikely(mem_tag != ptr_tag)) {
+                goto fail;
+            }
+            i += 8;
+            align_ptr += 16 * TAG_GRANULE;
+        } while (i < tag_bytes);
+        goto done;
+    }
+
+    if (likely(mem_tag == ptr_tag)) {
+        goto done;
+    }
+
+ fail:
+    /* Locate the first nibble that differs. */
+    i = ctz64(mem_tag ^ ptr_tag) >> 4;
+    mte_check_fail(env, mmu_idx, align_ptr + i * TAG_GRANULE, ra);
+
+ done:
+    return useronly_clean_ptr(ptr);
+}
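For reference, the sizing arithmetic above works out as follows: dcz_blocksize (what is reported in DCZID_EL0.BS) is the log2 of the block size in 4-byte words, hence the +2 to get bytes; and since one tag byte packs two 4-bit tags covering 2 * TAG_GRANULE = 32 bytes, log2_tag_bytes subtracts LOG2_TAG_GRANULE + 1 = 5. Below is a quick standalone check of those relations for the block sizes mentioned in the comment (illustrative only; LOG2_TAG_GRANULE of 4 is assumed to mirror QEMU's 16-byte tag granule).

#include <assert.h>

#define LOG2_TAG_GRANULE 4  /* assumed: 16-byte tag granule, as in QEMU */

int main(void)
{
    /* dcz_blocksize (DCZID_EL0.BS) -> bytes zeroed and tag bytes read */
    static const struct {
        int blocksize, dcz_bytes, tag_bytes;
    } cases[] = {
        { 4,  64,  2 },  /* the most common 64-byte ZVA block */
        { 5, 128,  4 },  /* thunderx-style 128-byte block */
        { 7, 512, 16 },  /* user-only block size from aarch64_max_initfn */
    };

    for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
        int log2_dcz_bytes = cases[i].blocksize + 2;
        int log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);

        assert((1 << log2_dcz_bytes) == cases[i].dcz_bytes);
        assert((1 << log2_tag_bytes) == cases[i].tag_bytes);
    }
    return 0;
}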
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 52be0400d75..a2a82800102 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1857,7 +1857,21 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
         return;
     case ARM_CP_DC_ZVA:
         /* Writes clear the aligned block of memory which rt points into. */
-        tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
+        if (s->mte_active[0]) {
+            TCGv_i32 t_desc;
+            int desc = 0;
+
+            desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
+            desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
+            desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
+            t_desc = tcg_const_i32(desc);
+
+            tcg_rt = new_tmp_a64(s);
+            gen_helper_mte_check_zva(tcg_rt, cpu_env, t_desc, cpu_reg(s, rt));
+            tcg_temp_free_i32(t_desc);
+        } else {
+            tcg_rt = clean_data_tbi(s, cpu_reg(s, rt));
+        }
         gen_helper_dc_zva(cpu_env, tcg_rt);
         return;
     default:
--
2.20.1