From: Peter Maydell
Subject: [PULL 37/57] target/arm: Implement helper_mte_check1
Date: Fri, 26 Jun 2020 16:14:04 +0100

From: Richard Henderson <richard.henderson@linaro.org>

Fill out the helper_mte_check1 stub that was added earlier.
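
As a sanity check on the fold used in tcma_check() below, where
((ptr_tag + bit55) & 0xf) == 0 stands in for the architectural test
ptr<59:55> == 0b00000 || ptr<59:55> == 0b11111, here is a standalone
sketch (illustration only, not part of the patch; all names are local
to the sketch) that enumerates every value of ptr<59:55> and confirms
the two forms agree:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    int main(void)
    {
        /* bits models ptr<59:55>: bit 0 is ptr<55>, bits 4:1 are ptr<59:56>. */
        for (uint32_t bits = 0; bits < 32; bits++) {
            int bit55 = bits & 1;       /* ptr<55> */
            int ptr_tag = bits >> 1;    /* ptr<59:56>, the logical address tag */
            bool folded = ((ptr_tag + bit55) & 0xf) == 0;
            bool spec = (bits == 0) || (bits == 0x1f);
            assert(folded == spec);
        }
        return 0;
    }
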
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200626033144.790098-26-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/internals.h | 48 +++++++++++++++
target/arm/mte_helper.c | 132 +++++++++++++++++++++++++++++++++++++++-
2 files changed, 179 insertions(+), 1 deletion(-)

diff --git a/target/arm/internals.h b/target/arm/internals.h
index fb92ef6b840..807830cc400 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1318,6 +1318,10 @@ FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ESIZE, 9, 5)
FIELD(MTEDESC, TSIZE, 14, 10) /* mte_checkN only */

+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr);
+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
+ uint64_t ptr, uintptr_t ra);
+
static inline int allocation_tag_from_addr(uint64_t ptr)
{
return extract64(ptr, 56, 4);
@@ -1328,4 +1332,48 @@ static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
return deposit64(ptr, 56, 4, rtag);
}

+/* Return true if tbi bits mean that the access is checked. */
+static inline bool tbi_check(uint32_t desc, int bit55)
+{
+ return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
+}
+
+/* Return true if tcma bits mean that the access is unchecked. */
+static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
+{
+ /*
+ * We had extracted bit55 and ptr_tag for other reasons, so fold
+ * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
+ */
+ bool match = ((ptr_tag + bit55) & 0xf) == 0;
+ bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
+ return tcma && match;
+}
+
+/*
+ * For TBI, ideally, we would do nothing. Proper behaviour on fault is
+ * for the tag to be present in the FAR_ELx register. But for user-only
+ * mode, we do not have a TLB with which to implement this, so we must
+ * remove the top byte.
+ */
+static inline uint64_t useronly_clean_ptr(uint64_t ptr)
+{
+ /* TBI is known to be enabled. */
+#ifdef CONFIG_USER_ONLY
+ ptr = sextract64(ptr, 0, 56);
+#endif
+ return ptr;
+}
+
+static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
+{
+#ifdef CONFIG_USER_ONLY
+ int64_t clean_ptr = sextract64(ptr, 0, 56);
+ if (tbi_check(desc, clean_ptr < 0)) {
+ ptr = clean_ptr;
+ }
+#endif
+ return ptr;
+}
+
#endif
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index 907a12b3664..c8a5e7c0edd 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -359,12 +359,142 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
}
}

+/* Record a tag check failure. */
+static void mte_check_fail(CPUARMState *env, int mmu_idx,
+ uint64_t dirty_ptr, uintptr_t ra)
+{
+ ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
+ int el, reg_el, tcf, select;
+ uint64_t sctlr;
+
+ reg_el = regime_el(env, arm_mmu_idx);
+ sctlr = env->cp15.sctlr_el[reg_el];
+
+ switch (arm_mmu_idx) {
+ case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E20_0:
+ el = 0;
+ tcf = extract64(sctlr, 38, 2);
+ break;
+ default:
+ el = reg_el;
+ tcf = extract64(sctlr, 40, 2);
+ }
+
+ switch (tcf) {
+ case 1:
+ /*
+ * Tag check fail causes a synchronous exception.
+ *
+ * In restore_state_to_opc, we set the exception syndrome
+ * for the load or store operation. Unwind first so we
+ * may overwrite that with the syndrome for the tag check.
+ */
+ cpu_restore_state(env_cpu(env), ra, true);
+ env->exception.vaddress = dirty_ptr;
+ raise_exception(env, EXCP_DATA_ABORT,
+ syn_data_abort_no_iss(el != 0, 0, 0, 0, 0, 0, 0x11),
+ exception_target_el(env));
+ /* noreturn, but fall through to the assert anyway */
+
+ case 0:
+ /*
+ * Tag check fail does not affect the PE.
+ * We eliminate this case by not setting MTE_ACTIVE
+ * in tb_flags, so that we never make this runtime call.
+ */
+ g_assert_not_reached();
+
+ case 2:
+ /* Tag check fail causes asynchronous flag set. */
+ mmu_idx = arm_mmu_idx_el(env, el);
+ if (regime_has_2_ranges(mmu_idx)) {
+ select = extract64(dirty_ptr, 55, 1);
+ } else {
+ select = 0;
+ }
+ env->cp15.tfsr_el[el] |= 1 << select;
+ break;
+
+ default:
+ /* Case 3: Reserved. */
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Tag check failure with SCTLR_EL%d.TCF%s "
+ "set to reserved value %d\n",
+ reg_el, el ? "" : "0", tcf);
+ break;
+ }
+}
+
/*
* Perform an MTE checked access for a single logical or atomic access.
*/
+static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+ uintptr_t ra, int bit55)
+{
+ int mem_tag, mmu_idx, ptr_tag, size;
+ MMUAccessType type;
+ uint8_t *mem;
+
+ ptr_tag = allocation_tag_from_addr(ptr);
+
+ if (tcma_check(desc, bit55, ptr_tag)) {
+ return true;
+ }
+
+ mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+ type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ size = FIELD_EX32(desc, MTEDESC, ESIZE);
+
+ mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
+ MMU_DATA_LOAD, 1, ra);
+ if (!mem) {
+ return true;
+ }
+
+ mem_tag = load_tag1(ptr, mem);
+ return ptr_tag == mem_tag;
+}
+
+/*
+ * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
+ * Returns false if the access is Checked and the check failed. This
+ * is only intended to probe the tag -- the validity of the page must
+ * be checked beforehand.
+ */
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
+{
+ int bit55 = extract64(ptr, 55, 1);
+
+ /* If TBI is disabled, the access is unchecked. */
+ if (unlikely(!tbi_check(desc, bit55))) {
+ return true;
+ }
+
+ return mte_probe1_int(env, desc, ptr, 0, bit55);
+}
+
+uint64_t mte_check1(CPUARMState *env, uint32_t desc,
+ uint64_t ptr, uintptr_t ra)
+{
+ int bit55 = extract64(ptr, 55, 1);
+
+ /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
+ if (unlikely(!tbi_check(desc, bit55))) {
+ return ptr;
+ }
+
+ if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
+ int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+ mte_check_fail(env, mmu_idx, ptr, ra);
+ }
+
+ return useronly_clean_ptr(ptr);
+}
+
uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
- return ptr;
+ return mte_check1(env, desc, ptr, GETPC());
}

/*
--
2.20.1