[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 06/62] target/arm: Use PageEntryExtra for BTI
From: |
Richard Henderson |
Subject: |
[PATCH 06/62] target/arm: Use PageEntryExtra for BTI |
Date: |
Sun, 3 Jul 2022 13:53:23 +0530 |
Add a bit to ARMCacheAttrs to hold the guarded bit between
get_phys_addr_lpae and arm_cpu_tlb_fill, then put the bit
into PageEntryExtra.
In is_guarded_page, use probe_access_extra instead of just
guessing that the tlb entry is still present. This also resolves
the FIXME about executing from device memory.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/arm/cpu.h | 13 -------------
target/arm/internals.h | 2 ++
target/arm/ptw.c | 4 ++--
target/arm/tlb_helper.c | 2 ++
target/arm/translate-a64.c | 22 ++++++++--------------
5 files changed, 14 insertions(+), 29 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a26b9437e9..4a41b5dcef 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3357,19 +3357,6 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env,
unsigned regno)
/* Shared between translate-sve.c and sve_helper.c. */
extern const uint64_t pred_esz_masks[4];
-/* Helper for the macros below, validating the argument type. */
-static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
-{
- return x;
-}
-
-/*
- * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
- * Using these should be a bit more self-documenting than using the
- * generic target bits directly.
- */
-#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-
/*
* AArch64 usage of the PAGE_TARGET_* bits for linux-user.
*/
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 2b38a83574..268c3c7380 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -77,6 +77,7 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1
prefix */
/* Bit definitions for PageEntryExtra */
FIELD(PAGEENTRYEXTRA, ATTRS, 0, 8)
FIELD(PAGEENTRYEXTRA, SHAREABILITY, 8, 2)
+FIELD(PAGEENTRYEXTRA, GUARDED, 10, 1)
FIELD(PAGEENTRYEXTRA, PA, 12, 52)
/* Minimum value which is a magic number for exception return */
@@ -1129,6 +1130,7 @@ typedef struct ARMCacheAttrs {
unsigned int attrs:8;
unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
bool is_s2_format:1;
+ bool guarded:1; /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;
bool get_phys_addr(CPUARMState *env, target_ulong address,
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index da478104f0..204c820026 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -1320,8 +1320,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t
address,
txattrs->secure = false;
}
/* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
- if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
- arm_tlb_bti_gp(txattrs) = true;
+ if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+ cacheattrs->guarded = guarded;
}
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 1305b6ec7d..7476fcafeb 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -244,6 +244,8 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
cacheattrs.attrs);
extra.x = FIELD_DP64(extra.x, PAGEENTRYEXTRA, SHAREABILITY,
cacheattrs.shareability);
+ extra.x = FIELD_DP64(extra.x, PAGEENTRYEXTRA, GUARDED,
+ cacheattrs.guarded);
}
tlb_set_page_with_extra(cs, address, phys_addr, attrs, extra,
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index c86b97b1d4..57f492ccef 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14543,22 +14543,16 @@ static bool is_guarded_page(CPUARMState *env,
DisasContext *s)
#ifdef CONFIG_USER_ONLY
return page_get_flags(addr) & PAGE_BTI;
#else
+ MemTxAttrs attrs;
+ PageEntryExtra extra;
+ void *host;
int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
- unsigned int index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ int flags;
- /*
- * We test this immediately after reading an insn, which means
- * that any normal page must be in the TLB. The only exception
- * would be for executing from flash or device memory, which
- * does not retain the TLB entry.
- *
- * FIXME: Assume false for those, for now. We could use
- * arm_cpu_get_phys_page_attrs_debug to re-read the page
- * table entry even for that case.
- */
- return (tlb_hit(entry->addr_code, addr) &&
- arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
+ flags = probe_access_extra(env, addr, MMU_INST_FETCH, mmu_idx,
+ false, &host, &attrs, &extra, 0);
+ assert(!(flags & TLB_INVALID_MASK));
+ return FIELD_EX64(extra.x, PAGEENTRYEXTRA, GUARDED);
#endif
}
--
2.34.1
- Re: [PATCH 01/62] accel/tcg: Introduce PageEntryExtra, (continued)
- [PATCH 02/62] target/arm: Enable PageEntryExtra, Richard Henderson, 2022/07/03
- [PATCH 03/62] target/arm: Fix MTE check in sve_ldnfff1_r, Richard Henderson, 2022/07/03
- [PATCH 04/62] target/arm: Record tagged bit for user-only in sve_probe_page, Richard Henderson, 2022/07/03
- [PATCH 05/62] target/arm: Use PageEntryExtra for MTE, Richard Henderson, 2022/07/03
- [PATCH 06/62] target/arm: Use PageEntryExtra for BTI,
Richard Henderson <=
- [PATCH 07/62] include/exec: Remove target_tlb_bitN from MemTxAttrs, Richard Henderson, 2022/07/03
- [PATCH 09/62] target/arm: Fix ipa_secure in get_phys_addr, Richard Henderson, 2022/07/03
- [PATCH 08/62] target/arm: Create GetPhysAddrResult, Richard Henderson, 2022/07/03
- [PATCH 10/62] target/arm: Use GetPhysAddrResult in get_phys_addr_lpae, Richard Henderson, 2022/07/03
- [PATCH 11/62] target/arm: Use GetPhysAddrResult in get_phys_addr_v6, Richard Henderson, 2022/07/03
- [PATCH 12/62] target/arm: Use GetPhysAddrResult in get_phys_addr_v5, Richard Henderson, 2022/07/03
- [PATCH 13/62] target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav5, Richard Henderson, 2022/07/03
- [PATCH 14/62] target/arm: Use GetPhysAddrResult in get_phys_addr_pmsav7, Richard Henderson, 2022/07/03