[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 26/54] accel/tcg: Return CPUTLBEntryFull not pointer in probe_access_full
From: |
Richard Henderson |
Subject: |
[PATCH v2 26/54] accel/tcg: Return CPUTLBEntryFull not pointer in probe_access_full |
Date: |
Thu, 14 Nov 2024 08:01:02 -0800 |
Return a copy of the structure, not a pointer.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 6 +-----
accel/tcg/cputlb.c | 8 +++++---
target/arm/tcg/helper-a64.c | 4 ++--
target/arm/tcg/mte_helper.c | 15 ++++++---------
target/arm/tcg/sve_helper.c | 6 +++---
5 files changed, 17 insertions(+), 22 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index df7d0b5ad0..69bdb77584 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -365,10 +365,6 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size,
* probe_access_full:
* Like probe_access_flags, except also return into @pfull.
*
- * The CPUTLBEntryFull structure returned via @pfull is transient
- * and must be consumed or copied immediately, before any further
- * access or changes to TLB @mmu_idx.
- *
* This function will not fault if @nonfault is set, but will
* return TLB_INVALID_MASK if the page is not mapped, or is not
* accessible with @access_type.
@@ -379,7 +375,7 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size,
int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost,
- CPUTLBEntryFull **pfull, uintptr_t retaddr);
+ CPUTLBEntryFull *pfull, uintptr_t retaddr);
/**
* probe_access_full_mmu:
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 81135524eb..84e7e633e3 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1420,20 +1420,22 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,
int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, CPUTLBEntryFull **pfull,
+ bool nonfault, void **phost, CPUTLBEntryFull *pfull,
uintptr_t retaddr)
{
+ CPUTLBEntryFull *full;
int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
- mmu_idx, nonfault, phost, pfull, retaddr,
+ mmu_idx, nonfault, phost, &full, retaddr,
true);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
int dirtysize = size == 0 ? 1 : size;
- notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
+ notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
flags &= ~TLB_NOTDIRTY;
}
+ *pfull = *full;
return flags;
}
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 8f42a28d07..783864d6db 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -1883,14 +1883,14 @@ static bool is_guarded_page(CPUARMState *env, target_ulong addr, uintptr_t ra)
#ifdef CONFIG_USER_ONLY
return page_get_flags(addr) & PAGE_BTI;
#else
- CPUTLBEntryFull *full;
+ CPUTLBEntryFull full;
void *host;
int mmu_idx = cpu_mmu_index(env_cpu(env), true);
int flags = probe_access_full(env, addr, 0, MMU_INST_FETCH, mmu_idx,
false, &host, &full, ra);
assert(!(flags & TLB_INVALID_MASK));
- return full->extra.arm.guarded;
+ return full.extra.arm.guarded;
#endif
}
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 9d2ba287ee..870b2875af 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -83,8 +83,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
return tags + index;
#else
- CPUTLBEntryFull *full;
- MemTxAttrs attrs;
+ CPUTLBEntryFull full;
int in_page, flags;
hwaddr ptr_paddr, tag_paddr, xlat;
MemoryRegion *mr;
@@ -110,7 +109,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
assert(!(flags & TLB_INVALID_MASK));
/* If the virtual page MemAttr != Tagged, access unchecked. */
- if (full->extra.arm.pte_attrs != 0xf0) {
+ if (full.extra.arm.pte_attrs != 0xf0) {
return NULL;
}
@@ -129,9 +128,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
* Remember these values across the second lookup below,
* which may invalidate this pointer via tlb resize.
*/
- ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
- attrs = full->attrs;
- full = NULL;
+ ptr_paddr = full.phys_addr | (ptr & ~TARGET_PAGE_MASK);
/*
* The Normal memory access can extend to the next page. E.g. a single
@@ -150,17 +147,17 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
assert(ra != 0);
- cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
+ cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, full.attrs, wp, ra);
}
/* Convert to the physical address in tag space. */
tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
/* Look up the address in tag space. */
- tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
+ tag_asi = full.attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
- tag_access == MMU_DATA_STORE, attrs);
+ tag_access == MMU_DATA_STORE, full.attrs);
/*
* Note that @mr will never be NULL. If there is nothing in the address
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index f1ee0e060f..dad0d5e518 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -5357,7 +5357,7 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
flags = probe_access_flags(env, addr, 0, access_type, mmu_idx, nofault,
&info->host, retaddr);
#else
- CPUTLBEntryFull *full;
+ CPUTLBEntryFull full;
flags = probe_access_full(env, addr, 0, access_type, mmu_idx, nofault,
&info->host, &full, retaddr);
#endif
@@ -5373,8 +5373,8 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
/* Require both ANON and MTE; see allocation_tag_mem(). */
info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
#else
- info->attrs = full->attrs;
- info->tagged = full->extra.arm.pte_attrs == 0xf0;
+ info->attrs = full.attrs;
+ info->tagged = full.extra.arm.pte_attrs == 0xf0;
#endif
/* Ensure that info->host[] is relative to addr, not addr + mem_off. */
--
2.43.0
- [PATCH v2 24/54] accel/tcg: Preserve tlb flags in tlb_set_compare, (continued)
- [PATCH v2 24/54] accel/tcg: Preserve tlb flags in tlb_set_compare, Richard Henderson, 2024/11/14
- [PATCH v2 25/54] accel/tcg: Return CPUTLBEntryFull not pointer in probe_access_full_mmu, Richard Henderson, 2024/11/14
- [PATCH v2 35/54] target/avr: Convert to TCGCPUOps.tlb_fill_align, Richard Henderson, 2024/11/14
- [PATCH v2 16/54] accel/tcg: Pass full addr to victim_tlb_hit, Richard Henderson, 2024/11/14
- [PATCH v2 23/54] accel/tcg: Check original prot bits for read in atomic_mmu_lookup, Richard Henderson, 2024/11/14
- [PATCH v2 26/54] accel/tcg: Return CPUTLBEntryFull not pointer in probe_access_full,
Richard Henderson <=
- [PATCH v2 21/54] accel/tcg: Delay plugin adjustment in probe_access_internal, Richard Henderson, 2024/11/14
- [PATCH v2 38/54] target/m68k: Convert to TCGCPUOps.tlb_fill_align, Richard Henderson, 2024/11/14
- [PATCH v2 39/54] target/m68k: Do not call tlb_set_page in helper_ptest, Richard Henderson, 2024/11/14
- [PATCH v2 19/54] accel/tcg: Remove tlb_n_used_entries_inc, Richard Henderson, 2024/11/14
- [PATCH v2 15/54] accel/tcg: Use tlb_hit_page in victim_tlb_hit, Richard Henderson, 2024/11/14