From: Richard Henderson
Subject: [PATCH v2 33/54] accel/tcg: Remove CPUTLBDesc.fulltlb
Date: Thu, 14 Nov 2024 08:01:09 -0800
This array is now write-only, and may be removed.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h |  1 -
 accel/tcg/cputlb.c    | 34 +++++++---------------------------
 2 files changed, 7 insertions(+), 28 deletions(-)
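
In outline: patch 32/54 in this series linked each fast-TLB entry to its
CPUTLBEntryTree node, so the CPUTLBEntryFull data can live in the node itself
and the parallel fulltlb[] array no longer has any readers. A rough sketch of
the before/after shapes, using stand-in types rather than the real QEMU
definitions (only the field names `itree', `copy' and `full' are taken from
the diff below; everything else is illustrative):

    #include <stdint.h>

    /* Sketch only: stand-in types, not the real QEMU definitions. */
    typedef struct { uint64_t addr_read, addr_write, addr_code, addend; } CPUTLBEntry;
    typedef struct { uint64_t xlat_section, phys_addr; } CPUTLBEntryFull;
    typedef struct { uint64_t start, last; } IntervalTreeNode;

    /* Before this patch: per-entry full data lives in a parallel array,
     * with fulltlb[i] paired to fast->table[i] via tlb_index(). */
    typedef struct {
        CPUTLBEntryFull *fulltlb;
    } CPUTLBDescBefore;

    /* After this patch: the tree node that already backs each fast-TLB
     * entry carries the full data, so the array and tlb_index() both go. */
    typedef struct {
        IntervalTreeNode itree;     /* keyed by guest-virtual page range */
        CPUTLBEntry copy;           /* master copy of the fast-TLB entry */
        CPUTLBEntryFull full;       /* replaces desc->fulltlb[index] */
    } CPUTLBEntryTreeSketch;
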
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 4364ddb1db..5c069f2a00 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -219,7 +219,6 @@ typedef struct CPUTLBDesc {
     /* maximum number of entries observed in the window */
     size_t window_max_entries;
     size_t n_used_entries;
-    CPUTLBEntryFull *fulltlb;
     /* All active tlb entries for this address space. */
     IntervalTreeRoot iroot;
 } CPUTLBDesc;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 3282436752..7f63dc3fd8 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -149,13 +149,6 @@ static inline CPUTLBEntry *tlbfast_entry(CPUTLBDescFast *fast, vaddr addr)
     return fast->table + tlbfast_index(fast, addr);
 }
 
-/* Find the TLB index corresponding to the mmu_idx + address pair. */
-static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
-                                  vaddr addr)
-{
-    return tlbfast_index(&cpu->neg.tlb.f[mmu_idx], addr);
-}
-
 /* Find the TLB entry corresponding to the mmu_idx + address pair. */
 static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
                                      vaddr addr)
@@ -270,22 +263,20 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
     }
 
     g_free(fast->table);
-    g_free(desc->fulltlb);
 
     tlb_window_reset(desc, now, 0);
     /* desc->n_used_entries is cleared by the caller */
     fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_try_new(CPUTLBEntry, new_size);
-    desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
 
     /*
-     * If the allocations fail, try smaller sizes. We just freed some
+     * If the allocation fails, try smaller sizes. We just freed some
      * memory, so going back to half of new_size has a good chance of working.
      * Increased memory pressure elsewhere in the system might cause the
      * allocations to fail though, so we progressively reduce the allocation
      * size, aborting if we cannot even allocate the smallest TLB we support.
      */
-    while (fast->table == NULL || desc->fulltlb == NULL) {
+    while (fast->table == NULL) {
         if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
             error_report("%s: %s", __func__, strerror(errno));
             abort();
@@ -294,9 +285,7 @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
         fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 
         g_free(fast->table);
-        g_free(desc->fulltlb);
         fast->table = g_try_new(CPUTLBEntry, new_size);
-        desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
     }
 }
 
@@ -350,7 +339,6 @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
     desc->n_used_entries = 0;
     fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
     fast->table = g_new(CPUTLBEntry, n_entries);
-    desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
     memset(&desc->iroot, 0, sizeof(desc->iroot));
     tlb_mmu_flush_locked(desc, fast);
 }
@@ -372,15 +360,9 @@ void tlb_init(CPUState *cpu)
 void tlb_destroy(CPUState *cpu)
 {
-    int i;
-
     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
-    for (i = 0; i < NB_MMU_MODES; i++) {
-        CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
-        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
-
-        g_free(fast->table);
-        g_free(desc->fulltlb);
+    for (int i = 0; i < NB_MMU_MODES; i++) {
+        g_free(cpu->neg.tlb.f[i].table);
         interval_tree_free_nodes(&cpu->neg.tlb.d[i].iroot,
                                  offsetof(CPUTLBEntryTree, itree));
     }
 }
@@ -1061,7 +1043,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     CPUTLB *tlb = &cpu->neg.tlb;
     CPUTLBDesc *desc = &tlb->d[mmu_idx];
     MemoryRegionSection *section;
-    unsigned int index, read_flags, write_flags;
+    unsigned int read_flags, write_flags;
     uintptr_t addend;
     CPUTLBEntry *te;
     CPUTLBEntryTree *node;
@@ -1140,7 +1122,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
                                               TARGET_PAGE_SIZE);
 
-    index = tlb_index(cpu, mmu_idx, addr_page);
     te = tlb_entry(cpu, mmu_idx, addr_page);
 
     /*
@@ -1179,8 +1160,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      * subtract here is that of the page base, and not the same as the
      * vaddr we add back in io_prepare()/get_page_addr_code().
      */
-    desc->fulltlb[index] = *full;
-    full = &desc->fulltlb[index];
+    node->full = *full;
+    full = &node->full;
     full->xlat_section = iotlb - addr_page;
     full->phys_addr = paddr_page;
 
@@ -1203,7 +1184,6 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     tlb_set_compare(full, &node->copy, addr_page, write_flags,
                     MMU_DATA_STORE, prot & PAGE_WRITE);
 
-    node->full = *full;
     copy_tlb_helper_locked(te, &node->copy);
     desc->n_used_entries++;
     qemu_spin_unlock(&tlb->c.lock);
--
2.43.0
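
As a side note, the retry policy that remains in tlb_mmu_resize_locked() after
this patch can be read in one piece as the standalone sketch below. It uses
stub types and allocators in place of QEMU's g_try_new()/error_report(), and
the new_size halving step between the two hunks above is assumed from context
rather than shown in the diff:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CPU_TLB_DYN_MIN_BITS 6                   /* illustrative minimum */
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    typedef struct { uint64_t pad[4]; } CPUTLBEntry; /* stand-in */

    /* Stub standing in for g_try_new(CPUTLBEntry, n). */
    static CPUTLBEntry *try_new_entries(size_t n)
    {
        return calloc(n, sizeof(CPUTLBEntry));
    }

    /* Halve the request until the smallest supported TLB fits in memory,
     * aborting only if even the minimum allocation fails. */
    static CPUTLBEntry *alloc_table(size_t new_size)
    {
        CPUTLBEntry *table = try_new_entries(new_size);

        while (table == NULL) {
            if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
                fprintf(stderr, "alloc_table: %s\n", strerror(errno));
                abort();
            }
            /* assumed halving step, floored at the minimum size */
            new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
            table = try_new_entries(new_size);
        }
        return table;
    }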