[RFC PATCH 4/7] accel/tcg: Use CPU_FOREACH_TCG()
From: Philippe Mathieu-Daudé <philmd@linaro.org>
Subject: [RFC PATCH 4/7] accel/tcg: Use CPU_FOREACH_TCG()
Date: Mon, 6 Jan 2025 21:02:55 +0100
Only iterate over TCG vCPUs when running TCG-specific code.
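Context for reviewers reading this patch in isolation: CPU_FOREACH_TCG() comes
from the earlier patches in this series (AccelOpsClass::get_cpus_queue() in
2/7, tcg_get_cpus_queue() in 3/7), which are not reproduced here. A minimal
sketch of what such an iterator could look like, assuming a per-accelerator
queue named tcg_cpus_queue; the names and exact shape are illustrative, not
the definitions from this series:

    /* Illustrative sketch only; the real definitions land in patches 2/7 and 3/7.
     * Requires "hw/core/cpu.h" (CPUTailQ, CPUState) and "qemu/rcu_queue.h".
     */
    static CPUTailQ tcg_cpus_queue = QTAILQ_HEAD_INITIALIZER(tcg_cpus_queue);

    static CPUTailQ *tcg_get_cpus_queue(void)
    {
        return &tcg_cpus_queue;
    }

    /* Same RCU-safe traversal as CPU_FOREACH(), restricted to TCG vCPUs. */
    #define CPU_FOREACH_TCG(cpu) \
        QTAILQ_FOREACH_RCU(cpu, tcg_get_cpus_queue(), node)

With such a definition in place, the call sites below only need to swap
CPU_FOREACH() for CPU_FOREACH_TCG(), so vCPUs managed by other accelerators
are never handed TCG work such as TLB flushes or jump-cache invalidation.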
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
accel/tcg/cputlb.c | 7 ++++---
accel/tcg/monitor.c | 3 ++-
accel/tcg/tb-maint.c | 7 ++++---
accel/tcg/tcg-accel-ops-rr.c | 10 +++++-----
accel/tcg/tcg-accel-ops.c | 8 ++++----
5 files changed, 19 insertions(+), 16 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b4ccf0cdcb7..06f34df808b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -48,6 +48,7 @@
#endif
#include "tcg/tcg-ldst.h"
#include "tcg/oversized-guest.h"
+#include "tcg-accel-ops.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -368,7 +369,7 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
{
CPUState *cpu;
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
if (cpu != src) {
async_run_on_cpu(cpu, fn, d);
}
@@ -646,7 +647,7 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
TLBFlushPageByMMUIdxData *d;
/* Allocate a separate data block for each destination cpu. */
- CPU_FOREACH(dst_cpu) {
+ CPU_FOREACH_TCG(dst_cpu) {
if (dst_cpu != src_cpu) {
d = g_new(TLBFlushPageByMMUIdxData, 1);
d->addr = addr;
@@ -839,7 +840,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
d.bits = bits;
/* Allocate a separate data block for each destination cpu. */
- CPU_FOREACH(dst_cpu) {
+ CPU_FOREACH_TCG(dst_cpu) {
if (dst_cpu != src_cpu) {
p = g_memdup(&d, sizeof(d));
async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c
index ae1dbeb79f8..98bd937ae20 100644
--- a/accel/tcg/monitor.c
+++ b/accel/tcg/monitor.c
@@ -19,6 +19,7 @@
#include "tcg/tcg.h"
#include "internal-common.h"
#include "tb-context.h"
+#include "tcg-accel-ops.h"
static void dump_drift_info(GString *buf)
@@ -131,7 +132,7 @@ static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
CPUState *cpu;
size_t full = 0, part = 0, elide = 0;
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 3f1bebf6ab5..8598c59654f 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -36,6 +36,7 @@
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
+#include "tcg-accel-ops.h"
/* List iterators for lists of tagged pointers in TranslationBlock. */
@@ -771,7 +772,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
}
did_flush = true;
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
tcg_flush_jmp_cache(cpu);
}
@@ -885,13 +886,13 @@ static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
if (tb_cflags(tb) & CF_PCREL) {
/* A TB may be at any virtual address */
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
tcg_flush_jmp_cache(cpu);
}
} else {
uint32_t h = tb_jmp_cache_hash_func(tb->pc);
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
CPUJumpCache *jc = cpu->tb_jmp_cache;
if (qatomic_read(&jc->array[h].tb) == tb) {
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 028b385af9a..e5ce285efb9 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -42,7 +42,7 @@ void rr_kick_vcpu_thread(CPUState *unused)
{
CPUState *cpu;
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
cpu_exit(cpu);
};
}
@@ -116,7 +116,7 @@ static void rr_wait_io_event(void)
rr_start_kick_timer();
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
qemu_wait_io_event_common(cpu);
}
}
@@ -129,7 +129,7 @@ static void rr_deal_with_unplugged_cpus(void)
{
CPUState *cpu;
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
if (cpu->unplug && !cpu_can_run(cpu)) {
tcg_cpu_destroy(cpu);
break;
@@ -160,7 +160,7 @@ static int rr_cpu_count(void)
if (cpu_list_generation_id_get() != last_gen_id) {
cpu_count = 0;
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
++cpu_count;
}
last_gen_id = cpu_list_generation_id_get();
@@ -201,7 +201,7 @@ static void *rr_cpu_thread_fn(void *arg)
qemu_cond_wait_bql(first_cpu->halt_cond);
/* process any pending work */
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
current_cpu = cpu;
qemu_wait_io_event_common(cpu);
}
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 1fb077f7b38..371bbaa0307 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -144,7 +144,7 @@ static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
switch (type) {
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
if (err) {
break;
@@ -154,7 +154,7 @@ static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
case GDB_WATCHPOINT_WRITE:
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
err = cpu_watchpoint_insert(cpu, addr, len,
xlat_gdb_type(cpu, type), NULL);
if (err) {
@@ -175,7 +175,7 @@ static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
switch (type) {
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
if (err) {
break;
@@ -185,7 +185,7 @@ static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
case GDB_WATCHPOINT_WRITE:
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
- CPU_FOREACH(cpu) {
+ CPU_FOREACH_TCG(cpu) {
err = cpu_watchpoint_remove(cpu, addr, len,
xlat_gdb_type(cpu, type));
if (err) {
--
2.47.1