[Qemu-devel] [RFC PATCH V6 10/18] tcg: switch on multithread.
From: fred . konrad
Subject: [Qemu-devel] [RFC PATCH V6 10/18] tcg: switch on multithread.
Date: Fri, 26 Jun 2015 16:47:25 +0200
From: KONRAD Frederic <address@hidden>
This switches on multithreading: each vCPU now runs in its own TCG thread.
Signed-off-by: KONRAD Frederic <address@hidden>
Changes V5 -> V6:
* make qemu_cpu_kick call qemu_cpu_kick_thread in the TCG case as well.
---
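(Not part of the patch: for context, this series replaces the single
round-robin TCG thread with one host thread per vCPU, each parked on its
own halt condition and kicked individually. Below is a minimal,
self-contained sketch of that model in plain C11 + pthreads; every name
in it (VCPU, vcpu_thread_fn, cpu_kick, global_lock) is invented for the
example and only mirrors the shape of the cpus.c code, not its API.)

/*
 * Illustration only -- one host thread per vCPU, each sleeping on its
 * own halt condition and kicked individually.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NR_VCPUS 2

typedef struct VCPU {
    int index;
    bool stopped;
    pthread_t thread;
    pthread_cond_t halt_cond;            /* per-vCPU, as in this series */
} VCPU;

/* stand-in for qemu_global_mutex (the iothread lock) */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static VCPU vcpus[NR_VCPUS];

static void *vcpu_thread_fn(void *arg)
{
    VCPU *cpu = arg;

    pthread_mutex_lock(&global_lock);
    for (;;) {
        if (!cpu->stopped) {
            /* stand-in for tcg_exec_all(cpu): run this vCPU only */
            printf("vCPU %d executes a slice\n", cpu->index);
        }
        /*
         * Stand-in for qemu_tcg_wait_io_event(cpu): sleep on OUR
         * condition.  The real code pairs this with per-CPU request
         * flags so that kicks arriving mid-slice are not lost.
         */
        pthread_cond_wait(&cpu->halt_cond, &global_lock);
    }
    return NULL;
}

static void cpu_kick(VCPU *cpu)
{
    /* after this patch, TCG vCPUs are kicked individually too */
    pthread_cond_broadcast(&cpu->halt_cond);
}

int main(void)
{
    for (int i = 0; i < NR_VCPUS; i++) {
        vcpus[i].index = i;
        pthread_cond_init(&vcpus[i].halt_cond, NULL);
        pthread_create(&vcpus[i].thread, NULL, vcpu_thread_fn, &vcpus[i]);
    }
    for (int i = 0; i < 4; i++) {
        usleep(100 * 1000);
        cpu_kick(&vcpus[i % NR_VCPUS]);
    }
    return 0;                            /* demo: exit ends the threads */
}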
cpus.c | 95 ++++++++++++++++++++++++------------------------------------------
1 file changed, 34 insertions(+), 61 deletions(-)
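(Also illustration only: the core scheduling change is in tcg_exec_all()
below -- instead of one thread iterating over every vCPU, each thread now
drives exactly one. Reduced here to a runnable toy; CPU, run_one_slice
and the list layout are hypothetical, not QEMU code.)

#include <stdbool.h>
#include <stdio.h>

typedef struct CPU {
    int index;
    bool exit_request;
    struct CPU *next;
} CPU;

static int slices;

static void run_one_slice(CPU *cpu)
{
    printf("vCPU %d runs a slice\n", cpu->index);
    if (++slices >= 4) {
        cpu->exit_request = true;   /* toy: pretend we were asked to stop */
    }
}

/* Before: the single TCG thread round-robins over every vCPU. */
static void tcg_exec_all_old(CPU *first_cpu)
{
    for (CPU *cpu = first_cpu; cpu && !first_cpu->exit_request;
         cpu = cpu->next) {
        run_one_slice(cpu);
    }
    first_cpu->exit_request = false;
}

/* After: each vCPU thread drives only its own CPU. */
static void tcg_exec_all_new(CPU *cpu)
{
    while (!cpu->exit_request) {
        run_one_slice(cpu);
    }
    cpu->exit_request = false;
}

int main(void)
{
    CPU c1 = { .index = 1 };
    CPU c0 = { .index = 0, .next = &c1 };

    puts("-- old single-thread model --");
    tcg_exec_all_old(&c0);          /* one pass over the whole list */

    slices = 0;
    puts("-- new per-thread model --");
    tcg_exec_all_new(&c0);          /* this thread's vCPU only */
    return 0;
}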
diff --git a/cpus.c b/cpus.c
index 0291620..08267ed 100644
--- a/cpus.c
+++ b/cpus.c
@@ -65,7 +65,6 @@
#endif /* CONFIG_LINUX */
-static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;
@@ -820,8 +819,6 @@ static unsigned iothread_requesting_mutex;
static QemuThread io_thread;
-static QemuThread *tcg_cpu_thread;
-
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
@@ -928,10 +925,13 @@ static void qemu_wait_io_event_common(CPUState *cpu)
static void qemu_tcg_wait_io_event(CPUState *cpu)
{
- while (all_cpu_threads_idle()) {
- /* Start accounting real time to the virtual clock if the CPUs
- are idle. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+ while (cpu_thread_is_idle(cpu)) {
+ /* Start accounting real time to the virtual clock if the CPUs
+ * are idle.
+ */
+ if ((all_cpu_threads_idle()) && (cpu->cpu_index == 0)) {
+ qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+ }
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
@@ -939,9 +939,7 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
}
- CPU_FOREACH(cpu) {
- qemu_wait_io_event_common(cpu);
- }
+ qemu_wait_io_event_common(cpu);
}
static void qemu_kvm_wait_io_event(CPUState *cpu)
@@ -1033,7 +1031,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
#endif
}
-static void tcg_exec_all(void);
+static void tcg_exec_all(CPUState *cpu);
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
@@ -1044,37 +1042,26 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
qemu_thread_get_self(cpu->thread);
qemu_mutex_lock_iothread();
- CPU_FOREACH(cpu) {
- cpu->thread_id = qemu_get_thread_id();
- cpu->created = true;
- cpu->can_do_io = 1;
- }
- qemu_cond_signal(&qemu_cpu_cond);
-
- /* wait for initial kick-off after machine start */
- while (first_cpu->stopped) {
- qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
-
- /* process any pending work */
- CPU_FOREACH(cpu) {
- qemu_wait_io_event_common(cpu);
- }
- }
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ cpu->can_do_io = 1;
- /* process any pending work */
- exit_request = 1;
+ qemu_cond_signal(&qemu_cpu_cond);
while (1) {
- tcg_exec_all();
+ if (!cpu->stopped) {
+ tcg_exec_all(cpu);
- if (use_icount) {
- int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+ if (use_icount) {
+ int64_t deadline =
+ qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
- if (deadline == 0) {
- qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ if (deadline == 0) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ }
}
}
- qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
+ qemu_tcg_wait_io_event(cpu);
}
return NULL;
@@ -1122,7 +1109,7 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
- if (!tcg_enabled() && !cpu->thread_kicked) {
+ if (!cpu->thread_kicked) {
qemu_cpu_kick_thread(cpu);
cpu->thread_kicked = true;
}
@@ -1232,23 +1219,15 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
-
- /* share a single thread for all cpus with TCG */
- if (!tcg_cpu_thread) {
- cpu->thread = g_malloc0(sizeof(QemuThread));
- snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
- cpu->cpu_index);
- qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
- cpu, QEMU_THREAD_JOINABLE);
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG", cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn, cpu,
+ QEMU_THREAD_JOINABLE);
#ifdef _WIN32
- cpu->hThread = qemu_thread_get_handle(cpu->thread);
+ cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
- while (!cpu->created) {
- qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
- }
- tcg_cpu_thread = cpu->thread;
- } else {
- cpu->thread = tcg_cpu_thread;
+ while (!cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
}
}
@@ -1393,21 +1372,15 @@ static int tcg_cpu_exec(CPUArchState *env)
return ret;
}
-static void tcg_exec_all(void)
+static void tcg_exec_all(CPUState *cpu)
{
int r;
+ CPUArchState *env = cpu->env_ptr;
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
- if (next_cpu == NULL) {
- next_cpu = first_cpu;
- }
- for (; next_cpu != NULL && !first_cpu->exit_request;
- next_cpu = CPU_NEXT(next_cpu)) {
- CPUState *cpu = next_cpu;
- CPUArchState *env = cpu->env_ptr;
-
+ while (!cpu->exit_request) {
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
(cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
@@ -1422,7 +1395,7 @@ static void tcg_exec_all(void)
}
}
- first_cpu->exit_request = 0;
+ cpu->exit_request = 0;
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
--
1.9.0