[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-ppc] [Qemu-devel] [PATCH qom-cpu v2 22/40] translate-all: Chan
From: |
Andreas Färber |
Subject: |
Re: [Qemu-ppc] [Qemu-devel] [PATCH qom-cpu v2 22/40] translate-all: Change cpu_restore_state() argument to CPUState |
Date: |
Wed, 12 Mar 2014 00:23:11 +0100 |
User-agent: |
Mozilla/5.0 (X11; Linux x86_64; rv:24.0) Gecko/20100101 Thunderbird/24.3.0 |
Hi Max,
Am 11.03.2014 16:02, schrieb Max Filippov:
> Hi Andreas,
>
> On Mon, Mar 10, 2014 at 4:15 AM, Andreas Färber <address@hidden> wrote:
>> diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
>> index 4265378..8641e5d 100644
>> --- a/target-xtensa/op_helper.c
>> +++ b/target-xtensa/op_helper.c
>> @@ -54,7 +54,7 @@ static void do_unaligned_access(CPUXtensaState *env,
>> {
>> if (xtensa_option_enabled(env->config,
>> XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
>> !xtensa_option_enabled(env->config,
>> XTENSA_OPTION_HW_ALIGNMENT)) {
>> - cpu_restore_state(env, retaddr);
>> + cpu_restore_state(ENV_GET_CPU(env), retaddr);
>
> In other patches of this series you use CPU(xtensa_env_get_cpu(env)),
> why ENV_GET_CPU here?
Shouldn't be, thanks for catching! :) I cherry-picked these patches from
v1 branch but didn't fully re-review all of them. I found a few more
ENV_GET_CPU() in this particular patch, but no further ones.
ENV_GET_CPU() is supposed to go away (or at least be limited to cputlb
code) once all common code uses CPUState; plus the env-to-actual-cpu
code is just an offset whereas ENV_GET_CPU() adds a CPU() cast, so its
use is therefore limited to the branch or singular user where it is
really needed - at least before CPU address spaces added more usages
(those patches were applied without my placement review).
diff --git a/target-sparc/helper.c b/target-sparc/helper.c
index 8d1b72d..f3c7fbf 100644
--- a/target-sparc/helper.c
+++ b/target-sparc/helper.c
@@ -71,6 +71,7 @@ void helper_tick_set_limit(void *opaque, uint64_t limit)
static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a,
target_ulong b, int cc)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
int overflow = 0;
uint64_t x0;
uint32_t x1;
@@ -79,7 +80,7 @@ static target_ulong helper_udiv_common(CPUSPARCState
*env, target_ulong a,
x1 = (b & 0xffffffff);
if (x1 == 0) {
- cpu_restore_state(ENV_GET_CPU(env), GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
}
@@ -110,6 +111,7 @@ target_ulong helper_udiv_cc(CPUSPARCState *env,
target_ulong a, target_ulong b)
static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a,
target_ulong b, int cc)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
int overflow = 0;
int64_t x0;
int32_t x1;
@@ -118,7 +120,7 @@ static target_ulong helper_sdiv_common(CPUSPARCState
*env, target_ulong a,
x1 = (b & 0xffffffff);
if (x1 == 0) {
- cpu_restore_state(ENV_GET_CPU(env), GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
}
@@ -151,7 +153,9 @@ int64_t helper_sdivx(CPUSPARCState *env, int64_t a,
int64_t b)
{
if (b == 0) {
/* Raise divide by zero trap. */
- cpu_restore_state(ENV_GET_CPU(env), GETPC());
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
} else if (b == -1) {
/* Avoid overflow trap with i386 divide insn. */
@@ -165,7 +169,9 @@ uint64_t helper_udivx(CPUSPARCState *env, uint64_t
a, uint64_t b)
{
if (b == 0) {
/* Raise divide by zero trap. */
- cpu_restore_state(ENV_GET_CPU(env), GETPC());
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
}
return a / b;
@@ -175,6 +181,7 @@ uint64_t helper_udivx(CPUSPARCState *env, uint64_t
a, uint64_t b)
target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
target_ulong src2)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
target_ulong dst;
/* Tag overflow occurs if either input has bits 0 or 1 set. */
@@ -197,13 +204,14 @@ target_ulong helper_taddcctv(CPUSPARCState *env,
target_ulong src1,
return dst;
tag_overflow:
- cpu_restore_state(ENV_GET_CPU(env), GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_TOVF);
}
target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
target_ulong src2)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
target_ulong dst;
/* Tag overflow occurs if either input has bits 0 or 1 set. */
@@ -226,7 +234,7 @@ target_ulong helper_tsubcctv(CPUSPARCState *env,
target_ulong src1,
return dst;
tag_overflow:
- cpu_restore_state(ENV_GET_CPU(env), GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_TOVF);
}
diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c
index 2359a62..fb1e2f7 100644
--- a/target-sparc/ldst_helper.c
+++ b/target-sparc/ldst_helper.c
@@ -2420,12 +2420,13 @@ static void QEMU_NORETURN
do_unaligned_access(CPUSPARCState *env,
target_ulong addr, int
is_write,
int is_user, uintptr_t
retaddr)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
#ifdef DEBUG_UNALIGNED
printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
"\n", addr, env->pc);
#endif
if (retaddr) {
- cpu_restore_state(ENV_GET_CPU(env), retaddr);
+ cpu_restore_state(CPU(cpu), retaddr);
}
helper_raise_exception(env, TT_UNALIGNED);
}
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index 8641e5d..8233443 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -52,9 +52,11 @@ static void do_unaligned_access(CPUXtensaState *env,
static void do_unaligned_access(CPUXtensaState *env,
target_ulong addr, int is_write, int is_user, uintptr_t retaddr)
{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
if (xtensa_option_enabled(env->config,
XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
!xtensa_option_enabled(env->config,
XTENSA_OPTION_HW_ALIGNMENT)) {
- cpu_restore_state(ENV_GET_CPU(env), retaddr);
+ cpu_restore_state(CPU(cpu), retaddr);
HELPER(exception_cause_vaddr)(env,
env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
}
Regards,
Andreas
--
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg, Germany
GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer; HRB 16746 AG Nürnberg