[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v6 12/18] target/ppc: Don't store VRMA SLBE persistently
From: Fabiano Rosas
Subject: Re: [PATCH v6 12/18] target/ppc: Don't store VRMA SLBE persistently
Date: Mon, 24 Feb 2020 21:25:07 -0300
David Gibson <address@hidden> writes:
> Currently, we construct the SLBE used for VRMA translations when the LPCR
> is written (which controls some bits in the SLBE), then use it later for
> translations.
>
> This is a bit complex and confusing - simplify it by simply constructing
> the SLBE directly from the LPCR when we need it.
>
> Signed-off-by: David Gibson <address@hidden>
Reviewed-by: Fabiano Rosas <address@hidden>
> ---
> target/ppc/cpu.h | 3 ---
> target/ppc/mmu-hash64.c | 28 ++++++----------------------
> 2 files changed, 6 insertions(+), 25 deletions(-)
>
> diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
> index f9871b1233..5a55fb02bd 100644
> --- a/target/ppc/cpu.h
> +++ b/target/ppc/cpu.h
> @@ -1044,9 +1044,6 @@ struct CPUPPCState {
> uint32_t flags;
> uint64_t insns_flags;
> uint64_t insns_flags2;
> -#if defined(TARGET_PPC64)
> - ppc_slb_t vrma_slb;
> -#endif
>
> int error_code;
> uint32_t pending_interrupts;
> diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> index ac21c14f68..f8bf92aa2e 100644
> --- a/target/ppc/mmu-hash64.c
> +++ b/target/ppc/mmu-hash64.c
> @@ -825,6 +825,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
> {
> CPUState *cs = CPU(cpu);
> CPUPPCState *env = &cpu->env;
> + ppc_slb_t vrma_slbe;
> ppc_slb_t *slb;
> unsigned apshift;
> hwaddr ptex;
> @@ -863,8 +864,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
> }
> } else if (ppc_hash64_use_vrma(env)) {
> /* Emulated VRMA mode */
> - slb = &env->vrma_slb;
> - if (!slb->sps) {
> + slb = &vrma_slbe;
> + if (build_vrma_slbe(cpu, slb) != 0) {
> /* Invalid VRMA setup, machine check */
> cs->exception_index = POWERPC_EXCP_MCHECK;
> env->error_code = 0;
> @@ -1012,6 +1013,7 @@ skip_slb_search:
> hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
> {
> CPUPPCState *env = &cpu->env;
> + ppc_slb_t vrma_slbe;
> ppc_slb_t *slb;
> hwaddr ptex, raddr;
> ppc_hash_pte64_t pte;
> @@ -1033,8 +1035,8 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
> return raddr | env->spr[SPR_HRMOR];
> } else if (ppc_hash64_use_vrma(env)) {
> /* Emulated VRMA mode */
> - slb = &env->vrma_slb;
> - if (!slb->sps) {
> + slb = &vrma_slbe;
> + if (build_vrma_slbe(cpu, slb) != 0) {
> return -1;
> }
> } else {
> @@ -1072,30 +1074,12 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
> cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
> }
>
> -static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
> -{
> - CPUPPCState *env = &cpu->env;
> - ppc_slb_t *slb = &env->vrma_slb;
> -
> - /* Is VRMA enabled ? */
> - if (ppc_hash64_use_vrma(env)) {
> - if (build_vrma_slbe(cpu, slb) == 0) {
> - return;
> - }
> - }
> -
> - /* Otherwise, clear it to indicate error */
> - slb->esid = slb->vsid = 0;
> - slb->sps = NULL;
> -}
> -
> void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
> {
> PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
> CPUPPCState *env = &cpu->env;
>
> env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
> - ppc_hash64_update_vrma(cpu);
> }
>
> void helper_store_lpcr(CPUPPCState *env, target_ulong val)
- Re: [PATCH v6 08/18] target/ppc: Use class fields to simplify LPCR masking, (continued)
- [PATCH v6 10/18] target/ppc: Correct RMLS table, David Gibson, 2020/02/24
- [PATCH v6 11/18] target/ppc: Only calculate RMLS derived RMA limit on demand, David Gibson, 2020/02/24
- [PATCH v6 13/18] spapr: Don't use weird units for MIN_RMA_SLOF, David Gibson, 2020/02/24
- [PATCH v6 12/18] target/ppc: Don't store VRMA SLBE persistently, David Gibson, 2020/02/24
- [PATCH v6 17/18] spapr: Clean up RMA size calculation, David Gibson, 2020/02/24
- [PATCH v6 14/18] spapr,ppc: Simplify signature of kvmppc_rma_size(), David Gibson, 2020/02/24
- [PATCH v6 09/18] target/ppc: Streamline calculation of RMA limit from LPCR[RMLS], David Gibson, 2020/02/24