[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH v2 1/4] target/ppc: Introduce ppc_radix64_xlate() for Radix tree translation
From: |
Cédric Le Goater |
Subject: |
Re: [PATCH v2 1/4] target/ppc: Introduce ppc_radix64_xlate() for Radix tree translation |
Date: |
Thu, 2 Apr 2020 08:40:28 +0200 |
User-agent: |
Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Thunderbird/68.5.0 |
On 4/2/20 3:59 AM, David Gibson wrote:
> On Wed, Apr 01, 2020 at 06:28:07PM +0200, Cédric Le Goater wrote:
>> This is moving code under a new ppc_radix64_xlate() routine shared by
>> the MMU Radix page fault handler and the 'get_phys_page_debug' PPC
>> callback. The difference being that 'get_phys_page_debug' does not
>> generate exceptions.
>>
>> The specific part of process-scoped Radix translation is moved under
>> ppc_radix64_process_scoped_xlate() in preparation of the future support
>> for partition-scoped Radix translation. Routines raising the exceptions
>> now take a 'cause_excp' bool to cover the 'get_phys_page_debug' case.
>>
>> It should be functionally equivalent.
>>
>> Signed-off-by: Suraj Jitindar Singh <address@hidden>
>> Signed-off-by: Cédric Le Goater <address@hidden>
>> ---
>> target/ppc/mmu-radix64.c | 223 ++++++++++++++++++++++-----------------
>> 1 file changed, 125 insertions(+), 98 deletions(-)
>>
>> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
>> index d2422d1c54c9..410376fbeb65 100644
>> --- a/target/ppc/mmu-radix64.c
>> +++ b/target/ppc/mmu-radix64.c
>> @@ -69,11 +69,16 @@ static bool
>> ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
>> return true;
>> }
>>
>> -static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
>> +static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr,
>> + bool cause_excp)
>> {
>> CPUState *cs = CPU(cpu);
>> CPUPPCState *env = &cpu->env;
>>
>> + if (!cause_excp) {
>> + return;
>> + }
>
> Hrm... adding a parameter which makes this function a no-op seems an
> odd choice, rather than putting an if in the caller.
because it removes all the 'if's in the callers, which I find a good
reason.
Would you rather have a version with the 'if' in the callers?
C.
>
>> +
>> if (rwx == 2) { /* Instruction Segment Interrupt */
>> cs->exception_index = POWERPC_EXCP_ISEG;
>> } else { /* Data Segment Interrupt */
>> @@ -84,11 +89,15 @@ static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int
>> rwx, vaddr eaddr)
>> }
>>
>> static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
>> - uint32_t cause)
>> + uint32_t cause, bool cause_excp)
>> {
>> CPUState *cs = CPU(cpu);
>> CPUPPCState *env = &cpu->env;
>>
>> + if (!cause_excp) {
>> + return;
>> + }
>> +
>> if (rwx == 2) { /* Instruction Storage Interrupt */
>> cs->exception_index = POWERPC_EXCP_ISI;
>> env->error_code = cause;
>> @@ -219,17 +228,118 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t
>> lpid, ppc_v3_pate_t *pate)
>> return true;
>> }
>>
>> +static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
>> + vaddr eaddr, uint64_t pid,
>> + ppc_v3_pate_t pate, hwaddr
>> *g_raddr,
>> + int *g_prot, int *g_page_size,
>> + bool cause_excp)
>> +{
>> + CPUState *cs = CPU(cpu);
>> + uint64_t offset, size, prtbe_addr, prtbe0, pte;
>> + int fault_cause = 0;
>> + hwaddr pte_addr;
>> +
>> + /* Index Process Table by PID to Find Corresponding Process Table Entry
>> */
>> + offset = pid * sizeof(struct prtb_entry);
>> + size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
>> + if (offset >= size) {
>> + /* offset exceeds size of the process table */
>> + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE, cause_excp);
>> + return 1;
>> + }
>> + prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
>> + prtbe0 = ldq_phys(cs->as, prtbe_addr);
>> +
>> + /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
>> + *g_page_size = PRTBE_R_GET_RTS(prtbe0);
>> + pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
>> + prtbe0 & PRTBE_R_RPDB, prtbe0 &
>> PRTBE_R_RPDS,
>> + g_raddr, g_page_size, &fault_cause,
>> &pte_addr);
>> +
>> + if (!(pte & R_PTE_VALID) ||
>> + ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot)) {
>> + /* No valid pte or access denied due to protection */
>> + ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause, cause_excp);
>> + return 1;
>> + }
>> +
>> + ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);
>> +
>> + return 0;
>> +}
>> +
>> +static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
>> + bool relocation,
>> + hwaddr *raddr, int *psizep, int *protp,
>> + bool cause_excp)
>> +{
>> + uint64_t lpid = 0, pid = 0;
>> + ppc_v3_pate_t pate;
>> + int psize, prot;
>> + hwaddr g_raddr;
>> +
>> + /* Virtual Mode Access - get the fully qualified address */
>> + if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid,
>> &pid)) {
>> + ppc_radix64_raise_segi(cpu, rwx, eaddr, cause_excp);
>> + return 1;
>> + }
>> +
>> + /* Get Process Table */
>> + if (cpu->vhyp) {
>> + PPCVirtualHypervisorClass *vhc;
>> + vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
>> + vhc->get_pate(cpu->vhyp, &pate);
>> + } else {
>> + if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
>> + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE, cause_excp);
>> + return 1;
>> + }
>> + if (!validate_pate(cpu, lpid, &pate)) {
>> + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG,
>> + cause_excp);
>> + return 1;
>> + }
>> + /* We don't support guest mode yet */
>> + if (lpid != 0) {
>> + error_report("PowerNV guest support Unimplemented");
>> + exit(1);
>> + }
>> + }
>> +
>> + *psizep = INT_MAX;
>> + *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
>> +
>> + /*
>> + * Perform process-scoped translation if relocation enabled.
>> + *
>> + * - Translates an effective address to a host real address in
>> + * quadrants 0 and 3 when HV=1.
>> + */
>> + if (relocation) {
>> + int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
>> + pate, &g_raddr, &prot,
>> + &psize, cause_excp);
>> + if (ret) {
>> + return ret;
>> + }
>> + *psizep = MIN(*psizep, psize);
>> + *protp &= prot;
>> + } else {
>> + g_raddr = eaddr & R_EADDR_MASK;
>> + }
>> +
>> + *raddr = g_raddr;
>> + return 0;
>> +}
>> +
>> int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
>> int mmu_idx)
>> {
>> CPUState *cs = CPU(cpu);
>> CPUPPCState *env = &cpu->env;
>> - PPCVirtualHypervisorClass *vhc;
>> - hwaddr raddr, pte_addr;
>> - uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
>> - int page_size, prot, fault_cause = 0;
>> - ppc_v3_pate_t pate;
>> + int page_size, prot;
>> bool relocation;
>> + hwaddr raddr;
>>
>> assert(!(msr_hv && cpu->vhyp));
>> assert((rwx == 0) || (rwx == 1) || (rwx == 2));
>> @@ -262,55 +372,12 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu,
>> vaddr eaddr, int rwx,
>> TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
>> }
>>
>> - /* Virtual Mode Access - get the fully qualified address */
>> - if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
>> - ppc_radix64_raise_segi(cpu, rwx, eaddr);
>> - return 1;
>> - }
>> -
>> - /* Get Process Table */
>> - if (cpu->vhyp) {
>> - vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
>> - vhc->get_pate(cpu->vhyp, &pate);
>> - } else {
>> - if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
>> - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
>> - return 1;
>> - }
>> - if (!validate_pate(cpu, lpid, &pate)) {
>> - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
>> - }
>> - /* We don't support guest mode yet */
>> - if (lpid != 0) {
>> - error_report("PowerNV guest support Unimplemented");
>> - exit(1);
>> - }
>> - }
>> -
>> - /* Index Process Table by PID to Find Corresponding Process Table Entry
>> */
>> - offset = pid * sizeof(struct prtb_entry);
>> - size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
>> - if (offset >= size) {
>> - /* offset exceeds size of the process table */
>> - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
>> - return 1;
>> - }
>> - prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
>> -
>> - /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
>> - page_size = PRTBE_R_GET_RTS(prtbe0);
>> - pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
>> - prtbe0 & PRTBE_R_RPDB, prtbe0 &
>> PRTBE_R_RPDS,
>> - &raddr, &page_size, &fault_cause,
>> &pte_addr);
>> - if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot))
>> {
>> - /* Couldn't get pte or access denied due to protection */
>> - ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
>> + /* Translate eaddr to raddr (where raddr is addr qemu needs for access)
>> */
>> + if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
>> + &page_size, &prot, 1)) {
>> return 1;
>> }
>>
>> - /* Update Reference and Change Bits */
>> - ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);
>> -
>> tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
>> prot, mmu_idx, 1UL << page_size);
>> return 0;
>> @@ -318,58 +385,18 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu,
>> vaddr eaddr, int rwx,
>>
>> hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
>> {
>> - CPUState *cs = CPU(cpu);
>> CPUPPCState *env = &cpu->env;
>> - PPCVirtualHypervisorClass *vhc;
>> - hwaddr raddr, pte_addr;
>> - uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
>> - int page_size, fault_cause = 0;
>> - ppc_v3_pate_t pate;
>> + int psize, prot;
>> + hwaddr raddr;
>>
>> /* Handle Real Mode */
>> - if (msr_dr == 0) {
>> + if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
>> /* In real mode top 4 effective addr bits (mostly) ignored */
>> return eaddr & 0x0FFFFFFFFFFFFFFFULL;
>> }
>>
>> - /* Virtual Mode Access - get the fully qualified address */
>> - if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
>> - return -1;
>> - }
>> -
>> - /* Get Process Table */
>> - if (cpu->vhyp) {
>> - vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
>> - vhc->get_pate(cpu->vhyp, &pate);
>> - } else {
>> - if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
>> - return -1;
>> - }
>> - if (!validate_pate(cpu, lpid, &pate)) {
>> - return -1;
>> - }
>> - /* We don't support guest mode yet */
>> - if (lpid != 0) {
>> - error_report("PowerNV guest support Unimplemented");
>> - exit(1);
>> - }
>> - }
>> -
>> - /* Index Process Table by PID to Find Corresponding Process Table Entry
>> */
>> - offset = pid * sizeof(struct prtb_entry);
>> - size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
>> - if (offset >= size) {
>> - /* offset exceeds size of the process table */
>> - return -1;
>> - }
>> - prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
>> -
>> - /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
>> - page_size = PRTBE_R_GET_RTS(prtbe0);
>> - pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
>> - prtbe0 & PRTBE_R_RPDB, prtbe0 &
>> PRTBE_R_RPDS,
>> - &raddr, &page_size, &fault_cause,
>> &pte_addr);
>> - if (!pte) {
>> + if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
>> + &prot, 0)) {
>> return -1;
>> }
>>
>