From: David Gibson
Subject: Re: [PATCH v2 6/7] spapr_numa: move NVLink2 associativity handling to spapr_numa.c
Date: Thu, 3 Sep 2020 11:56:32 +1000

On Tue, Sep 01, 2020 at 09:56:44AM -0300, Daniel Henrique Barboza wrote:
> This patch adds a new spapr_numa_write_assoc_nvlink2() helper
> to handle the ibm,associativity property for NVLink2 GPUs.
> 
> Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>

It might be nice to "precompute" the assoc arrays for the gpus as you
now do for the regular numa nodes.  That can be a later revision, though.
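
Just to sketch what I mean (untested, and the names below -- gpu_assoc,
spapr_numa_gpu_assoc_init() -- are made up for illustration, not anything
in the tree):

    /* Precompute once, e.g. at machine init, instead of rebuilding the
     * array on every device tree write.  NVGPU_MAX_NUM here just stands
     * in for whatever bounds the number of GPU slots. */
    static uint32_t gpu_assoc[NVGPU_MAX_NUM][NUMA_ASSOC_SIZE];

    static void spapr_numa_gpu_assoc_init(int slot, int numa_id)
    {
        int i;

        gpu_assoc[slot][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
        for (i = 1; i < NUMA_ASSOC_SIZE; i++) {
            gpu_assoc[slot][i] = cpu_to_be32(numa_id);
        }
    }

spapr_numa_write_assoc_nvlink2() would then boil down to a single
fdt_setprop() of gpu_assoc[slot] (plus the pre_5_1_assoc tweak).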

> ---
>  hw/ppc/spapr_numa.c         | 23 +++++++++++++++++++++++
>  hw/ppc/spapr_pci_nvlink2.c  | 19 ++-----------------
>  include/hw/ppc/spapr_numa.h |  3 +++
>  3 files changed, 28 insertions(+), 17 deletions(-)
> 
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index 9eb4bdbe80..785cc24624 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -15,6 +15,8 @@
>  #include "hw/ppc/spapr_numa.h"
>  #include "hw/ppc/fdt.h"
>  
> +/* Moved from hw/ppc/spapr_pci_nvlink2.c */
> +#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
>  
>  void spapr_numa_associativity_init(MachineState *machine)
>  {
> @@ -114,6 +116,27 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
>      return ret;
>  }
>  
> +void spapr_numa_write_assoc_nvlink2(void *fdt, int offset, int numa_id,
> +                                    SpaprPhbState *sphb)
> +{
> +    uint32_t associativity[NUMA_ASSOC_SIZE];
> +    int i;
> +
> +    associativity[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
> +    for (i = 1; i < NUMA_ASSOC_SIZE; i++) {
> +        associativity[i] = cpu_to_be32(numa_id);
> +    }
> +
> +    if (sphb->pre_5_1_assoc) {
> +        associativity[1] = SPAPR_GPU_NUMA_ID;
> +        associativity[2] = SPAPR_GPU_NUMA_ID;
> +        associativity[3] = SPAPR_GPU_NUMA_ID;
> +    }
> +
> +    _FDT((fdt_setprop(fdt, offset, "ibm,associativity", associativity,
> +                      sizeof(associativity))));
> +}
> +
>  /*
>   * Helper that writes ibm,associativity-reference-points and
>   * max-associativity-domains in the RTAS pointed by @rtas
> diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
> index 76ae77ebc8..662a0af990 100644
> --- a/hw/ppc/spapr_pci_nvlink2.c
> +++ b/hw/ppc/spapr_pci_nvlink2.c
> @@ -29,6 +29,7 @@
>  #include "qemu/error-report.h"
>  #include "hw/ppc/fdt.h"
>  #include "hw/pci/pci_bridge.h"
> +#include "hw/ppc/spapr_numa.h"
>  
>  #define PHANDLE_PCIDEV(phb, pdev)    (0x12000000 | \
>                                       (((phb)->index) << 16) | ((pdev)->devfn))
> @@ -37,8 +38,6 @@
>  #define PHANDLE_NVLINK(phb, gn, nn)  (0x00130000 | (((phb)->index) << 8) | \
>                                       ((gn) << 4) | (nn))
>  
> -#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))
> -
>  typedef struct SpaprPhbPciNvGpuSlot {
>          uint64_t tgt;
>          uint64_t gpa;
> @@ -360,13 +359,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
>          Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
>                                                      "nvlink2-mr[0]",
>                                                      &error_abort);
> -        uint32_t associativity[] = {
> -            cpu_to_be32(0x4),
> -            cpu_to_be32(nvslot->numa_id),
> -            cpu_to_be32(nvslot->numa_id),
> -            cpu_to_be32(nvslot->numa_id),
> -            cpu_to_be32(nvslot->numa_id)
> -        };
>          uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
>          uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
>          char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
> @@ -376,14 +368,7 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
>          _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
>          _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
>  
> -        if (sphb->pre_5_1_assoc) {
> -            associativity[1] = SPAPR_GPU_NUMA_ID;
> -            associativity[2] = SPAPR_GPU_NUMA_ID;
> -            associativity[3] = SPAPR_GPU_NUMA_ID;
> -        }
> -
> -        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
> -                          sizeof(associativity))));
> +        spapr_numa_write_assoc_nvlink2(fdt, off, nvslot->numa_id, sphb);
>  
>          _FDT((fdt_setprop_string(fdt, off, "compatible",
>                                   "ibm,coherent-device-memory")));
> diff --git a/include/hw/ppc/spapr_numa.h b/include/hw/ppc/spapr_numa.h
> index f6127501a6..b6e0721b07 100644
> --- a/include/hw/ppc/spapr_numa.h
> +++ b/include/hw/ppc/spapr_numa.h
> @@ -15,6 +15,7 @@
>  
>  #include "hw/boards.h"
>  #include "hw/ppc/spapr.h"
> +#include "hw/pci-host/spapr.h"
>  
>  void spapr_numa_associativity_init(MachineState *machine);
>  void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas);
> @@ -24,6 +25,8 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
>                              int offset, PowerPCCPU *cpu);
>  int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
>                                           int offset);
> +void spapr_numa_write_assoc_nvlink2(void *fdt, int offset, int numa_id,
> +                                    SpaprPhbState *sphb);
>  
>  
>  #endif /* HW_SPAPR_NUMA_H */

-- 
David Gibson                    | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au  | minimalist, thank you.  NOT _the_ _other_
                                | _way_ _around_!
http://www.ozlabs.org/~dgibson
