
Re: [PATCH] hw/arm/virt: Remove virt machine state 'smp_cpus'


From: Ying Fang
Subject: Re: [PATCH] hw/arm/virt: Remove virt machine state 'smp_cpus'
Date: Wed, 16 Dec 2020 17:01:24 +0800
User-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Thunderbird/78.6.0



On 12/16/2020 1:48 AM, Andrew Jones wrote:
The virt machine's 'smp_cpus' and machine->smp.cpus must always have the
same value, and anywhere we have the virt machine state we also have the
machine state, so let's remove the redundancy. Also, to make it easier to
see that machine->smp is the true source for "smp_cpus" and "max_cpus",
avoid passing them as function parameters, preferring instead to get them
from the state.

No functional change intended.
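
(Aside, not part of the patch: a minimal sketch of the access pattern this
change standardizes on, assuming the usual QEMU headers; the helper name
below is made up purely for illustration.)

    #include "hw/boards.h"    /* MachineState, MACHINE() */
    #include "hw/arm/virt.h"  /* VirtMachineState */

    /* Illustrative helper: read the CPU counts from the generic machine
     * state rather than from a duplicate cached on VirtMachineState.
     * Any code that has a VirtMachineState can reach the MachineState
     * via the MACHINE() QOM cast macro.
     */
    static unsigned int example_smp_cpus(VirtMachineState *vms)
    {
        MachineState *ms = MACHINE(vms);
        return ms->smp.cpus;  /* ms->smp.max_cpus gives the maximum */
    }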

Signed-off-by: Andrew Jones <drjones@redhat.com>

Reviewed-by: Ying Fang <fangying1@huawei.com>

---
  hw/arm/virt-acpi-build.c |  9 +++++----
  hw/arm/virt.c            | 24 +++++++++++-------------
  include/hw/arm/virt.h    |  3 +--
  3 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 711cf2069fe8..9d9ee2405345 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -59,11 +59,12 @@
 
 #define ACPI_BUILD_TABLE_SIZE 0x20000
 
-static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
+static void acpi_dsdt_add_cpus(Aml *scope, VirtMachineState *vms)
  {
+    MachineState *ms = MACHINE(vms);
      uint16_t i;
 
-    for (i = 0; i < smp_cpus; i++) {
+    for (i = 0; i < ms->smp.cpus; i++) {
          Aml *dev = aml_device("C%.03X", i);
          aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
          aml_append(dev, aml_name_decl("_UID", aml_int(i)));
@@ -484,7 +485,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
      gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
      gicd->version = vms->gic_version;
 
-    for (i = 0; i < vms->smp_cpus; i++) {
+    for (i = 0; i < MACHINE(vms)->smp.cpus; i++) {
          AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
                                                             sizeof(*gicc));
          ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));
@@ -603,7 +604,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
       * the RTC ACPI device at all when using UEFI.
       */
      scope = aml_scope("\\_SB");
-    acpi_dsdt_add_cpus(scope, vms->smp_cpus);
+    acpi_dsdt_add_cpus(scope, vms);
      acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
                         (irqmap[VIRT_UART] + ARM_SPI_BASE));
      if (vmc->acpi_expose_flash) {
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 556592012ee0..534d306f3104 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -323,7 +323,7 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
      if (vms->gic_version == VIRT_GIC_VERSION_2) {
          irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                               GIC_FDT_IRQ_PPI_CPU_WIDTH,
-                             (1 << vms->smp_cpus) - 1);
+                             (1 << MACHINE(vms)->smp.cpus) - 1);
      }
qemu_fdt_add_subnode(vms->fdt, "/timer");
@@ -347,9 +347,9 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms)
 
 static void fdt_add_cpu_nodes(const VirtMachineState *vms)
  {
-    int cpu;
-    int addr_cells = 1;
      const MachineState *ms = MACHINE(vms);
+    int smp_cpus = ms->smp.cpus, cpu;
+    int addr_cells = 1;
 
      /*
       * From Documentation/devicetree/bindings/arm/cpus.txt
@@ -364,7 +364,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
       *  The simplest way to go is to examine affinity IDs of all our CPUs. If
       *  at least one of them has Aff3 populated, we set #address-cells to 2.
       */
-    for (cpu = 0; cpu < vms->smp_cpus; cpu++) {
+    for (cpu = 0; cpu < smp_cpus; cpu++) {
          ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
 
          if (armcpu->mp_affinity & ARM_AFF3_MASK) {
@@ -377,7 +377,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
      qemu_fdt_setprop_cell(vms->fdt, "/cpus", "#address-cells", addr_cells);
      qemu_fdt_setprop_cell(vms->fdt, "/cpus", "#size-cells", 0x0);
 
-    for (cpu = vms->smp_cpus - 1; cpu >= 0; cpu--) {
+    for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
          char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
          ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
          CPUState *cs = CPU(armcpu);
@@ -387,8 +387,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
          qemu_fdt_setprop_string(vms->fdt, nodename, "compatible",
                                      armcpu->dtb_compatible);
 
-        if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED
-            && vms->smp_cpus > 1) {
+        if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED && smp_cpus > 1) {
              qemu_fdt_setprop_string(vms->fdt, nodename,
                                          "enable-method", "psci");
          }
@@ -534,7 +533,7 @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms)
      if (vms->gic_version == VIRT_GIC_VERSION_2) {
          irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
                               GIC_FDT_IRQ_PPI_CPU_WIDTH,
-                             (1 << vms->smp_cpus) - 1);
+                             (1 << MACHINE(vms)->smp.cpus) - 1);
      }
qemu_fdt_add_subnode(vms->fdt, "/pmu");
@@ -1673,9 +1672,9 @@ static void finalize_gic_version(VirtMachineState *vms)
   * virt_cpu_post_init() must be called after the CPUs have
   * been realized and the GIC has been created.
   */
-static void virt_cpu_post_init(VirtMachineState *vms, int max_cpus,
-                               MemoryRegion *sysmem)
+static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem)
  {
+    int max_cpus = MACHINE(vms)->smp.max_cpus;
      bool aarch64, pmu, steal_time;
      CPUState *cpu;
@@ -1828,8 +1827,6 @@ static void machvirt_init(MachineState *machine)
          exit(1);
      }
 
-    vms->smp_cpus = smp_cpus;
-
      if (vms->virt && kvm_enabled()) {
          error_report("mach-virt: KVM does not support providing "
                       "Virtualization extensions to the guest CPU");
@@ -1845,6 +1842,7 @@ static void machvirt_init(MachineState *machine)
      create_fdt(vms);
 
      possible_cpus = mc->possible_cpu_arch_ids(machine);
+    assert(possible_cpus->len == max_cpus);
      for (n = 0; n < possible_cpus->len; n++) {
          Object *cpuobj;
          CPUState *cs;
@@ -1965,7 +1963,7 @@ static void machvirt_init(MachineState *machine)
 
      create_gic(vms);
 
-    virt_cpu_post_init(vms, possible_cpus->len, sysmem);
+    virt_cpu_post_init(vms, sysmem);
 
      fdt_add_pmu_nodes(vms);
 
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index abf54fab4981..e4a2d216420f 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -151,7 +151,6 @@ struct VirtMachineState {
      MemMapEntry *memmap;
      char *pciehb_nodename;
      const int *irqmap;
-    int smp_cpus;
      void *fdt;
      int fdt_size;
      uint32_t clock_phandle;
@@ -182,7 +181,7 @@ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms)
 
      assert(vms->gic_version == VIRT_GIC_VERSION_3);
 
-    return vms->smp_cpus > redist0_capacity ? 2 : 1;
+    return MACHINE(vms)->smp.cpus > redist0_capacity ? 2 : 1;
  }
 
 #endif /* QEMU_ARM_VIRT_H */



