Merge patch series "Add ACPI NUMA support for RISC-V"

Haibo Xu <haibo1.xu@intel.com> says:

This patch series enables RISC-V ACPI NUMA support, based on the
recently approved ACPI ECR [1].

Patch 1/4 adds a RISC-V specific acpi_numa.c file to parse NUMA information
from the SRAT and SLIT ACPI tables.
Patch 2/4 adds the common SRAT RINTC affinity structure handler (a rough
sketch of that structure's layout is shown below).
Patch 3/4 changes ACPI_NUMA to a hidden option, since it is now enabled
by default on all supported platforms.
Patch 4/4 replaces pr_info() with pr_debug() in arch_acpi_numa_init() to
avoid potential boot noise on ACPI platforms that are not NUMA.
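
For reference, a rough sketch of the SRAT RINTC Affinity structure handled
by patch 2/4, following the layout described in the ECR [1]. The field names
below mirror an ACPICA-style definition and are illustrative; only the fields
actually referenced by this series (header, flags, proximity_domain,
acpi_processor_uid) are taken from the patches themselves:

    /*
     * Illustrative layout of the SRAT RINTC Affinity structure per the
     * approved ECR [1]; the in-tree ACPICA definition may differ in naming.
     */
    struct acpi_srat_rintc_affinity {
            struct acpi_subtable_header header;     /* type and length */
            u16 reserved;
            u32 proximity_domain;   /* PXM the hart belongs to */
            u32 acpi_processor_uid; /* matches the RINTC UID in the MADT */
            u32 flags;              /* bit 0: ACPI_SRAT_RINTC_ENABLED */
            u32 clock_domain;
    };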

Based-on: https://github.com/linux-riscv/linux-riscv/tree/for-next

[1] https://drive.google.com/file/d/1YTdDx2IPm5IeZjAW932EYU-tUtgS08tX/view?usp=sharing

Testing:
Since the ACPI AIA/PLIC support patch set is still under upstream review,
testing was done using the poll-based HVC SBI console and a RAM disk.
1) Build latest Qemu with the following patch backported
   42bd4eeefd

2) Build latest EDK-II
   https://github.com/tianocore/edk2/blob/master/OvmfPkg/RiscVVirt/README.md

3) Build Linux with the following configs enabled
   CONFIG_RISCV_SBI_V01=y
   CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
   CONFIG_NONPORTABLE=y
   CONFIG_HVC_RISCV_SBI=y
   CONFIG_NUMA=y
   CONFIG_ACPI_NUMA=y

4) Build buildroot rootfs.cpio

5) Launch the Qemu machine
   qemu-system-riscv64 -nographic \
   -machine virt,pflash0=pflash0,pflash1=pflash1 -smp 4 -m 8G \
   -blockdev node-name=pflash0,driver=file,read-only=on,filename=RISCV_VIRT_CODE.fd \
   -blockdev node-name=pflash1,driver=file,filename=RISCV_VIRT_VARS.fd \
   -object memory-backend-ram,size=4G,id=m0 \
   -object memory-backend-ram,size=4G,id=m1 \
   -numa node,memdev=m0,cpus=0-1,nodeid=0 \
   -numa node,memdev=m1,cpus=2-3,nodeid=1 \
   -numa dist,src=0,dst=1,val=30 \
   -kernel linux/arch/riscv/boot/Image \
   -initrd buildroot/output/images/rootfs.cpio \
   -append "root=/dev/ram ro console=hvc0 earlycon=sbi"

[    0.000000] ACPI: SRAT: Node 0 PXM 0 [mem 0x80000000-0x17fffffff]
[    0.000000] ACPI: SRAT: Node 1 PXM 1 [mem 0x180000000-0x27fffffff]
[    0.000000] NUMA: NODE_DATA [mem 0x17fe3bc40-0x17fe3cfff]
[    0.000000] NUMA: NODE_DATA [mem 0x27fff4c40-0x27fff5fff]
...
[    0.000000] ACPI: NUMA: SRAT: PXM 0 -> HARTID 0x0 -> Node 0
[    0.000000] ACPI: NUMA: SRAT: PXM 0 -> HARTID 0x1 -> Node 0
[    0.000000] ACPI: NUMA: SRAT: PXM 1 -> HARTID 0x2 -> Node 1
[    0.000000] ACPI: NUMA: SRAT: PXM 1 -> HARTID 0x3 -> Node 1
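
Beyond the boot log, the resulting CPU-to-node mapping can also be dumped
with the generic topology helpers. A minimal, hypothetical debug snippet
(not part of this series) would look roughly like:

    /* Hypothetical debug helper: dump the CPU-to-node mapping that was
     * established from the SRAT RINTC affinity entries. */
    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/printk.h>
    #include <linux/topology.h>

    static int __init dump_acpi_numa_map(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu)
                    pr_info("cpu%u -> node %d\n", cpu, cpu_to_node(cpu));

            return 0;
    }
    late_initcall(dump_acpi_numa_map);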

* b4-shazam-merge:
  ACPI: NUMA: replace pr_info with pr_debug in arch_acpi_numa_init
  ACPI: NUMA: change the ACPI_NUMA to a hidden option
  ACPI: NUMA: Add handler for SRAT RINTC affinity structure
  ACPI: RISCV: Add NUMA support based on SRAT and SLIT

Link: https://lore.kernel.org/r/cover.1718268003.git.haibo1.xu@intel.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
commit 6a4aa4c94b (Palmer Dabbelt, 2024-07-22 07:13:13 -07:00)
12 changed files with 187 additions and 18 deletions


@@ -1471,7 +1471,6 @@ config HOTPLUG_CPU
config NUMA
	bool "NUMA Memory Allocation and Scheduler Support"
	select GENERIC_ARCH_NUMA
	select ACPI_NUMA if ACPI
	select OF_NUMA
	select HAVE_SETUP_PER_CPU_AREA
	select NEED_PER_CPU_EMBED_FIRST_CHUNK


@@ -473,7 +473,6 @@ config NR_CPUS
config NUMA
	bool "NUMA Support"
	select SMP
	select ACPI_NUMA if ACPI
	help
	  Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
	  support. This option improves performance on systems with more


@@ -61,11 +61,14 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void acpi_init_rintc_map(void);
struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu);
u32 get_acpi_id_for_cpu(int cpu);
static inline u32 get_acpi_id_for_cpu(int cpu)
{
	return acpi_cpu_get_madt_rintc(cpu)->uid;
}

int acpi_get_riscv_isa(struct acpi_table_header *table,
		       unsigned int cpu, const char **isa);

static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
			     u32 *cboz_size, u32 *cbop_size);
#else

@@ -87,4 +90,12 @@ static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
#endif /* CONFIG_ACPI */

#ifdef CONFIG_ACPI_NUMA
int acpi_numa_get_nid(unsigned int cpu);
void acpi_map_cpus_to_nodes(void);
#else
static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
static inline void acpi_map_cpus_to_nodes(void) { }
#endif /* CONFIG_ACPI_NUMA */

#endif /*_ASM_ACPI_H*/


@@ -110,3 +110,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/
obj-$(CONFIG_64BIT) += pi/
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o


@@ -191,11 +191,6 @@ struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
	return &cpu_madt_rintc[cpu];
}

u32 get_acpi_id_for_cpu(int cpu)
{
	return acpi_cpu_get_madt_rintc(cpu)->uid;
}

/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be called here to for ACPI table mapping.


@@ -0,0 +1,131 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * ACPI 6.6 based NUMA setup for RISCV
 * Lots of code was borrowed from arch/arm64/kernel/acpi_numa.c
 *
 * Copyright 2004 Andi Kleen, SuSE Labs.
 * Copyright (C) 2013-2016, Linaro Ltd.
 *		Author: Hanjun Guo <hanjun.guo@linaro.org>
 * Copyright (C) 2024 Intel Corporation.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#define pr_fmt(fmt) "ACPI: NUMA: " fmt

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/topology.h>

#include <asm/numa.h>

static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };

int __init acpi_numa_get_nid(unsigned int cpu)
{
	return acpi_early_node_map[cpu];
}

static inline int get_cpu_for_acpi_id(u32 uid)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (uid == get_acpi_id_for_cpu(cpu))
			return cpu;

	return -EINVAL;
}

static int __init acpi_parse_rintc_pxm(union acpi_subtable_headers *header,
				       const unsigned long end)
{
	struct acpi_srat_rintc_affinity *pa;
	int cpu, pxm, node;

	if (srat_disabled())
		return -EINVAL;

	pa = (struct acpi_srat_rintc_affinity *)header;
	if (!pa)
		return -EINVAL;

	if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
		return 0;

	pxm = pa->proximity_domain;
	node = pxm_to_node(pxm);

	/*
	 * If we can't map the UID to a logical cpu this
	 * means that the UID is not part of possible cpus
	 * so we do not need a NUMA mapping for it, skip
	 * the SRAT entry and keep parsing.
	 */
	cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
	if (cpu < 0)
		return 0;

	acpi_early_node_map[cpu] = node;
	pr_info("SRAT: PXM %d -> HARTID 0x%lx -> Node %d\n", pxm,
		cpuid_to_hartid_map(cpu), node);

	return 0;
}

void __init acpi_map_cpus_to_nodes(void)
{
	int i;

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
				 ACPI_SRAT_TYPE_RINTC_AFFINITY, acpi_parse_rintc_pxm, 0);

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}

/* Callback for Proximity Domain -> logical node ID mapping */
void __init acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;

	if (pa->header.length < sizeof(struct acpi_srat_rintc_affinity)) {
		pr_err("SRAT: Invalid SRAT header length: %d\n", pa->header.length);
		bad_srat();
		return;
	}

	if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
		return;

	pxm = pa->proximity_domain;
	node = acpi_map_pxm_to_node(pxm);

	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains %d\n", pxm);
		bad_srat();
		return;
	}

	node_set(node, numa_nodes_parsed);
}


@@ -281,8 +281,10 @@ void __init setup_arch(char **cmdline_p)
	setup_smp();
#endif

	if (!acpi_disabled)
	if (!acpi_disabled) {
		acpi_init_rintc_map();
		acpi_map_cpus_to_nodes();
	}

	riscv_init_cbo_blocksizes();
	riscv_fill_hwcap();


@@ -96,7 +96,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
	if (hart == cpuid_to_hartid_map(0)) {
		BUG_ON(found_boot_cpu);
		found_boot_cpu = true;
		early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count));
		return 0;
	}

@@ -106,7 +105,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
	}

	cpuid_to_hartid_map(cpu_count) = hart;
	early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count));
	cpu_count++;

	return 0;


@@ -1,9 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config ACPI_NUMA
	bool "NUMA support"
	depends on NUMA
	depends on (X86 || ARM64 || LOONGARCH)
	default y if ARM64
	def_bool NUMA && !X86

config ACPI_HMAT
	bool "ACPI Heterogeneous Memory Attribute Table Support"


@@ -167,6 +167,19 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
		}
	}
	break;

	case ACPI_SRAT_TYPE_RINTC_AFFINITY:
	{
		struct acpi_srat_rintc_affinity *p =
			(struct acpi_srat_rintc_affinity *)header;

		pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
			 p->acpi_processor_uid,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_RINTC_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	default:
		pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
			header->type);

@@ -450,6 +463,21 @@ acpi_parse_gi_affinity(union acpi_subtable_headers *header,
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */

static int __init
acpi_parse_rintc_affinity(union acpi_subtable_headers *header,
			  const unsigned long end)
{
	struct acpi_srat_rintc_affinity *rintc_affinity;

	rintc_affinity = (struct acpi_srat_rintc_affinity *)header;
	acpi_table_print_srat_entry(&header->common);

	/* let architecture-dependent part to do it */
	acpi_numa_rintc_affinity_init(rintc_affinity);

	return 0;
}

static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat = (struct acpi_table_srat *)table;

@@ -485,7 +513,7 @@ int __init acpi_numa_init(void)
	/* SRAT: System Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		struct acpi_subtable_proc srat_proc[4];
		struct acpi_subtable_proc srat_proc[5];

		memset(srat_proc, 0, sizeof(srat_proc));
		srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;

@@ -496,6 +524,8 @@ int __init acpi_numa_init(void)
		srat_proc[2].handler = acpi_parse_gicc_affinity;
		srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
		srat_proc[3].handler = acpi_parse_gi_affinity;
		srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY;
		srat_proc[4].handler = acpi_parse_rintc_affinity;

		acpi_table_parse_entries_array(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat),


@@ -445,7 +445,7 @@ static int __init arch_acpi_numa_init(void)
	ret = acpi_numa_init();
	if (ret) {
		pr_info("Failed to initialise from firmware\n");
		pr_debug("Failed to initialise from firmware\n");
		return ret;
	}


@@ -259,6 +259,12 @@ static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
#endif

#ifdef CONFIG_RISCV
void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa);
#else
static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { }
#endif

#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)